/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the data
 *	and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
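
/*
 * Illustrative sketch (not part of this file): a minimal ETH_P_ALL tap
 * registered through the routines above.  The handler name my_tap_rcv
 * and the call sites are hypothetical.
 */
#if 0
static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* The skb may be shared with other taps; look but do not touch. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_tap __read_mostly = {
	.type	= cpu_to_be16(ETH_P_ALL),	/* lands on the ptype_all list */
	.func	= my_tap_rcv,
};

/* dev_add_pack(&my_tap); ... dev_remove_pack(&my_tap); */
#endif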

/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
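
/*
 * Illustrative example (assumed from the parsing above): "netdev=" takes
 * up to four integers followed by a device name, e.g. on the kernel
 * command line
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * which records irq=5 and base_addr=0x300 for eth0 in dev_boot_setup.
 */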

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
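
/*
 * Illustrative sketch: the refcounted lookup pattern dev_get_by_name()
 * is meant for.  The function name is hypothetical.
 */
#if 0
static void my_lookup_example(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* dev cannot be freed while we hold the reference */
		dev_put(dev);	/* drop it when done */
	}
}
#endif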

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
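
/*
 * Illustrative sketch: a lockless ifindex-to-name lookup.  No reference
 * is taken, so the device may only be touched inside the RCU read-side
 * critical section.  The helper name is hypothetical.
 */
#if 0
static char *my_ifindex_name(struct net *net, int ifindex, char *buf)
{
	struct net_device *dev;

	buf[0] = '\0';
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strlcpy(buf, dev->name, IFNAMSIZ);	/* no refcount taken */
	rcu_read_unlock();
	return buf;
}
#endif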

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
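
/*
 * Illustrative sketch: a driver claiming the next free "eth%d" slot for
 * a freshly allocated net_device before registering it.  The function
 * name is hypothetical; the caller is assumed to hold the RTNL lock,
 * as dev_alloc_name() requires.
 */
#if 0
static int my_register_example(struct net_device *dev)
{
	int err = dev_alloc_name(dev, "eth%d");

	if (err < 0)
		return err;	/* invalid format or no free slot */
	/* dev->name now holds e.g. "eth0"; err is the unit number */
	return register_netdevice(dev);
}
#endif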

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
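
/*
 * Illustrative sketch: bringing an interface up from process context.
 * dev_open() must run under the RTNL semaphore, as __dev_open() asserts.
 * The function name is hypothetical.
 */
#if 0
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* a nop if IFF_UP is already set */
	rtnl_unlock();
	return err;
}
#endif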

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list, it
	 * can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
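
/*
 * Illustrative sketch: a notifier block watching devices come and go.
 * The handler and the (un)registration call sites are hypothetical;
 * note that existing devices are replayed as NETDEV_REGISTER/NETDEV_UP
 * at registration time, as described above.
 */
#if 0
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier = {
	.notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_netdev_notifier); */
#endif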

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (!(dev->flags & IFF_UP) ||
	    (skb->len > (dev->mtu + dev->hard_header_len))) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
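
/*
 * Illustrative sketch: a veth-style paired device handing each
 * transmitted skb to its peer's receive path via dev_forward_skb().
 * my_get_peer() and the xmit function are hypothetical.
 */
#if 0
static netdev_tx_t my_pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = my_get_peer(dev);

	/* dev_forward_skb() consumes the skb on both success and drop */
	dev_forward_skb(peer, skb);
	return NETDEV_TX_OK;
}
#endif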

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	unsigned int real_num = dev->real_num_tx_queues;

	if (unlikely(txq > dev->num_tx_queues))
		;
	else if (txq > real_num)
		dev->real_num_tx_queues = txq;
	else if (txq < real_num) {
		dev->real_num_tx_queues = txq;
		qdisc_reset_all_tx_gt(dev, txq);
	}
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
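
/*
 * Illustrative sketch: the usual suspend/resume pairing for the two
 * helpers above, in the style of a PCI network driver.  The my_* names
 * are hypothetical.
 */
#if 0
static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	netif_device_detach(ndev);	/* stops all TX queues if running */
	/* ... power the hardware down ... */
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	/* ... power the hardware up ... */
	netif_device_attach(ndev);	/* wakes queues, rearms watchdog */
	return 0;
}
#endif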

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
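
/*
 * Illustrative sketch: the software fallback a transmit path applies
 * when the device cannot checksum this protocol; dev_hard_start_xmit()
 * below does exactly this dance.  The wrapper name is hypothetical.
 */
#if 0
static int my_tx_checksum_fixup(struct net_device *dev, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !dev_can_checksum(dev, skb) &&
	    skb_checksum_help(skb))
		return -EIO;	/* caller frees the skb */
	return 0;
}
#endif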

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested, since
 * drivers need to call skb_tstamp_tx() to send the timestamp.
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_tx(skb)->flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					       illegal_highdma(dev, skb))));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		skb_orphan_try(skb);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, dev) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
1970 */ 1971 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1972 skb_set_transport_header(skb, skb->csum_start - 1973 skb_headroom(skb)); 1974 if (!dev_can_checksum(dev, skb) && 1975 skb_checksum_help(skb)) 1976 goto out_kfree_skb; 1977 } 1978 } 1979 1980 rc = ops->ndo_start_xmit(skb, dev); 1981 if (rc == NETDEV_TX_OK) 1982 txq_trans_update(txq); 1983 return rc; 1984 } 1985 1986 gso: 1987 do { 1988 struct sk_buff *nskb = skb->next; 1989 1990 skb->next = nskb->next; 1991 nskb->next = NULL; 1992 1993 /* 1994 * If device doesnt need nskb->dst, release it right now while 1995 * its hot in this cpu cache 1996 */ 1997 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 1998 skb_dst_drop(nskb); 1999 2000 rc = ops->ndo_start_xmit(nskb, dev); 2001 if (unlikely(rc != NETDEV_TX_OK)) { 2002 if (rc & ~NETDEV_TX_MASK) 2003 goto out_kfree_gso_skb; 2004 nskb->next = skb->next; 2005 skb->next = nskb; 2006 return rc; 2007 } 2008 txq_trans_update(txq); 2009 if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) 2010 return NETDEV_TX_BUSY; 2011 } while (skb->next); 2012 2013 out_kfree_gso_skb: 2014 if (likely(skb->next == NULL)) 2015 skb->destructor = DEV_GSO_CB(skb)->destructor; 2016 out_kfree_skb: 2017 kfree_skb(skb); 2018 return rc; 2019 } 2020 2021 static u32 hashrnd __read_mostly; 2022 2023 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) 2024 { 2025 u32 hash; 2026 2027 if (skb_rx_queue_recorded(skb)) { 2028 hash = skb_get_rx_queue(skb); 2029 while (unlikely(hash >= dev->real_num_tx_queues)) 2030 hash -= dev->real_num_tx_queues; 2031 return hash; 2032 } 2033 2034 if (skb->sk && skb->sk->sk_hash) 2035 hash = skb->sk->sk_hash; 2036 else 2037 hash = (__force u16) skb->protocol ^ skb->rxhash; 2038 hash = jhash_1word(hash, hashrnd); 2039 2040 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); 2041 } 2042 EXPORT_SYMBOL(skb_tx_hash); 2043 2044 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 2045 { 2046 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2047 if (net_ratelimit()) { 2048 pr_warning("%s selects TX queue %d, but " 2049 "real number of TX queues is %d\n", 2050 dev->name, queue_index, dev->real_num_tx_queues); 2051 } 2052 return 0; 2053 } 2054 return queue_index; 2055 } 2056 2057 static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2058 struct sk_buff *skb) 2059 { 2060 int queue_index; 2061 struct sock *sk = skb->sk; 2062 2063 queue_index = sk_tx_queue_get(sk); 2064 if (queue_index < 0) { 2065 const struct net_device_ops *ops = dev->netdev_ops; 2066 2067 if (ops->ndo_select_queue) { 2068 queue_index = ops->ndo_select_queue(dev, skb); 2069 queue_index = dev_cap_txqueue(dev, queue_index); 2070 } else { 2071 queue_index = 0; 2072 if (dev->real_num_tx_queues > 1) 2073 queue_index = skb_tx_hash(dev, skb); 2074 2075 if (sk) { 2076 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); 2077 2078 if (dst && skb_dst(skb) == dst) 2079 sk_tx_queue_set(sk, queue_index); 2080 } 2081 } 2082 } 2083 2084 skb_set_queue_mapping(skb, queue_index); 2085 return netdev_get_tx_queue(dev, queue_index); 2086 } 2087 2088 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2089 struct net_device *dev, 2090 struct netdev_queue *txq) 2091 { 2092 spinlock_t *root_lock = qdisc_lock(q); 2093 bool contended = qdisc_is_running(q); 2094 int rc; 2095 2096 /* 2097 * Heuristic to force contended enqueues to serialize on a 2098 * separate lock before trying to get qdisc main lock. 
2099 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2100 * and dequeue packets faster. 2101 */ 2102 if (unlikely(contended)) 2103 spin_lock(&q->busylock); 2104 2105 spin_lock(root_lock); 2106 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2107 kfree_skb(skb); 2108 rc = NET_XMIT_DROP; 2109 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2110 qdisc_run_begin(q)) { 2111 /* 2112 * This is a work-conserving queue; there are no old skbs 2113 * waiting to be sent out; and the qdisc is not running - 2114 * xmit the skb directly. 2115 */ 2116 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2117 skb_dst_force(skb); 2118 __qdisc_update_bstats(q, skb->len); 2119 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2120 if (unlikely(contended)) { 2121 spin_unlock(&q->busylock); 2122 contended = false; 2123 } 2124 __qdisc_run(q); 2125 } else 2126 qdisc_run_end(q); 2127 2128 rc = NET_XMIT_SUCCESS; 2129 } else { 2130 skb_dst_force(skb); 2131 rc = qdisc_enqueue_root(skb, q); 2132 if (qdisc_run_begin(q)) { 2133 if (unlikely(contended)) { 2134 spin_unlock(&q->busylock); 2135 contended = false; 2136 } 2137 __qdisc_run(q); 2138 } 2139 } 2140 spin_unlock(root_lock); 2141 if (unlikely(contended)) 2142 spin_unlock(&q->busylock); 2143 return rc; 2144 } 2145 2146 /** 2147 * dev_queue_xmit - transmit a buffer 2148 * @skb: buffer to transmit 2149 * 2150 * Queue a buffer for transmission to a network device. The caller must 2151 * have set the device and priority and built the buffer before calling 2152 * this function. The function can be called from an interrupt. 2153 * 2154 * A negative errno code is returned on a failure. A success does not 2155 * guarantee the frame will be transmitted as it may be dropped due 2156 * to congestion or traffic shaping. 2157 * 2158 * ----------------------------------------------------------------------------------- 2159 * I notice this method can also return errors from the queue disciplines, 2160 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2161 * be positive. 2162 * 2163 * Regardless of the return value, the skb is consumed, so it is currently 2164 * difficult to retry a send to this method. (You can bump the ref count 2165 * before sending to hold a reference for retry if you are careful.) 2166 * 2167 * When calling this method, interrupts MUST be enabled. This is because 2168 * the BH enable code must have IRQs enabled so that it will not deadlock. 2169 * --BLG 2170 */ 2171 int dev_queue_xmit(struct sk_buff *skb) 2172 { 2173 struct net_device *dev = skb->dev; 2174 struct netdev_queue *txq; 2175 struct Qdisc *q; 2176 int rc = -ENOMEM; 2177 2178 /* Disable soft irqs for various locks below. Also 2179 * stops preemption for RCU. 2180 */ 2181 rcu_read_lock_bh(); 2182 2183 txq = dev_pick_tx(dev, skb); 2184 q = rcu_dereference_bh(txq->qdisc); 2185 2186 #ifdef CONFIG_NET_CLS_ACT 2187 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2188 #endif 2189 if (q->enqueue) { 2190 rc = __dev_xmit_skb(skb, q, dev, txq); 2191 goto out; 2192 } 2193 2194 /* The device has no queue. Common case for software devices: 2195 loopback, all the sorts of tunnels... 2196 2197 Really, it is unlikely that netif_tx_lock protection is necessary 2198 here. (f.e. loopback and IP tunnels are clean ignoring statistics 2199 counters.) 2200 However, it is possible, that they rely on protection 2201 made by us here. 2202 2203 Check this and shot the lock. It is not prone from deadlocks. 
	   Or shoot the noqueue qdisc anyway; that is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = dev_hard_start_xmit(skb, dev, txq);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
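 *
 * (Editorial sketch, not part of the original source: for a TCP/IPv4
 * packet the hash computed below reduces to
 *
 *	if (daddr < saddr)
 *		swap(saddr, daddr);
 *	hash = jhash_3words(saddr, daddr, ports, hashrnd);
 *
 * with the 16-bit port pair ordered the same way, so both directions
 * of one connection produce the same hash and thus the same CPU.)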
2272 */ 2273 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2274 struct rps_dev_flow **rflowp) 2275 { 2276 struct ipv6hdr *ip6; 2277 struct iphdr *ip; 2278 struct netdev_rx_queue *rxqueue; 2279 struct rps_map *map; 2280 struct rps_dev_flow_table *flow_table; 2281 struct rps_sock_flow_table *sock_flow_table; 2282 int cpu = -1; 2283 u8 ip_proto; 2284 u16 tcpu; 2285 u32 addr1, addr2, ihl; 2286 union { 2287 u32 v32; 2288 u16 v16[2]; 2289 } ports; 2290 2291 if (skb_rx_queue_recorded(skb)) { 2292 u16 index = skb_get_rx_queue(skb); 2293 if (unlikely(index >= dev->num_rx_queues)) { 2294 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet " 2295 "on queue %u, but number of RX queues is %u\n", 2296 dev->name, index, dev->num_rx_queues); 2297 goto done; 2298 } 2299 rxqueue = dev->_rx + index; 2300 } else 2301 rxqueue = dev->_rx; 2302 2303 if (!rxqueue->rps_map && !rxqueue->rps_flow_table) 2304 goto done; 2305 2306 if (skb->rxhash) 2307 goto got_hash; /* Skip hash computation on packet header */ 2308 2309 switch (skb->protocol) { 2310 case __constant_htons(ETH_P_IP): 2311 if (!pskb_may_pull(skb, sizeof(*ip))) 2312 goto done; 2313 2314 ip = (struct iphdr *) skb->data; 2315 ip_proto = ip->protocol; 2316 addr1 = (__force u32) ip->saddr; 2317 addr2 = (__force u32) ip->daddr; 2318 ihl = ip->ihl; 2319 break; 2320 case __constant_htons(ETH_P_IPV6): 2321 if (!pskb_may_pull(skb, sizeof(*ip6))) 2322 goto done; 2323 2324 ip6 = (struct ipv6hdr *) skb->data; 2325 ip_proto = ip6->nexthdr; 2326 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2327 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2328 ihl = (40 >> 2); 2329 break; 2330 default: 2331 goto done; 2332 } 2333 switch (ip_proto) { 2334 case IPPROTO_TCP: 2335 case IPPROTO_UDP: 2336 case IPPROTO_DCCP: 2337 case IPPROTO_ESP: 2338 case IPPROTO_AH: 2339 case IPPROTO_SCTP: 2340 case IPPROTO_UDPLITE: 2341 if (pskb_may_pull(skb, (ihl * 4) + 4)) { 2342 ports.v32 = * (__force u32 *) (skb->data + (ihl * 4)); 2343 if (ports.v16[1] < ports.v16[0]) 2344 swap(ports.v16[0], ports.v16[1]); 2345 break; 2346 } 2347 default: 2348 ports.v32 = 0; 2349 break; 2350 } 2351 2352 /* get a consistent hash (same value on both flow directions) */ 2353 if (addr2 < addr1) 2354 swap(addr1, addr2); 2355 skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd); 2356 if (!skb->rxhash) 2357 skb->rxhash = 1; 2358 2359 got_hash: 2360 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2361 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2362 if (flow_table && sock_flow_table) { 2363 u16 next_cpu; 2364 struct rps_dev_flow *rflow; 2365 2366 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 2367 tcpu = rflow->cpu; 2368 2369 next_cpu = sock_flow_table->ents[skb->rxhash & 2370 sock_flow_table->mask]; 2371 2372 /* 2373 * If the desired CPU (where last recvmsg was done) is 2374 * different from current CPU (one in the rx-queue flow 2375 * table entry), switch if one of the following holds: 2376 * - Current CPU is unset (equal to RPS_NO_CPU). 2377 * - Current CPU is offline. 2378 * - The current CPU's queue tail has advanced beyond the 2379 * last packet that was enqueued using this table entry. 2380 * This guarantees that all previous packets for the flow 2381 * have been dequeued, thus preserving in order delivery. 
2382 */ 2383 if (unlikely(tcpu != next_cpu) && 2384 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2385 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2386 rflow->last_qtail)) >= 0)) { 2387 tcpu = rflow->cpu = next_cpu; 2388 if (tcpu != RPS_NO_CPU) 2389 rflow->last_qtail = per_cpu(softnet_data, 2390 tcpu).input_queue_head; 2391 } 2392 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2393 *rflowp = rflow; 2394 cpu = tcpu; 2395 goto done; 2396 } 2397 } 2398 2399 map = rcu_dereference(rxqueue->rps_map); 2400 if (map) { 2401 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2402 2403 if (cpu_online(tcpu)) { 2404 cpu = tcpu; 2405 goto done; 2406 } 2407 } 2408 2409 done: 2410 return cpu; 2411 } 2412 2413 /* Called from hardirq (IPI) context */ 2414 static void rps_trigger_softirq(void *data) 2415 { 2416 struct softnet_data *sd = data; 2417 2418 ____napi_schedule(sd, &sd->backlog); 2419 sd->received_rps++; 2420 } 2421 2422 #endif /* CONFIG_RPS */ 2423 2424 /* 2425 * Check if this softnet_data structure is another cpu one 2426 * If yes, queue it to our IPI list and return 1 2427 * If no, return 0 2428 */ 2429 static int rps_ipi_queued(struct softnet_data *sd) 2430 { 2431 #ifdef CONFIG_RPS 2432 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 2433 2434 if (sd != mysd) { 2435 sd->rps_ipi_next = mysd->rps_ipi_list; 2436 mysd->rps_ipi_list = sd; 2437 2438 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2439 return 1; 2440 } 2441 #endif /* CONFIG_RPS */ 2442 return 0; 2443 } 2444 2445 /* 2446 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 2447 * queue (may be a remote CPU queue). 2448 */ 2449 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 2450 unsigned int *qtail) 2451 { 2452 struct softnet_data *sd; 2453 unsigned long flags; 2454 2455 sd = &per_cpu(softnet_data, cpu); 2456 2457 local_irq_save(flags); 2458 2459 rps_lock(sd); 2460 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 2461 if (skb_queue_len(&sd->input_pkt_queue)) { 2462 enqueue: 2463 __skb_queue_tail(&sd->input_pkt_queue, skb); 2464 input_queue_tail_incr_save(sd, qtail); 2465 rps_unlock(sd); 2466 local_irq_restore(flags); 2467 return NET_RX_SUCCESS; 2468 } 2469 2470 /* Schedule NAPI for backlog device 2471 * We can use non atomic operation since we own the queue lock 2472 */ 2473 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 2474 if (!rps_ipi_queued(sd)) 2475 ____napi_schedule(sd, &sd->backlog); 2476 } 2477 goto enqueue; 2478 } 2479 2480 sd->dropped++; 2481 rps_unlock(sd); 2482 2483 local_irq_restore(flags); 2484 2485 kfree_skb(skb); 2486 return NET_RX_DROP; 2487 } 2488 2489 /** 2490 * netif_rx - post buffer to the network code 2491 * @skb: buffer to post 2492 * 2493 * This function receives a packet from a device driver and queues it for 2494 * the upper (protocol) levels to process. It always succeeds. The buffer 2495 * may be dropped during processing for congestion control or by the 2496 * protocol layers. 
2497 * 2498 * return values: 2499 * NET_RX_SUCCESS (no congestion) 2500 * NET_RX_DROP (packet was dropped) 2501 * 2502 */ 2503 2504 int netif_rx(struct sk_buff *skb) 2505 { 2506 int ret; 2507 2508 /* if netpoll wants it, pretend we never saw it */ 2509 if (netpoll_rx(skb)) 2510 return NET_RX_DROP; 2511 2512 if (netdev_tstamp_prequeue) 2513 net_timestamp_check(skb); 2514 2515 #ifdef CONFIG_RPS 2516 { 2517 struct rps_dev_flow voidflow, *rflow = &voidflow; 2518 int cpu; 2519 2520 preempt_disable(); 2521 rcu_read_lock(); 2522 2523 cpu = get_rps_cpu(skb->dev, skb, &rflow); 2524 if (cpu < 0) 2525 cpu = smp_processor_id(); 2526 2527 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2528 2529 rcu_read_unlock(); 2530 preempt_enable(); 2531 } 2532 #else 2533 { 2534 unsigned int qtail; 2535 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 2536 put_cpu(); 2537 } 2538 #endif 2539 return ret; 2540 } 2541 EXPORT_SYMBOL(netif_rx); 2542 2543 int netif_rx_ni(struct sk_buff *skb) 2544 { 2545 int err; 2546 2547 preempt_disable(); 2548 err = netif_rx(skb); 2549 if (local_softirq_pending()) 2550 do_softirq(); 2551 preempt_enable(); 2552 2553 return err; 2554 } 2555 EXPORT_SYMBOL(netif_rx_ni); 2556 2557 static void net_tx_action(struct softirq_action *h) 2558 { 2559 struct softnet_data *sd = &__get_cpu_var(softnet_data); 2560 2561 if (sd->completion_queue) { 2562 struct sk_buff *clist; 2563 2564 local_irq_disable(); 2565 clist = sd->completion_queue; 2566 sd->completion_queue = NULL; 2567 local_irq_enable(); 2568 2569 while (clist) { 2570 struct sk_buff *skb = clist; 2571 clist = clist->next; 2572 2573 WARN_ON(atomic_read(&skb->users)); 2574 __kfree_skb(skb); 2575 } 2576 } 2577 2578 if (sd->output_queue) { 2579 struct Qdisc *head; 2580 2581 local_irq_disable(); 2582 head = sd->output_queue; 2583 sd->output_queue = NULL; 2584 sd->output_queue_tailp = &sd->output_queue; 2585 local_irq_enable(); 2586 2587 while (head) { 2588 struct Qdisc *q = head; 2589 spinlock_t *root_lock; 2590 2591 head = head->next_sched; 2592 2593 root_lock = qdisc_lock(q); 2594 if (spin_trylock(root_lock)) { 2595 smp_mb__before_clear_bit(); 2596 clear_bit(__QDISC_STATE_SCHED, 2597 &q->state); 2598 qdisc_run(q); 2599 spin_unlock(root_lock); 2600 } else { 2601 if (!test_bit(__QDISC_STATE_DEACTIVATED, 2602 &q->state)) { 2603 __netif_reschedule(q); 2604 } else { 2605 smp_mb__before_clear_bit(); 2606 clear_bit(__QDISC_STATE_SCHED, 2607 &q->state); 2608 } 2609 } 2610 } 2611 } 2612 } 2613 2614 static inline int deliver_skb(struct sk_buff *skb, 2615 struct packet_type *pt_prev, 2616 struct net_device *orig_dev) 2617 { 2618 atomic_inc(&skb->users); 2619 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2620 } 2621 2622 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 2623 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 2624 /* This hook is defined here for ATM LANE */ 2625 int (*br_fdb_test_addr_hook)(struct net_device *dev, 2626 unsigned char *addr) __read_mostly; 2627 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 2628 #endif 2629 2630 #ifdef CONFIG_NET_CLS_ACT 2631 /* TODO: Maybe we should just force sch_ingress to be compiled in 2632 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 2633 * a compare and 2 stores extra right now if we dont have it on 2634 * but have CONFIG_NET_CLS_ACT 2635 * NOTE: This doesnt stop any functionality; if you dont have 2636 * the ingress scheduler, you just cant add policies on ingress. 
2637 * 2638 */ 2639 static int ing_filter(struct sk_buff *skb) 2640 { 2641 struct net_device *dev = skb->dev; 2642 u32 ttl = G_TC_RTTL(skb->tc_verd); 2643 struct netdev_queue *rxq; 2644 int result = TC_ACT_OK; 2645 struct Qdisc *q; 2646 2647 if (unlikely(MAX_RED_LOOP < ttl++)) { 2648 if (net_ratelimit()) 2649 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", 2650 skb->skb_iif, dev->ifindex); 2651 return TC_ACT_SHOT; 2652 } 2653 2654 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 2655 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 2656 2657 rxq = &dev->rx_queue; 2658 2659 q = rxq->qdisc; 2660 if (q != &noop_qdisc) { 2661 spin_lock(qdisc_lock(q)); 2662 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 2663 result = qdisc_enqueue_root(skb, q); 2664 spin_unlock(qdisc_lock(q)); 2665 } 2666 2667 return result; 2668 } 2669 2670 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 2671 struct packet_type **pt_prev, 2672 int *ret, struct net_device *orig_dev) 2673 { 2674 if (skb->dev->rx_queue.qdisc == &noop_qdisc) 2675 goto out; 2676 2677 if (*pt_prev) { 2678 *ret = deliver_skb(skb, *pt_prev, orig_dev); 2679 *pt_prev = NULL; 2680 } 2681 2682 switch (ing_filter(skb)) { 2683 case TC_ACT_SHOT: 2684 case TC_ACT_STOLEN: 2685 kfree_skb(skb); 2686 return NULL; 2687 } 2688 2689 out: 2690 skb->tc_verd = 0; 2691 return skb; 2692 } 2693 #endif 2694 2695 /* 2696 * netif_nit_deliver - deliver received packets to network taps 2697 * @skb: buffer 2698 * 2699 * This function is used to deliver incoming packets to network 2700 * taps. It should be used when the normal netif_receive_skb path 2701 * is bypassed, for example because of VLAN acceleration. 2702 */ 2703 void netif_nit_deliver(struct sk_buff *skb) 2704 { 2705 struct packet_type *ptype; 2706 2707 if (list_empty(&ptype_all)) 2708 return; 2709 2710 skb_reset_network_header(skb); 2711 skb_reset_transport_header(skb); 2712 skb->mac_len = skb->network_header - skb->mac_header; 2713 2714 rcu_read_lock(); 2715 list_for_each_entry_rcu(ptype, &ptype_all, list) { 2716 if (!ptype->dev || ptype->dev == skb->dev) 2717 deliver_skb(skb, ptype, skb->dev); 2718 } 2719 rcu_read_unlock(); 2720 } 2721 2722 /** 2723 * netdev_rx_handler_register - register receive handler 2724 * @dev: device to register a handler for 2725 * @rx_handler: receive handler to register 2726 * @rx_handler_data: data pointer that is used by rx handler 2727 * 2728 * Register a receive hander for a device. This handler will then be 2729 * called from __netif_receive_skb. A negative errno code is returned 2730 * on a failure. 2731 * 2732 * The caller must hold the rtnl_mutex. 2733 */ 2734 int netdev_rx_handler_register(struct net_device *dev, 2735 rx_handler_func_t *rx_handler, 2736 void *rx_handler_data) 2737 { 2738 ASSERT_RTNL(); 2739 2740 if (dev->rx_handler) 2741 return -EBUSY; 2742 2743 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 2744 rcu_assign_pointer(dev->rx_handler, rx_handler); 2745 2746 return 0; 2747 } 2748 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 2749 2750 /** 2751 * netdev_rx_handler_unregister - unregister receive handler 2752 * @dev: device to unregister a handler from 2753 * 2754 * Unregister a receive hander from a device. 2755 * 2756 * The caller must hold the rtnl_mutex. 
2757 */ 2758 void netdev_rx_handler_unregister(struct net_device *dev) 2759 { 2760 2761 ASSERT_RTNL(); 2762 rcu_assign_pointer(dev->rx_handler, NULL); 2763 rcu_assign_pointer(dev->rx_handler_data, NULL); 2764 } 2765 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 2766 2767 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, 2768 struct net_device *master) 2769 { 2770 if (skb->pkt_type == PACKET_HOST) { 2771 u16 *dest = (u16 *) eth_hdr(skb)->h_dest; 2772 2773 memcpy(dest, master->dev_addr, ETH_ALEN); 2774 } 2775 } 2776 2777 /* On bonding slaves other than the currently active slave, suppress 2778 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and 2779 * ARP on active-backup slaves with arp_validate enabled. 2780 */ 2781 int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) 2782 { 2783 struct net_device *dev = skb->dev; 2784 2785 if (master->priv_flags & IFF_MASTER_ARPMON) 2786 dev->last_rx = jiffies; 2787 2788 if ((master->priv_flags & IFF_MASTER_ALB) && 2789 (master->priv_flags & IFF_BRIDGE_PORT)) { 2790 /* Do address unmangle. The local destination address 2791 * will be always the one master has. Provides the right 2792 * functionality in a bridge. 2793 */ 2794 skb_bond_set_mac_by_master(skb, master); 2795 } 2796 2797 if (dev->priv_flags & IFF_SLAVE_INACTIVE) { 2798 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && 2799 skb->protocol == __cpu_to_be16(ETH_P_ARP)) 2800 return 0; 2801 2802 if (master->priv_flags & IFF_MASTER_ALB) { 2803 if (skb->pkt_type != PACKET_BROADCAST && 2804 skb->pkt_type != PACKET_MULTICAST) 2805 return 0; 2806 } 2807 if (master->priv_flags & IFF_MASTER_8023AD && 2808 skb->protocol == __cpu_to_be16(ETH_P_SLOW)) 2809 return 0; 2810 2811 return 1; 2812 } 2813 return 0; 2814 } 2815 EXPORT_SYMBOL(__skb_bond_should_drop); 2816 2817 static int __netif_receive_skb(struct sk_buff *skb) 2818 { 2819 struct packet_type *ptype, *pt_prev; 2820 rx_handler_func_t *rx_handler; 2821 struct net_device *orig_dev; 2822 struct net_device *master; 2823 struct net_device *null_or_orig; 2824 struct net_device *orig_or_bond; 2825 int ret = NET_RX_DROP; 2826 __be16 type; 2827 2828 if (!netdev_tstamp_prequeue) 2829 net_timestamp_check(skb); 2830 2831 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) 2832 return NET_RX_SUCCESS; 2833 2834 /* if we've gotten here through NAPI, check netpoll */ 2835 if (netpoll_receive_skb(skb)) 2836 return NET_RX_DROP; 2837 2838 if (!skb->skb_iif) 2839 skb->skb_iif = skb->dev->ifindex; 2840 2841 /* 2842 * bonding note: skbs received on inactive slaves should only 2843 * be delivered to pkt handlers that are exact matches. Also 2844 * the deliver_no_wcard flag will be set. If packet handlers 2845 * are sensitive to duplicate packets these skbs will need to 2846 * be dropped at the handler. The vlan accel path may have 2847 * already set the deliver_no_wcard flag. 
2848 */ 2849 null_or_orig = NULL; 2850 orig_dev = skb->dev; 2851 master = ACCESS_ONCE(orig_dev->master); 2852 if (skb->deliver_no_wcard) 2853 null_or_orig = orig_dev; 2854 else if (master) { 2855 if (skb_bond_should_drop(skb, master)) { 2856 skb->deliver_no_wcard = 1; 2857 null_or_orig = orig_dev; /* deliver only exact match */ 2858 } else 2859 skb->dev = master; 2860 } 2861 2862 __this_cpu_inc(softnet_data.processed); 2863 skb_reset_network_header(skb); 2864 skb_reset_transport_header(skb); 2865 skb->mac_len = skb->network_header - skb->mac_header; 2866 2867 pt_prev = NULL; 2868 2869 rcu_read_lock(); 2870 2871 #ifdef CONFIG_NET_CLS_ACT 2872 if (skb->tc_verd & TC_NCLS) { 2873 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 2874 goto ncls; 2875 } 2876 #endif 2877 2878 list_for_each_entry_rcu(ptype, &ptype_all, list) { 2879 if (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2880 ptype->dev == orig_dev) { 2881 if (pt_prev) 2882 ret = deliver_skb(skb, pt_prev, orig_dev); 2883 pt_prev = ptype; 2884 } 2885 } 2886 2887 #ifdef CONFIG_NET_CLS_ACT 2888 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 2889 if (!skb) 2890 goto out; 2891 ncls: 2892 #endif 2893 2894 /* Handle special case of bridge or macvlan */ 2895 rx_handler = rcu_dereference(skb->dev->rx_handler); 2896 if (rx_handler) { 2897 if (pt_prev) { 2898 ret = deliver_skb(skb, pt_prev, orig_dev); 2899 pt_prev = NULL; 2900 } 2901 skb = rx_handler(skb); 2902 if (!skb) 2903 goto out; 2904 } 2905 2906 /* 2907 * Make sure frames received on VLAN interfaces stacked on 2908 * bonding interfaces still make their way to any base bonding 2909 * device that may have registered for a specific ptype. The 2910 * handler may have to adjust skb->dev and orig_dev. 2911 */ 2912 orig_or_bond = orig_dev; 2913 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && 2914 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { 2915 orig_or_bond = vlan_dev_real_dev(skb->dev); 2916 } 2917 2918 type = skb->protocol; 2919 list_for_each_entry_rcu(ptype, 2920 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2921 if (ptype->type == type && (ptype->dev == null_or_orig || 2922 ptype->dev == skb->dev || ptype->dev == orig_dev || 2923 ptype->dev == orig_or_bond)) { 2924 if (pt_prev) 2925 ret = deliver_skb(skb, pt_prev, orig_dev); 2926 pt_prev = ptype; 2927 } 2928 } 2929 2930 if (pt_prev) { 2931 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2932 } else { 2933 kfree_skb(skb); 2934 /* Jamal, now you will not able to escape explaining 2935 * me how you were going to use this. :-) 2936 */ 2937 ret = NET_RX_DROP; 2938 } 2939 2940 out: 2941 rcu_read_unlock(); 2942 return ret; 2943 } 2944 2945 /** 2946 * netif_receive_skb - process receive buffer from network 2947 * @skb: buffer to process 2948 * 2949 * netif_receive_skb() is the main receive data processing function. 2950 * It always succeeds. The buffer may be dropped during processing 2951 * for congestion control or by the protocol layers. 2952 * 2953 * This function may only be called from softirq context and interrupts 2954 * should be enabled. 
2955 * 2956 * Return values (usually ignored): 2957 * NET_RX_SUCCESS: no congestion 2958 * NET_RX_DROP: packet was dropped 2959 */ 2960 int netif_receive_skb(struct sk_buff *skb) 2961 { 2962 if (netdev_tstamp_prequeue) 2963 net_timestamp_check(skb); 2964 2965 if (skb_defer_rx_timestamp(skb)) 2966 return NET_RX_SUCCESS; 2967 2968 #ifdef CONFIG_RPS 2969 { 2970 struct rps_dev_flow voidflow, *rflow = &voidflow; 2971 int cpu, ret; 2972 2973 rcu_read_lock(); 2974 2975 cpu = get_rps_cpu(skb->dev, skb, &rflow); 2976 2977 if (cpu >= 0) { 2978 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2979 rcu_read_unlock(); 2980 } else { 2981 rcu_read_unlock(); 2982 ret = __netif_receive_skb(skb); 2983 } 2984 2985 return ret; 2986 } 2987 #else 2988 return __netif_receive_skb(skb); 2989 #endif 2990 } 2991 EXPORT_SYMBOL(netif_receive_skb); 2992 2993 /* Network device is going away, flush any packets still pending 2994 * Called with irqs disabled. 2995 */ 2996 static void flush_backlog(void *arg) 2997 { 2998 struct net_device *dev = arg; 2999 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3000 struct sk_buff *skb, *tmp; 3001 3002 rps_lock(sd); 3003 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3004 if (skb->dev == dev) { 3005 __skb_unlink(skb, &sd->input_pkt_queue); 3006 kfree_skb(skb); 3007 input_queue_head_incr(sd); 3008 } 3009 } 3010 rps_unlock(sd); 3011 3012 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3013 if (skb->dev == dev) { 3014 __skb_unlink(skb, &sd->process_queue); 3015 kfree_skb(skb); 3016 input_queue_head_incr(sd); 3017 } 3018 } 3019 } 3020 3021 static int napi_gro_complete(struct sk_buff *skb) 3022 { 3023 struct packet_type *ptype; 3024 __be16 type = skb->protocol; 3025 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3026 int err = -ENOENT; 3027 3028 if (NAPI_GRO_CB(skb)->count == 1) { 3029 skb_shinfo(skb)->gso_size = 0; 3030 goto out; 3031 } 3032 3033 rcu_read_lock(); 3034 list_for_each_entry_rcu(ptype, head, list) { 3035 if (ptype->type != type || ptype->dev || !ptype->gro_complete) 3036 continue; 3037 3038 err = ptype->gro_complete(skb); 3039 break; 3040 } 3041 rcu_read_unlock(); 3042 3043 if (err) { 3044 WARN_ON(&ptype->list == head); 3045 kfree_skb(skb); 3046 return NET_RX_SUCCESS; 3047 } 3048 3049 out: 3050 return netif_receive_skb(skb); 3051 } 3052 3053 static void napi_gro_flush(struct napi_struct *napi) 3054 { 3055 struct sk_buff *skb, *next; 3056 3057 for (skb = napi->gro_list; skb; skb = next) { 3058 next = skb->next; 3059 skb->next = NULL; 3060 napi_gro_complete(skb); 3061 } 3062 3063 napi->gro_count = 0; 3064 napi->gro_list = NULL; 3065 } 3066 3067 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3068 { 3069 struct sk_buff **pp = NULL; 3070 struct packet_type *ptype; 3071 __be16 type = skb->protocol; 3072 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3073 int same_flow; 3074 int mac_len; 3075 enum gro_result ret; 3076 3077 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3078 goto normal; 3079 3080 if (skb_is_gso(skb) || skb_has_frags(skb)) 3081 goto normal; 3082 3083 rcu_read_lock(); 3084 list_for_each_entry_rcu(ptype, head, list) { 3085 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 3086 continue; 3087 3088 skb_set_network_header(skb, skb_gro_offset(skb)); 3089 mac_len = skb->network_header - skb->mac_header; 3090 skb->mac_len = mac_len; 3091 NAPI_GRO_CB(skb)->same_flow = 0; 3092 NAPI_GRO_CB(skb)->flush = 0; 3093 NAPI_GRO_CB(skb)->free = 0; 3094 3095 
pp = ptype->gro_receive(&napi->gro_list, skb); 3096 break; 3097 } 3098 rcu_read_unlock(); 3099 3100 if (&ptype->list == head) 3101 goto normal; 3102 3103 same_flow = NAPI_GRO_CB(skb)->same_flow; 3104 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; 3105 3106 if (pp) { 3107 struct sk_buff *nskb = *pp; 3108 3109 *pp = nskb->next; 3110 nskb->next = NULL; 3111 napi_gro_complete(nskb); 3112 napi->gro_count--; 3113 } 3114 3115 if (same_flow) 3116 goto ok; 3117 3118 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) 3119 goto normal; 3120 3121 napi->gro_count++; 3122 NAPI_GRO_CB(skb)->count = 1; 3123 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3124 skb->next = napi->gro_list; 3125 napi->gro_list = skb; 3126 ret = GRO_HELD; 3127 3128 pull: 3129 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3130 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3131 3132 BUG_ON(skb->end - skb->tail < grow); 3133 3134 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3135 3136 skb->tail += grow; 3137 skb->data_len -= grow; 3138 3139 skb_shinfo(skb)->frags[0].page_offset += grow; 3140 skb_shinfo(skb)->frags[0].size -= grow; 3141 3142 if (unlikely(!skb_shinfo(skb)->frags[0].size)) { 3143 put_page(skb_shinfo(skb)->frags[0].page); 3144 memmove(skb_shinfo(skb)->frags, 3145 skb_shinfo(skb)->frags + 1, 3146 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3147 } 3148 } 3149 3150 ok: 3151 return ret; 3152 3153 normal: 3154 ret = GRO_NORMAL; 3155 goto pull; 3156 } 3157 EXPORT_SYMBOL(dev_gro_receive); 3158 3159 static gro_result_t 3160 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3161 { 3162 struct sk_buff *p; 3163 3164 for (p = napi->gro_list; p; p = p->next) { 3165 NAPI_GRO_CB(p)->same_flow = 3166 (p->dev == skb->dev) && 3167 !compare_ether_header(skb_mac_header(p), 3168 skb_gro_mac_header(skb)); 3169 NAPI_GRO_CB(p)->flush = 0; 3170 } 3171 3172 return dev_gro_receive(napi, skb); 3173 } 3174 3175 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3176 { 3177 switch (ret) { 3178 case GRO_NORMAL: 3179 if (netif_receive_skb(skb)) 3180 ret = GRO_DROP; 3181 break; 3182 3183 case GRO_DROP: 3184 case GRO_MERGED_FREE: 3185 kfree_skb(skb); 3186 break; 3187 3188 case GRO_HELD: 3189 case GRO_MERGED: 3190 break; 3191 } 3192 3193 return ret; 3194 } 3195 EXPORT_SYMBOL(napi_skb_finish); 3196 3197 void skb_gro_reset_offset(struct sk_buff *skb) 3198 { 3199 NAPI_GRO_CB(skb)->data_offset = 0; 3200 NAPI_GRO_CB(skb)->frag0 = NULL; 3201 NAPI_GRO_CB(skb)->frag0_len = 0; 3202 3203 if (skb->mac_header == skb->tail && 3204 !PageHighMem(skb_shinfo(skb)->frags[0].page)) { 3205 NAPI_GRO_CB(skb)->frag0 = 3206 page_address(skb_shinfo(skb)->frags[0].page) + 3207 skb_shinfo(skb)->frags[0].page_offset; 3208 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size; 3209 } 3210 } 3211 EXPORT_SYMBOL(skb_gro_reset_offset); 3212 3213 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3214 { 3215 skb_gro_reset_offset(skb); 3216 3217 return napi_skb_finish(__napi_gro_receive(napi, skb), skb); 3218 } 3219 EXPORT_SYMBOL(napi_gro_receive); 3220 3221 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3222 { 3223 __skb_pull(skb, skb_headlen(skb)); 3224 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); 3225 3226 napi->skb = skb; 3227 } 3228 EXPORT_SYMBOL(napi_reuse_skb); 3229 3230 struct sk_buff *napi_get_frags(struct napi_struct *napi) 3231 { 3232 struct sk_buff *skb = napi->skb; 3233 3234 if (!skb) { 3235 skb = 
netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 3236 if (skb) 3237 napi->skb = skb; 3238 } 3239 return skb; 3240 } 3241 EXPORT_SYMBOL(napi_get_frags); 3242 3243 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3244 gro_result_t ret) 3245 { 3246 switch (ret) { 3247 case GRO_NORMAL: 3248 case GRO_HELD: 3249 skb->protocol = eth_type_trans(skb, skb->dev); 3250 3251 if (ret == GRO_HELD) 3252 skb_gro_pull(skb, -ETH_HLEN); 3253 else if (netif_receive_skb(skb)) 3254 ret = GRO_DROP; 3255 break; 3256 3257 case GRO_DROP: 3258 case GRO_MERGED_FREE: 3259 napi_reuse_skb(napi, skb); 3260 break; 3261 3262 case GRO_MERGED: 3263 break; 3264 } 3265 3266 return ret; 3267 } 3268 EXPORT_SYMBOL(napi_frags_finish); 3269 3270 struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3271 { 3272 struct sk_buff *skb = napi->skb; 3273 struct ethhdr *eth; 3274 unsigned int hlen; 3275 unsigned int off; 3276 3277 napi->skb = NULL; 3278 3279 skb_reset_mac_header(skb); 3280 skb_gro_reset_offset(skb); 3281 3282 off = skb_gro_offset(skb); 3283 hlen = off + sizeof(*eth); 3284 eth = skb_gro_header_fast(skb, off); 3285 if (skb_gro_header_hard(skb, hlen)) { 3286 eth = skb_gro_header_slow(skb, hlen, off); 3287 if (unlikely(!eth)) { 3288 napi_reuse_skb(napi, skb); 3289 skb = NULL; 3290 goto out; 3291 } 3292 } 3293 3294 skb_gro_pull(skb, sizeof(*eth)); 3295 3296 /* 3297 * This works because the only protocols we care about don't require 3298 * special handling. We'll fix it up properly at the end. 3299 */ 3300 skb->protocol = eth->h_proto; 3301 3302 out: 3303 return skb; 3304 } 3305 EXPORT_SYMBOL(napi_frags_skb); 3306 3307 gro_result_t napi_gro_frags(struct napi_struct *napi) 3308 { 3309 struct sk_buff *skb = napi_frags_skb(napi); 3310 3311 if (!skb) 3312 return GRO_DROP; 3313 3314 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 3315 } 3316 EXPORT_SYMBOL(napi_gro_frags); 3317 3318 /* 3319 * net_rps_action sends any pending IPI's for rps. 3320 * Note: called with local irq disabled, but exits with local irq enabled. 3321 */ 3322 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 3323 { 3324 #ifdef CONFIG_RPS 3325 struct softnet_data *remsd = sd->rps_ipi_list; 3326 3327 if (remsd) { 3328 sd->rps_ipi_list = NULL; 3329 3330 local_irq_enable(); 3331 3332 /* Send pending IPI's to kick RPS processing on remote cpus. */ 3333 while (remsd) { 3334 struct softnet_data *next = remsd->rps_ipi_next; 3335 3336 if (cpu_online(remsd->cpu)) 3337 __smp_call_function_single(remsd->cpu, 3338 &remsd->csd, 0); 3339 remsd = next; 3340 } 3341 } else 3342 #endif 3343 local_irq_enable(); 3344 } 3345 3346 static int process_backlog(struct napi_struct *napi, int quota) 3347 { 3348 int work = 0; 3349 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 3350 3351 #ifdef CONFIG_RPS 3352 /* Check if we have pending ipi, its better to send them now, 3353 * not waiting net_rx_action() end. 
3354 */ 3355 if (sd->rps_ipi_list) { 3356 local_irq_disable(); 3357 net_rps_action_and_irq_enable(sd); 3358 } 3359 #endif 3360 napi->weight = weight_p; 3361 local_irq_disable(); 3362 while (work < quota) { 3363 struct sk_buff *skb; 3364 unsigned int qlen; 3365 3366 while ((skb = __skb_dequeue(&sd->process_queue))) { 3367 local_irq_enable(); 3368 __netif_receive_skb(skb); 3369 local_irq_disable(); 3370 input_queue_head_incr(sd); 3371 if (++work >= quota) { 3372 local_irq_enable(); 3373 return work; 3374 } 3375 } 3376 3377 rps_lock(sd); 3378 qlen = skb_queue_len(&sd->input_pkt_queue); 3379 if (qlen) 3380 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3381 &sd->process_queue); 3382 3383 if (qlen < quota - work) { 3384 /* 3385 * Inline a custom version of __napi_complete(). 3386 * only current cpu owns and manipulates this napi, 3387 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 3388 * we can use a plain write instead of clear_bit(), 3389 * and we dont need an smp_mb() memory barrier. 3390 */ 3391 list_del(&napi->poll_list); 3392 napi->state = 0; 3393 3394 quota = work + qlen; 3395 } 3396 rps_unlock(sd); 3397 } 3398 local_irq_enable(); 3399 3400 return work; 3401 } 3402 3403 /** 3404 * __napi_schedule - schedule for receive 3405 * @n: entry to schedule 3406 * 3407 * The entry's receive function will be scheduled to run 3408 */ 3409 void __napi_schedule(struct napi_struct *n) 3410 { 3411 unsigned long flags; 3412 3413 local_irq_save(flags); 3414 ____napi_schedule(&__get_cpu_var(softnet_data), n); 3415 local_irq_restore(flags); 3416 } 3417 EXPORT_SYMBOL(__napi_schedule); 3418 3419 void __napi_complete(struct napi_struct *n) 3420 { 3421 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 3422 BUG_ON(n->gro_list); 3423 3424 list_del(&n->poll_list); 3425 smp_mb__before_clear_bit(); 3426 clear_bit(NAPI_STATE_SCHED, &n->state); 3427 } 3428 EXPORT_SYMBOL(__napi_complete); 3429 3430 void napi_complete(struct napi_struct *n) 3431 { 3432 unsigned long flags; 3433 3434 /* 3435 * don't let napi dequeue from the cpu poll list 3436 * just in case its running on a different cpu 3437 */ 3438 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 3439 return; 3440 3441 napi_gro_flush(n); 3442 local_irq_save(flags); 3443 __napi_complete(n); 3444 local_irq_restore(flags); 3445 } 3446 EXPORT_SYMBOL(napi_complete); 3447 3448 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 3449 int (*poll)(struct napi_struct *, int), int weight) 3450 { 3451 INIT_LIST_HEAD(&napi->poll_list); 3452 napi->gro_count = 0; 3453 napi->gro_list = NULL; 3454 napi->skb = NULL; 3455 napi->poll = poll; 3456 napi->weight = weight; 3457 list_add(&napi->dev_list, &dev->napi_list); 3458 napi->dev = dev; 3459 #ifdef CONFIG_NETPOLL 3460 spin_lock_init(&napi->poll_lock); 3461 napi->poll_owner = -1; 3462 #endif 3463 set_bit(NAPI_STATE_SCHED, &napi->state); 3464 } 3465 EXPORT_SYMBOL(netif_napi_add); 3466 3467 void netif_napi_del(struct napi_struct *napi) 3468 { 3469 struct sk_buff *skb, *next; 3470 3471 list_del_init(&napi->dev_list); 3472 napi_free_frags(napi); 3473 3474 for (skb = napi->gro_list; skb; skb = next) { 3475 next = skb->next; 3476 skb->next = NULL; 3477 kfree_skb(skb); 3478 } 3479 3480 napi->gro_list = NULL; 3481 napi->gro_count = 0; 3482 } 3483 EXPORT_SYMBOL(netif_napi_del); 3484 3485 static void net_rx_action(struct softirq_action *h) 3486 { 3487 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3488 unsigned long time_limit = jiffies + 2; 3489 int budget = netdev_budget; 3490 void *have; 3491 
	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for up to 2 jiffies, which allows an
		 * average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi(). Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call. Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines. The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);


/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API. Without
 * it, we would have to search all the interfaces to find a
 * match. --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 * Fetch the caller's info block.
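	 *
	 * (Editorial sketch of the userspace side, error handling elided:
	 *
	 *	struct ifreq ifr;
	 *	ifr.ifr_ifindex = idx;
	 *	ioctl(fd, SIOCGIFNAME, &ifr);
	 *	printf("%s\n", ifr.ifr_name);
	 *
	 * which is essentially what if_indextoname() does internally.)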
3610 */ 3611 3612 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 3613 return -EFAULT; 3614 3615 rcu_read_lock(); 3616 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 3617 if (!dev) { 3618 rcu_read_unlock(); 3619 return -ENODEV; 3620 } 3621 3622 strcpy(ifr.ifr_name, dev->name); 3623 rcu_read_unlock(); 3624 3625 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 3626 return -EFAULT; 3627 return 0; 3628 } 3629 3630 /* 3631 * Perform a SIOCGIFCONF call. This structure will change 3632 * size eventually, and there is nothing I can do about it. 3633 * Thus we will need a 'compatibility mode'. 3634 */ 3635 3636 static int dev_ifconf(struct net *net, char __user *arg) 3637 { 3638 struct ifconf ifc; 3639 struct net_device *dev; 3640 char __user *pos; 3641 int len; 3642 int total; 3643 int i; 3644 3645 /* 3646 * Fetch the caller's info block. 3647 */ 3648 3649 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) 3650 return -EFAULT; 3651 3652 pos = ifc.ifc_buf; 3653 len = ifc.ifc_len; 3654 3655 /* 3656 * Loop over the interfaces, and write an info block for each. 3657 */ 3658 3659 total = 0; 3660 for_each_netdev(net, dev) { 3661 for (i = 0; i < NPROTO; i++) { 3662 if (gifconf_list[i]) { 3663 int done; 3664 if (!pos) 3665 done = gifconf_list[i](dev, NULL, 0); 3666 else 3667 done = gifconf_list[i](dev, pos + total, 3668 len - total); 3669 if (done < 0) 3670 return -EFAULT; 3671 total += done; 3672 } 3673 } 3674 } 3675 3676 /* 3677 * All done. Write the updated control block back to the caller. 3678 */ 3679 ifc.ifc_len = total; 3680 3681 /* 3682 * Both BSD and Solaris return 0 here, so we do too. 3683 */ 3684 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; 3685 } 3686 3687 #ifdef CONFIG_PROC_FS 3688 /* 3689 * This is invoked by the /proc filesystem handler to display a device 3690 * in detail. 3691 */ 3692 void *dev_seq_start(struct seq_file *seq, loff_t *pos) 3693 __acquires(RCU) 3694 { 3695 struct net *net = seq_file_net(seq); 3696 loff_t off; 3697 struct net_device *dev; 3698 3699 rcu_read_lock(); 3700 if (!*pos) 3701 return SEQ_START_TOKEN; 3702 3703 off = 1; 3704 for_each_netdev_rcu(net, dev) 3705 if (off++ == *pos) 3706 return dev; 3707 3708 return NULL; 3709 } 3710 3711 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3712 { 3713 struct net_device *dev = (v == SEQ_START_TOKEN) ? 
3714 first_net_device(seq_file_net(seq)) : 3715 next_net_device((struct net_device *)v); 3716 3717 ++*pos; 3718 return rcu_dereference(dev); 3719 } 3720 3721 void dev_seq_stop(struct seq_file *seq, void *v) 3722 __releases(RCU) 3723 { 3724 rcu_read_unlock(); 3725 } 3726 3727 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 3728 { 3729 struct rtnl_link_stats64 temp; 3730 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 3731 3732 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 3733 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", 3734 dev->name, stats->rx_bytes, stats->rx_packets, 3735 stats->rx_errors, 3736 stats->rx_dropped + stats->rx_missed_errors, 3737 stats->rx_fifo_errors, 3738 stats->rx_length_errors + stats->rx_over_errors + 3739 stats->rx_crc_errors + stats->rx_frame_errors, 3740 stats->rx_compressed, stats->multicast, 3741 stats->tx_bytes, stats->tx_packets, 3742 stats->tx_errors, stats->tx_dropped, 3743 stats->tx_fifo_errors, stats->collisions, 3744 stats->tx_carrier_errors + 3745 stats->tx_aborted_errors + 3746 stats->tx_window_errors + 3747 stats->tx_heartbeat_errors, 3748 stats->tx_compressed); 3749 } 3750 3751 /* 3752 * Called from the PROCfs module. This now uses the new arbitrary sized 3753 * /proc/net interface to create /proc/net/dev 3754 */ 3755 static int dev_seq_show(struct seq_file *seq, void *v) 3756 { 3757 if (v == SEQ_START_TOKEN) 3758 seq_puts(seq, "Inter-| Receive " 3759 " | Transmit\n" 3760 " face |bytes packets errs drop fifo frame " 3761 "compressed multicast|bytes packets errs " 3762 "drop fifo colls carrier compressed\n"); 3763 else 3764 dev_seq_printf_stats(seq, v); 3765 return 0; 3766 } 3767 3768 static struct softnet_data *softnet_get_online(loff_t *pos) 3769 { 3770 struct softnet_data *sd = NULL; 3771 3772 while (*pos < nr_cpu_ids) 3773 if (cpu_online(*pos)) { 3774 sd = &per_cpu(softnet_data, *pos); 3775 break; 3776 } else 3777 ++*pos; 3778 return sd; 3779 } 3780 3781 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) 3782 { 3783 return softnet_get_online(pos); 3784 } 3785 3786 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3787 { 3788 ++*pos; 3789 return softnet_get_online(pos); 3790 } 3791 3792 static void softnet_seq_stop(struct seq_file *seq, void *v) 3793 { 3794 } 3795 3796 static int softnet_seq_show(struct seq_file *seq, void *v) 3797 { 3798 struct softnet_data *sd = v; 3799 3800 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 3801 sd->processed, sd->dropped, sd->time_squeeze, 0, 3802 0, 0, 0, 0, /* was fastroute */ 3803 sd->cpu_collision, sd->received_rps); 3804 return 0; 3805 } 3806 3807 static const struct seq_operations dev_seq_ops = { 3808 .start = dev_seq_start, 3809 .next = dev_seq_next, 3810 .stop = dev_seq_stop, 3811 .show = dev_seq_show, 3812 }; 3813 3814 static int dev_seq_open(struct inode *inode, struct file *file) 3815 { 3816 return seq_open_net(inode, file, &dev_seq_ops, 3817 sizeof(struct seq_net_private)); 3818 } 3819 3820 static const struct file_operations dev_seq_fops = { 3821 .owner = THIS_MODULE, 3822 .open = dev_seq_open, 3823 .read = seq_read, 3824 .llseek = seq_lseek, 3825 .release = seq_release_net, 3826 }; 3827 3828 static const struct seq_operations softnet_seq_ops = { 3829 .start = softnet_seq_start, 3830 .next = softnet_seq_next, 3831 .stop = softnet_seq_stop, 3832 .show = softnet_seq_show, 3833 }; 3834 3835 static int softnet_seq_open(struct inode *inode, struct file *file) 
3836 { 3837 return seq_open(file, &softnet_seq_ops); 3838 } 3839 3840 static const struct file_operations softnet_seq_fops = { 3841 .owner = THIS_MODULE, 3842 .open = softnet_seq_open, 3843 .read = seq_read, 3844 .llseek = seq_lseek, 3845 .release = seq_release, 3846 }; 3847 3848 static void *ptype_get_idx(loff_t pos) 3849 { 3850 struct packet_type *pt = NULL; 3851 loff_t i = 0; 3852 int t; 3853 3854 list_for_each_entry_rcu(pt, &ptype_all, list) { 3855 if (i == pos) 3856 return pt; 3857 ++i; 3858 } 3859 3860 for (t = 0; t < PTYPE_HASH_SIZE; t++) { 3861 list_for_each_entry_rcu(pt, &ptype_base[t], list) { 3862 if (i == pos) 3863 return pt; 3864 ++i; 3865 } 3866 } 3867 return NULL; 3868 } 3869 3870 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) 3871 __acquires(RCU) 3872 { 3873 rcu_read_lock(); 3874 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; 3875 } 3876 3877 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3878 { 3879 struct packet_type *pt; 3880 struct list_head *nxt; 3881 int hash; 3882 3883 ++*pos; 3884 if (v == SEQ_START_TOKEN) 3885 return ptype_get_idx(0); 3886 3887 pt = v; 3888 nxt = pt->list.next; 3889 if (pt->type == htons(ETH_P_ALL)) { 3890 if (nxt != &ptype_all) 3891 goto found; 3892 hash = 0; 3893 nxt = ptype_base[0].next; 3894 } else 3895 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 3896 3897 while (nxt == &ptype_base[hash]) { 3898 if (++hash >= PTYPE_HASH_SIZE) 3899 return NULL; 3900 nxt = ptype_base[hash].next; 3901 } 3902 found: 3903 return list_entry(nxt, struct packet_type, list); 3904 } 3905 3906 static void ptype_seq_stop(struct seq_file *seq, void *v) 3907 __releases(RCU) 3908 { 3909 rcu_read_unlock(); 3910 } 3911 3912 static int ptype_seq_show(struct seq_file *seq, void *v) 3913 { 3914 struct packet_type *pt = v; 3915 3916 if (v == SEQ_START_TOKEN) 3917 seq_puts(seq, "Type Device Function\n"); 3918 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 3919 if (pt->type == htons(ETH_P_ALL)) 3920 seq_puts(seq, "ALL "); 3921 else 3922 seq_printf(seq, "%04x", ntohs(pt->type)); 3923 3924 seq_printf(seq, " %-8s %pF\n", 3925 pt->dev ? 
pt->dev->name : "", pt->func); 3926 } 3927 3928 return 0; 3929 } 3930 3931 static const struct seq_operations ptype_seq_ops = { 3932 .start = ptype_seq_start, 3933 .next = ptype_seq_next, 3934 .stop = ptype_seq_stop, 3935 .show = ptype_seq_show, 3936 }; 3937 3938 static int ptype_seq_open(struct inode *inode, struct file *file) 3939 { 3940 return seq_open_net(inode, file, &ptype_seq_ops, 3941 sizeof(struct seq_net_private)); 3942 } 3943 3944 static const struct file_operations ptype_seq_fops = { 3945 .owner = THIS_MODULE, 3946 .open = ptype_seq_open, 3947 .read = seq_read, 3948 .llseek = seq_lseek, 3949 .release = seq_release_net, 3950 }; 3951 3952 3953 static int __net_init dev_proc_net_init(struct net *net) 3954 { 3955 int rc = -ENOMEM; 3956 3957 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) 3958 goto out; 3959 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) 3960 goto out_dev; 3961 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) 3962 goto out_softnet; 3963 3964 if (wext_proc_init(net)) 3965 goto out_ptype; 3966 rc = 0; 3967 out: 3968 return rc; 3969 out_ptype: 3970 proc_net_remove(net, "ptype"); 3971 out_softnet: 3972 proc_net_remove(net, "softnet_stat"); 3973 out_dev: 3974 proc_net_remove(net, "dev"); 3975 goto out; 3976 } 3977 3978 static void __net_exit dev_proc_net_exit(struct net *net) 3979 { 3980 wext_proc_exit(net); 3981 3982 proc_net_remove(net, "ptype"); 3983 proc_net_remove(net, "softnet_stat"); 3984 proc_net_remove(net, "dev"); 3985 } 3986 3987 static struct pernet_operations __net_initdata dev_proc_ops = { 3988 .init = dev_proc_net_init, 3989 .exit = dev_proc_net_exit, 3990 }; 3991 3992 static int __init dev_proc_init(void) 3993 { 3994 return register_pernet_subsys(&dev_proc_ops); 3995 } 3996 #else 3997 #define dev_proc_init() 0 3998 #endif /* CONFIG_PROC_FS */ 3999 4000 4001 /** 4002 * netdev_set_master - set up master/slave pair 4003 * @slave: slave device 4004 * @master: new master device 4005 * 4006 * Changes the master device of the slave. Pass %NULL to break the 4007 * bonding. The caller must hold the RTNL semaphore. On a failure 4008 * a negative errno code is returned. On success the reference counts 4009 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the 4010 * function returns zero. 4011 */ 4012 int netdev_set_master(struct net_device *slave, struct net_device *master) 4013 { 4014 struct net_device *old = slave->master; 4015 4016 ASSERT_RTNL(); 4017 4018 if (master) { 4019 if (old) 4020 return -EBUSY; 4021 dev_hold(master); 4022 } 4023 4024 slave->master = master; 4025 4026 if (old) { 4027 synchronize_net(); 4028 dev_put(old); 4029 } 4030 if (master) 4031 slave->flags |= IFF_SLAVE; 4032 else 4033 slave->flags &= ~IFF_SLAVE; 4034 4035 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); 4036 return 0; 4037 } 4038 EXPORT_SYMBOL(netdev_set_master); 4039 4040 static void dev_change_rx_flags(struct net_device *dev, int flags) 4041 { 4042 const struct net_device_ops *ops = dev->netdev_ops; 4043 4044 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) 4045 ops->ndo_change_rx_flags(dev, flags); 4046 } 4047 4048 static int __dev_set_promiscuity(struct net_device *dev, int inc) 4049 { 4050 unsigned short old_flags = dev->flags; 4051 uid_t uid; 4052 gid_t gid; 4053 4054 ASSERT_RTNL(); 4055 4056 dev->flags |= IFF_PROMISC; 4057 dev->promiscuity += inc; 4058 if (dev->promiscuity == 0) { 4059 /* 4060 * Avoid overflow. 4061 * If inc causes overflow, untouch promisc and return error. 
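		 *
		 * (Worked example, editorial: reaching promiscuity == 0 with
		 * inc == -1 means the last reference was dropped, so
		 * IFF_PROMISC is cleared below; reaching zero with a positive
		 * inc means the unsigned counter wrapped, so the increment is
		 * undone and -EOVERFLOW returned.)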
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity counter overflowed; "
				"setting promiscuity failed. The promiscuity "
				"feature of the device may be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
								"left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface continues to
 * receive all multicast frames. Once it hits zero the device reverts
 * back to normal filtering operation. A negative @inc value is used to
 * drop the counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow: if inc would overflow the counter,
		 * leave allmulti untouched and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti counter overflowed; "
				"setting allmulti failed. The allmulti feature "
				"of the device may be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane.
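	 *
	 * (Editorial note: in the fallback branch below, the first secondary
	 * unicast address pushes the device into promiscuous mode via
	 * __dev_set_promiscuity(dev, 1) and records that in uc_promisc;
	 * once the unicast list empties again, the reference is dropped and
	 * uc_promisc cleared.)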
/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
	else {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (ops->ndo_set_multicast_list)
			ops->ndo_set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */
	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */
	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without
	 * reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret, changes;
	int old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
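/*
 * Illustrative sketch (hypothetical in-kernel caller, disabled):
 * changing the MTU and the hardware address with the helpers above.
 * Both expect the RTNL to be held by the caller, mirroring what
 * dev_ifsioc() does below. The MTU value 9000 is only an example.
 */
#if 0
static int example_set_mtu_and_mac(struct net_device *dev,
				   const unsigned char *mac)
{
	struct sockaddr sa;
	int err;

	ASSERT_RTNL();

	err = dev_set_mtu(dev, 9000);		/* e.g. jumbo frames */
	if (err)
		return err;

	sa.sa_family = dev->type;		/* must match dev->type */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif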
/*
 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0,
			       sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data,
				   (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -EINVAL;
		break;

	}
	return err;
}
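/*
 * Userspace view (illustrative, disabled): the read-only ioctls above
 * are reached through an ordinary socket. A minimal, self-contained
 * program fetching the hardware address might look like this; it is
 * not kernel code and would live in its own .c file. "eth0" is only
 * an example name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFHWADDR, &ifr) == 0) {
		unsigned char *mac = (unsigned char *)ifr.ifr_hwaddr.sa_data;
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	close(fd);
	return 0;
}
#endif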
/*
 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data,
			   (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 * Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}
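/*
 * Illustrative sketch (hypothetical driver, disabled): commands in the
 * private range above land in the driver's ndo_do_ioctl hook. A driver
 * handling one SIOCDEVPRIVATE command might look like this; the reply
 * value is made up.
 */
#if 0
static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			    int cmd)
{
	switch (cmd) {
	case SIOCDEVPRIVATE:
		/* Interpret the request however the driver defines it. */
		ifr->ifr_ifru.ifru_ivalue = 42;	/* hypothetical reply */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif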
/*
 * This function handles all "interface"-type I/O control requests. The
 * actual 'doing' part of this is dev_ifsioc above.
 */

/**
 * dev_ioctl - network device ioctl
 * @net: the applicable net namespace
 * @cmd: command to issue
 * @arg: pointer to a struct ifreq in user space
 *
 * Issue ioctl functions to devices. This is normally called by the
 * user space syscall interfaces but can sometimes be useful for
 * other purposes. The return value is the return from the syscall if
 * positive or a negative errno code on error.
 */
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes an ifconf argument
	 * and requires a shared lock, because it sleeps writing
	 * to user space.
	 */
	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 * See which interface the caller is talking about.
	 */
	switch (cmd) {
	/*
	 * These ioctl calls:
	 * - can be done by all.
	 * - atomic and do not require locking.
	 * - return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 * These ioctl calls:
	 * - require superuser power.
	 * - require strict serialization.
	 * - return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 * These ioctl calls:
	 * - require superuser power.
	 * - require strict serialization.
	 * - do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 * Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}


/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices get here without ever having been
		 * registered, as part of initialization unwind.
		 * Remove those devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never "
				 "was registered\n", dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}

		BUG_ON(dev->reg_state != NETREG_REGISTERED);

		/* If device is running, close it first. */
		dev_close(dev);

		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}

	/* Process any work delayed until the end of the batch */
	dev = list_first_entry(head, struct net_device, unreg_list);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
}

static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_queue_locks(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}

unsigned long netdev_fix_features(unsigned long features, const char *name)
{
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
			       "checksum feature.\n", name);
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
			       "SG feature.\n", name);
		features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_UFO) {
		if (!(features & NETIF_F_GEN_CSUM)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_HW_CSUM feature.\n",
				       name);
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_SG feature.\n", name);
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
EXPORT_SYMBOL(netdev_fix_features);
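/*
 * Illustrative sketch (hypothetical driver, disabled): a driver that
 * lets ethtool toggle offloads can run its candidate feature mask
 * through netdev_fix_features() so that, for example, TSO is dropped
 * automatically whenever SG is missing, as enforced above.
 */
#if 0
static void example_update_features(struct net_device *dev,
				    unsigned long wanted)
{
	/* Sanitize the requested set before committing it. */
	dev->features = netdev_fix_features(wanted, dev->name);
}
#endif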
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
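/*
 * Illustrative sketch (hypothetical stacked driver, disabled): a VLAN-
 * or bonding-style device would mirror its lower device's state from a
 * netdevice notifier using the helper above. example_lookup_upper() is
 * an assumed driver-private lookup, not a real API.
 */
#if 0
static int example_notifier(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = example_lookup_upper(lower); /* hypothetical */

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}
#endif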
/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_devices are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

#ifdef CONFIG_RPS
	if (!dev->num_rx_queues) {
		/*
		 * Allocate a single RX queue if driver never called
		 * alloc_netdev_mq
		 */
		dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
		if (!dev->_rx) {
			ret = -ENOMEM;
			goto out;
		}

		dev->_rx->first = dev->_rx;
		atomic_set(&dev->_rx->count, 1);
		dev->num_rx_queues = 1;
	}
#endif
	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	ret = dev_get_valid_name(dev, dev->name, 0);
	if (ret)
		goto err_uninit;

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	dev->features = netdev_fix_features(dev->features, dev->name);

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */
	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* initialize the ref count */
	atomic_set(&dev->refcnt, 1);

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
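/*
 * Illustrative sketch (hypothetical driver, disabled): tying several
 * hardware ports to one NAPI context via a dummy netdev, as the comment
 * above describes. struct example_adapter, example_probe_napi() and the
 * poll callback are assumptions; the weight of 64 is the conventional
 * default.
 */
#if 0
struct example_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_probe_napi(struct example_adapter *ad,
			      int (*example_poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, example_poll, 64);
	return 0;
}
#endif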
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and clean up and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
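/*
 * Illustrative sketch (hypothetical subsystem, disabled): the discipline
 * that netdev_wait_allrefs() relies on. Whoever takes a long-lived
 * reference with dev_hold() must watch for NETDEV_UNREGISTER and release
 * it, or the wait loop above will spin forever printing the warning.
 * example_cached_dev is an assumed place where such a reference lives.
 */
#if 0
static struct net_device *example_cached_dev;	/* holds one dev_hold() */

static int example_dev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);			/* drop our reference */
	}
	return NOTIFY_DONE;
}
#endif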
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 * dev_txq_stats_fold - fold tx_queues stats
 * @dev: device to get statistics from
 * @stats: struct rtnl_link_stats64 to hold results
 */
void dev_txq_stats_fold(const struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;
	struct netdev_queue *txq;

	for (i = 0; i < dev->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		spin_lock_bh(&txq->_xmit_lock);
		tx_bytes   += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
		spin_unlock_bh(&txq->_xmit_lock);
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes   = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
EXPORT_SYMBOL(dev_txq_stats_fold);

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
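/*
 * Illustrative sketch (hypothetical driver, disabled): a multiqueue
 * driver that keeps per-queue counters can implement ndo_get_stats64 by
 * starting from its software stats and folding in the per-txq numbers
 * with the exported helper above. Only the function name is invented;
 * the hook signature matches dev_get_stats() below.
 */
#if 0
static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	/* @stats arrives zeroed from dev_get_stats(). */
	stats->rx_packets = dev->stats.rx_packets;
	stats->rx_bytes   = dev->stats.rx_bytes;
	dev_txq_stats_fold(dev, stats);	/* fills tx_{bytes,packets,dropped} */
	return stats;
}
#endif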
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		return ops->ndo_get_stats64(dev, storage);
	}
	if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
		return storage;
	}
	netdev_stats_to_stats64(storage, &dev->stats);
	dev_txq_stats_fold(dev, storage);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}
/**
 * alloc_netdev_mq - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @queue_count: the number of subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;
#ifdef CONFIG_RPS
	struct netdev_rx_queue *rx;
	int i;
#endif

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

#ifdef CONFIG_RPS
	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "rx queues.\n");
		goto free_tx;
	}

	atomic_set(&rx->count, queue_count);

	/*
	 * Set a pointer to first element in the array which holds the
	 * reference count.
	 */
	for (i = 0; i < queue_count; i++)
		rx[i].first = rx;
#endif

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_rx;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

#ifdef CONFIG_RPS
	dev->_rx = rx;
	dev->num_rx_queues = queue_count;
#endif

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_rx:
#ifdef CONFIG_RPS
	kfree(rx);
free_tx:
#endif
	kfree(tx);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
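/*
 * Illustrative sketch (hypothetical driver, disabled): the usual
 * allocate / register / unregister / free life cycle built on the
 * helpers above. struct example_priv, example_setup and the "ex%d"
 * name template are assumptions; the '%d' is expanded by
 * register_netdev() as documented earlier.
 */
#if 0
static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			      example_setup, 4);	/* e.g. 4 tx queues */
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {		/* takes the RTNL itself */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);	/* only after the unregister completed */
}
#endif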
/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
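/*
 * Illustrative sketch (disabled): tearing down many devices with a
 * single RTNL hold and one NETDEV_UNREGISTER_BATCH round trip, using
 * the queue/many pair above instead of N separate unregister calls.
 * Only the function name and the caller-owned array are invented.
 */
#if 0
static void example_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* one batched rollback */
	rtnl_unlock();
	/* each dev must still be released with free_netdev() afterwards */
}
#endif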
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
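/*
 * Illustrative sketch (hypothetical bonding-style master, disabled):
 * recomputing a master's feature set from its slaves with the helper
 * above, folding one slave in per iteration. The starting point and
 * the mask shown here are assumptions, not what any particular driver
 * uses.
 */
#if 0
static void example_compute_features(struct net_device *master,
				     struct net_device *slaves[], int n)
{
	unsigned long features = master->features;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     NETIF_F_ALL_CSUM |
						     NETIF_F_SG);
	master->features = features;
}
#endif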
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device to appear
	 * and the last network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);