/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it is not able to sense that the packet
 *	is cloned and should be copied-on-write, so it will
 *	change it and subsequent readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
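/*
 * Illustrative sketch only (not part of this file): how a module might
 * use dev_add_pack()/dev_remove_pack(). The EtherType 0x88b5 (IEEE
 * local experimental) and all "example_*" names are made up for this
 * example. The handler owns the skb and must free it or hand it on;
 * skb_share_check() guards against shared skbs coming from taps:
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return NET_RX_DROP;
 *		// ... inspect skb->data here ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_ptype __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_ptype);		// module init
 *	dev_remove_pack(&example_ptype);	// module exit
 *
 * Using htons(ETH_P_ALL) as .type would instead register a tap on the
 * ptype_all chain and see every packet.
 */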
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
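/*
 * Illustrative example of the boot parameter parsed above (the values
 * are made up): "netdev=9,0x300,0,0,eth0" stores irq 9 and I/O base
 * 0x300 for the device that will later probe as eth0. get_options()
 * puts the count of parsed numbers in ints[0], and the remainder of
 * the string (the device name) is what gets passed to
 * netdev_boot_setup_add().
 */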
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
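/*
 * Illustrative sketch of the lookup flavours above (the device name is
 * arbitrary). dev_get_by_name() takes a reference that must later be
 * dropped with dev_put(); the _rcu variant takes none and its result
 * is only valid inside the read-side critical section:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... may sleep here ...
 *		dev_put(dev);
 *	}
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev) {
 *		// ... must not sleep; dev valid only until unlock ...
 *	}
 *	rcu_read_unlock();
 */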
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if found, or %NULL if not. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if found, or %NULL if not. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if found, or %NULL if not. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device if found, or %NULL if not.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer
 *	to the device if found, or %NULL if not. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
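/*
 * Examples for dev_valid_name() (illustrative): "eth0" and "wlan-ap"
 * are accepted; "", ".", "..", "a/b", "my if" (embedded whitespace)
 * and any name of IFNAMSIZ characters or more are rejected.
 */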
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other
		 * "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
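/*
 * Illustrative sketch: a driver that does not care about its unit
 * number lets dev_alloc_name() pick the first free slot (the pattern
 * shown is the conventional one for Ethernet devices):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 *	// err is the assigned unit; dev->name is now e.g. "eth2"
 *
 * register_netdevice() performs the same step implicitly when the
 * requested name contains a '%', via dev_get_valid_name() above.
 */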
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; a format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
				name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
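/*
 * Illustrative sketch: kernel code bringing an interface up must do so
 * under RTNL, since __dev_open() asserts it:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *
 * Calling dev_open() on a device that is already IFF_UP returns 0
 * without side effects.
 */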
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only called if device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are
 *	synthesized for all devices on the device list and sent to the
 *	removed notifier, removing the need for special-case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
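/*
 * Illustrative sketch of a netdevice notifier (all "example_*" names
 * are made up). At this kernel version the third notifier argument is
 * the struct net_device itself. Registration replays NETDEV_REGISTER
 * and NETDEV_UP for existing devices, as described above:
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			// quiesce any per-device state here
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */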
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}
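/*
 * Illustrative user-space sketch of the request validated above (the
 * interface name and socket descriptor are arbitrary); the same
 * struct hwtstamp_config from <linux/net_tstamp.h> travels through
 * the SIOCSHWTSTAMP ioctl to the driver:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */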
static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by the sender, so that the second statement
			 * is just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, null the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
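/*
 * Illustrative sketch: a multiqueue driver typically allocates the
 * maximum number of queues it might ever use and then trims with the
 * helpers above (names and counts here are made up). Before
 * registration these calls only fail on out-of-range values:
 *
 *	dev = alloc_etherdev_mq(sizeof(struct example_priv), 16);
 *	...
 *	netif_set_real_num_tx_queues(dev, queues_in_use);
 *	netif_set_real_num_rx_queues(dev, queues_in_use);
 *
 * After registration the same calls must be made under rtnl_lock(),
 * e.g. from an ethtool set_channels handler.
 */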
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
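/*
 * Illustrative sketch: the usual pairing of the two helpers above in a
 * driver's legacy PCI suspend/resume path (all "example_*" names are
 * made up):
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *ndev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(ndev);
 *		// ... stop DMA, save state, power down ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *ndev = pci_get_drvdata(pdev);
 *
 *		// ... power up, restore state ...
 *		netif_device_attach(ndev);
 *		return 0;
 *	}
 */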
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
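/*
 * Illustrative sketch of the canonical skb_checksum_help() fallback in
 * a driver's xmit path ("example_hw_can_csum" is a made-up capability
 * check): when the hardware cannot finish a CHECKSUM_PARTIAL packet,
 * the checksum is completed in software before handing it to the NIC:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 *
 * dev_hard_start_xmit() below applies exactly this pattern for devices
 * lacking NETIF_F_ALL_CSUM features.
 */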
2030 */ 2031 2032 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2033 { 2034 #ifdef CONFIG_HIGHMEM 2035 int i; 2036 if (!(dev->features & NETIF_F_HIGHDMA)) { 2037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2038 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2039 if (PageHighMem(skb_frag_page(frag))) 2040 return 1; 2041 } 2042 } 2043 2044 if (PCI_DMA_BUS_IS_PHYS) { 2045 struct device *pdev = dev->dev.parent; 2046 2047 if (!pdev) 2048 return 0; 2049 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2050 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2051 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2052 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2053 return 1; 2054 } 2055 } 2056 #endif 2057 return 0; 2058 } 2059 2060 struct dev_gso_cb { 2061 void (*destructor)(struct sk_buff *skb); 2062 }; 2063 2064 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2065 2066 static void dev_gso_skb_destructor(struct sk_buff *skb) 2067 { 2068 struct dev_gso_cb *cb; 2069 2070 do { 2071 struct sk_buff *nskb = skb->next; 2072 2073 skb->next = nskb->next; 2074 nskb->next = NULL; 2075 kfree_skb(nskb); 2076 } while (skb->next); 2077 2078 cb = DEV_GSO_CB(skb); 2079 if (cb->destructor) 2080 cb->destructor(skb); 2081 } 2082 2083 /** 2084 * dev_gso_segment - Perform emulated hardware segmentation on skb. 2085 * @skb: buffer to segment 2086 * @features: device features as applicable to this skb 2087 * 2088 * This function segments the given skb and stores the list of segments 2089 * in skb->next. 2090 */ 2091 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2092 { 2093 struct sk_buff *segs; 2094 2095 segs = skb_gso_segment(skb, features); 2096 2097 /* Verifying header integrity only. */ 2098 if (!segs) 2099 return 0; 2100 2101 if (IS_ERR(segs)) 2102 return PTR_ERR(segs); 2103 2104 skb->next = segs; 2105 DEV_GSO_CB(skb)->destructor = skb->destructor; 2106 skb->destructor = dev_gso_skb_destructor; 2107 2108 return 0; 2109 } 2110 2111 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) 2112 { 2113 return ((features & NETIF_F_GEN_CSUM) || 2114 ((features & NETIF_F_V4_CSUM) && 2115 protocol == htons(ETH_P_IP)) || 2116 ((features & NETIF_F_V6_CSUM) && 2117 protocol == htons(ETH_P_IPV6)) || 2118 ((features & NETIF_F_FCOE_CRC) && 2119 protocol == htons(ETH_P_FCOE))); 2120 } 2121 2122 static netdev_features_t harmonize_features(struct sk_buff *skb, 2123 __be16 protocol, netdev_features_t features) 2124 { 2125 if (!can_checksum_protocol(features, protocol)) { 2126 features &= ~NETIF_F_ALL_CSUM; 2127 features &= ~NETIF_F_SG; 2128 } else if (illegal_highdma(skb->dev, skb)) { 2129 features &= ~NETIF_F_SG; 2130 } 2131 2132 return features; 2133 } 2134 2135 netdev_features_t netif_skb_features(struct sk_buff *skb) 2136 { 2137 __be16 protocol = skb->protocol; 2138 netdev_features_t features = skb->dev->features; 2139 2140 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2141 features &= ~NETIF_F_GSO_MASK; 2142 2143 if (protocol == htons(ETH_P_8021Q)) { 2144 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2145 protocol = veh->h_vlan_encapsulated_proto; 2146 } else if (!vlan_tx_tag_present(skb)) { 2147 return harmonize_features(skb, protocol, features); 2148 } 2149 2150 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); 2151 2152 if (protocol != htons(ETH_P_8021Q)) { 2153 return harmonize_features(skb, protocol, features); 2154 } else { 2155 features &= NETIF_F_SG | NETIF_F_HIGHDMA | 
NETIF_F_FRAGLIST | 2156 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; 2157 return harmonize_features(skb, protocol, features); 2158 } 2159 } 2160 EXPORT_SYMBOL(netif_skb_features); 2161 2162 /* 2163 * Returns true if either: 2164 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2165 * 2. skb is fragmented and the device does not support SG, or if 2166 * at least one of the fragments is in highmem and the device does not 2167 * support DMA from it. 2168 */ 2169 static inline int skb_needs_linearize(struct sk_buff *skb, 2170 int features) 2171 { 2172 return skb_is_nonlinear(skb) && 2173 ((skb_has_frag_list(skb) && 2174 !(features & NETIF_F_FRAGLIST)) || 2175 (skb_shinfo(skb)->nr_frags && 2176 !(features & NETIF_F_SG))); 2177 } 2178 2179 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2180 struct netdev_queue *txq) 2181 { 2182 const struct net_device_ops *ops = dev->netdev_ops; 2183 int rc = NETDEV_TX_OK; 2184 unsigned int skb_len; 2185 2186 if (likely(!skb->next)) { 2187 netdev_features_t features; 2188 2189 /* 2190 * If the device doesn't need skb->dst, release it right now while 2191 * it's hot in this CPU cache 2192 */ 2193 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2194 skb_dst_drop(skb); 2195 2196 if (!list_empty(&ptype_all)) 2197 dev_queue_xmit_nit(skb, dev); 2198 2199 features = netif_skb_features(skb); 2200 2201 if (vlan_tx_tag_present(skb) && 2202 !(features & NETIF_F_HW_VLAN_TX)) { 2203 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2204 if (unlikely(!skb)) 2205 goto out; 2206 2207 skb->vlan_tci = 0; 2208 } 2209 2210 if (netif_needs_gso(skb, features)) { 2211 if (unlikely(dev_gso_segment(skb, features))) 2212 goto out_kfree_skb; 2213 if (skb->next) 2214 goto gso; 2215 } else { 2216 if (skb_needs_linearize(skb, features) && 2217 __skb_linearize(skb)) 2218 goto out_kfree_skb; 2219 2220 /* If packet is not checksummed and device does not 2221 * support checksumming for this protocol, complete 2222 * checksumming here. 2223 */ 2224 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2225 skb_set_transport_header(skb, 2226 skb_checksum_start_offset(skb)); 2227 if (!(features & NETIF_F_ALL_CSUM) && 2228 skb_checksum_help(skb)) 2229 goto out_kfree_skb; 2230 } 2231 } 2232 2233 skb_len = skb->len; 2234 rc = ops->ndo_start_xmit(skb, dev); 2235 trace_net_dev_xmit(skb, rc, dev, skb_len); 2236 if (rc == NETDEV_TX_OK) 2237 txq_trans_update(txq); 2238 return rc; 2239 } 2240 2241 gso: 2242 do { 2243 struct sk_buff *nskb = skb->next; 2244 2245 skb->next = nskb->next; 2246 nskb->next = NULL; 2247 2248 /* 2249 * If the device doesn't need nskb->dst, release it right now while 2250 * it's hot in this CPU cache 2251 */ 2252 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2253 skb_dst_drop(nskb); 2254 2255 skb_len = nskb->len; 2256 rc = ops->ndo_start_xmit(nskb, dev); 2257 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2258 if (unlikely(rc != NETDEV_TX_OK)) { 2259 if (rc & ~NETDEV_TX_MASK) 2260 goto out_kfree_gso_skb; 2261 nskb->next = skb->next; 2262 skb->next = nskb; 2263 return rc; 2264 } 2265 txq_trans_update(txq); 2266 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2267 return NETDEV_TX_BUSY; 2268 } while (skb->next); 2269 2270 out_kfree_gso_skb: 2271 if (likely(skb->next == NULL)) 2272 skb->destructor = DEV_GSO_CB(skb)->destructor; 2273 out_kfree_skb: 2274 kfree_skb(skb); 2275 out: 2276 return rc; 2277 } 2278 2279 static u32 hashrnd __read_mostly; 2280 2281 /* 2282 * Returns a Tx hash based on the given packet descriptor and the number 2283 * of Tx queues to be used as a distribution range.
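 *
 * In the normal case the hash-to-queue mapping below is a fixed-point
 * scaling rather than a modulo, i.e. in effect:
 *
 *	queue = qoffset + (((u64)hash * qcount) >> 32);
 *
 * which avoids a divide and still spreads flows evenly over the range.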
2284 */ 2285 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, 2286 unsigned int num_tx_queues) 2287 { 2288 u32 hash; 2289 u16 qoffset = 0; 2290 u16 qcount = num_tx_queues; 2291 2292 if (skb_rx_queue_recorded(skb)) { 2293 hash = skb_get_rx_queue(skb); 2294 while (unlikely(hash >= num_tx_queues)) 2295 hash -= num_tx_queues; 2296 return hash; 2297 } 2298 2299 if (dev->num_tc) { 2300 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2301 qoffset = dev->tc_to_txq[tc].offset; 2302 qcount = dev->tc_to_txq[tc].count; 2303 } 2304 2305 if (skb->sk && skb->sk->sk_hash) 2306 hash = skb->sk->sk_hash; 2307 else 2308 hash = (__force u16) skb->protocol; 2309 hash = jhash_1word(hash, hashrnd); 2310 2311 return (u16) (((u64) hash * qcount) >> 32) + qoffset; 2312 } 2313 EXPORT_SYMBOL(__skb_tx_hash); 2314 2315 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 2316 { 2317 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2318 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 2319 dev->name, queue_index, 2320 dev->real_num_tx_queues); 2321 return 0; 2322 } 2323 return queue_index; 2324 } 2325 2326 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 2327 { 2328 #ifdef CONFIG_XPS 2329 struct xps_dev_maps *dev_maps; 2330 struct xps_map *map; 2331 int queue_index = -1; 2332 2333 rcu_read_lock(); 2334 dev_maps = rcu_dereference(dev->xps_maps); 2335 if (dev_maps) { 2336 map = rcu_dereference( 2337 dev_maps->cpu_map[raw_smp_processor_id()]); 2338 if (map) { 2339 if (map->len == 1) 2340 queue_index = map->queues[0]; 2341 else { 2342 u32 hash; 2343 if (skb->sk && skb->sk->sk_hash) 2344 hash = skb->sk->sk_hash; 2345 else 2346 hash = (__force u16) skb->protocol ^ 2347 skb->rxhash; 2348 hash = jhash_1word(hash, hashrnd); 2349 queue_index = map->queues[ 2350 ((u64)hash * map->len) >> 32]; 2351 } 2352 if (unlikely(queue_index >= dev->real_num_tx_queues)) 2353 queue_index = -1; 2354 } 2355 } 2356 rcu_read_unlock(); 2357 2358 return queue_index; 2359 #else 2360 return -1; 2361 #endif 2362 } 2363 2364 static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2365 struct sk_buff *skb) 2366 { 2367 int queue_index; 2368 const struct net_device_ops *ops = dev->netdev_ops; 2369 2370 if (dev->real_num_tx_queues == 1) 2371 queue_index = 0; 2372 else if (ops->ndo_select_queue) { 2373 queue_index = ops->ndo_select_queue(dev, skb); 2374 queue_index = dev_cap_txqueue(dev, queue_index); 2375 } else { 2376 struct sock *sk = skb->sk; 2377 queue_index = sk_tx_queue_get(sk); 2378 2379 if (queue_index < 0 || skb->ooo_okay || 2380 queue_index >= dev->real_num_tx_queues) { 2381 int old_index = queue_index; 2382 2383 queue_index = get_xps_queue(dev, skb); 2384 if (queue_index < 0) 2385 queue_index = skb_tx_hash(dev, skb); 2386 2387 if (queue_index != old_index && sk) { 2388 struct dst_entry *dst = 2389 rcu_dereference_check(sk->sk_dst_cache, 1); 2390 2391 if (dst && skb_dst(skb) == dst) 2392 sk_tx_queue_set(sk, queue_index); 2393 } 2394 } 2395 } 2396 2397 skb_set_queue_mapping(skb, queue_index); 2398 return netdev_get_tx_queue(dev, queue_index); 2399 } 2400 2401 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2402 struct net_device *dev, 2403 struct netdev_queue *txq) 2404 { 2405 spinlock_t *root_lock = qdisc_lock(q); 2406 bool contended; 2407 int rc; 2408 2409 qdisc_skb_cb(skb)->pkt_len = skb->len; 2410 qdisc_calculate_pkt_len(skb, q); 2411 /* 2412 * Heuristic to force contended enqueues to serialize 
on a 2413 * separate lock before trying to get qdisc main lock. 2414 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2415 * and dequeue packets faster. 2416 */ 2417 contended = qdisc_is_running(q); 2418 if (unlikely(contended)) 2419 spin_lock(&q->busylock); 2420 2421 spin_lock(root_lock); 2422 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2423 kfree_skb(skb); 2424 rc = NET_XMIT_DROP; 2425 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2426 qdisc_run_begin(q)) { 2427 /* 2428 * This is a work-conserving queue; there are no old skbs 2429 * waiting to be sent out; and the qdisc is not running - 2430 * xmit the skb directly. 2431 */ 2432 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2433 skb_dst_force(skb); 2434 2435 qdisc_bstats_update(q, skb); 2436 2437 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2438 if (unlikely(contended)) { 2439 spin_unlock(&q->busylock); 2440 contended = false; 2441 } 2442 __qdisc_run(q); 2443 } else 2444 qdisc_run_end(q); 2445 2446 rc = NET_XMIT_SUCCESS; 2447 } else { 2448 skb_dst_force(skb); 2449 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2450 if (qdisc_run_begin(q)) { 2451 if (unlikely(contended)) { 2452 spin_unlock(&q->busylock); 2453 contended = false; 2454 } 2455 __qdisc_run(q); 2456 } 2457 } 2458 spin_unlock(root_lock); 2459 if (unlikely(contended)) 2460 spin_unlock(&q->busylock); 2461 return rc; 2462 } 2463 2464 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 2465 static void skb_update_prio(struct sk_buff *skb) 2466 { 2467 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2468 2469 if (!skb->priority && skb->sk && map) { 2470 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; 2471 2472 if (prioidx < map->priomap_len) 2473 skb->priority = map->priomap[prioidx]; 2474 } 2475 } 2476 #else 2477 #define skb_update_prio(skb) 2478 #endif 2479 2480 static DEFINE_PER_CPU(int, xmit_recursion); 2481 #define RECURSION_LIMIT 10 2482 2483 /** 2484 * dev_loopback_xmit - loop back @skb 2485 * @skb: buffer to transmit 2486 */ 2487 int dev_loopback_xmit(struct sk_buff *skb) 2488 { 2489 skb_reset_mac_header(skb); 2490 __skb_pull(skb, skb_network_offset(skb)); 2491 skb->pkt_type = PACKET_LOOPBACK; 2492 skb->ip_summed = CHECKSUM_UNNECESSARY; 2493 WARN_ON(!skb_dst(skb)); 2494 skb_dst_force(skb); 2495 netif_rx_ni(skb); 2496 return 0; 2497 } 2498 EXPORT_SYMBOL(dev_loopback_xmit); 2499 2500 /** 2501 * dev_queue_xmit - transmit a buffer 2502 * @skb: buffer to transmit 2503 * 2504 * Queue a buffer for transmission to a network device. The caller must 2505 * have set the device and priority and built the buffer before calling 2506 * this function. The function can be called from an interrupt. 2507 * 2508 * A negative errno code is returned on a failure. A success does not 2509 * guarantee the frame will be transmitted as it may be dropped due 2510 * to congestion or traffic shaping. 2511 * 2512 * ----------------------------------------------------------------------------------- 2513 * I notice this method can also return errors from the queue disciplines, 2514 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2515 * be positive. 2516 * 2517 * Regardless of the return value, the skb is consumed, so it is currently 2518 * difficult to retry a send to this method. (You can bump the ref count 2519 * before sending to hold a reference for retry if you are careful.) 2520 * 2521 * When calling this method, interrupts MUST be enabled. 
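 *
 * (A minimal caller, sketched for illustration only: the caller sets
 * skb->dev and skb->priority, hands the skb off, and treats it as
 * consumed no matter what is returned:
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	rc = dev_queue_xmit(skb);
 *
 * Interrupts, as stated, must be on at the call.)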
This is because 2522 * the BH enable code must have IRQs enabled so that it will not deadlock. 2523 * --BLG 2524 */ 2525 int dev_queue_xmit(struct sk_buff *skb) 2526 { 2527 struct net_device *dev = skb->dev; 2528 struct netdev_queue *txq; 2529 struct Qdisc *q; 2530 int rc = -ENOMEM; 2531 2532 /* Disable soft irqs for various locks below. Also 2533 * stops preemption for RCU. 2534 */ 2535 rcu_read_lock_bh(); 2536 2537 skb_update_prio(skb); 2538 2539 txq = dev_pick_tx(dev, skb); 2540 q = rcu_dereference_bh(txq->qdisc); 2541 2542 #ifdef CONFIG_NET_CLS_ACT 2543 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2544 #endif 2545 trace_net_dev_queue(skb); 2546 if (q->enqueue) { 2547 rc = __dev_xmit_skb(skb, q, dev, txq); 2548 goto out; 2549 } 2550 2551 /* The device has no queue. This is the common case for software 2552 devices: loopback, all sorts of tunnels... 2553 2554 Really, it is unlikely that netif_tx_lock protection is necessary 2555 here (e.g. loopback and IP tunnels are clean, ignoring statistics 2556 counters), 2557 but it is possible that they rely on the protection 2558 we provide here. 2559 2560 So check the state and take the lock anyway; it is not prone to 2561 deadlocks. The noqueue case is even simpler 8) 2562 */ 2563 if (dev->flags & IFF_UP) { 2564 int cpu = smp_processor_id(); /* ok because BHs are off */ 2565 2566 if (txq->xmit_lock_owner != cpu) { 2567 2568 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 2569 goto recursion_alert; 2570 2571 HARD_TX_LOCK(dev, txq, cpu); 2572 2573 if (!netif_xmit_stopped(txq)) { 2574 __this_cpu_inc(xmit_recursion); 2575 rc = dev_hard_start_xmit(skb, dev, txq); 2576 __this_cpu_dec(xmit_recursion); 2577 if (dev_xmit_complete(rc)) { 2578 HARD_TX_UNLOCK(dev, txq); 2579 goto out; 2580 } 2581 } 2582 HARD_TX_UNLOCK(dev, txq); 2583 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 2584 dev->name); 2585 } else { 2586 /* Recursion detected! It is possible, 2587 * unfortunately. 2588 */ 2589 recursion_alert: 2590 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 2591 dev->name); 2592 } 2593 } 2594 2595 rc = -ENETDOWN; 2596 rcu_read_unlock_bh(); 2597 2598 kfree_skb(skb); 2599 return rc; 2600 out: 2601 rcu_read_unlock_bh(); 2602 return rc; 2603 } 2604 EXPORT_SYMBOL(dev_queue_xmit); 2605 2606 2607 /*======================================================================= 2608 Receiver routines 2609 =======================================================================*/ 2610 2611 int netdev_max_backlog __read_mostly = 1000; 2612 int netdev_tstamp_prequeue __read_mostly = 1; 2613 int netdev_budget __read_mostly = 300; 2614 int weight_p __read_mostly = 64; /* old backlog weight */ 2615 2616 /* Called with irq disabled */ 2617 static inline void ____napi_schedule(struct softnet_data *sd, 2618 struct napi_struct *napi) 2619 { 2620 list_add_tail(&napi->poll_list, &sd->poll_list); 2621 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2622 } 2623 2624 /* 2625 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses 2626 * and src/dst port numbers. Sets rxhash in skb to a non-zero hash value 2627 * on success; zero indicates no valid hash. Also sets l4_rxhash in skb 2628 * if the hash is a canonical 4-tuple hash over transport ports.
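 *
 * Callers normally go through the skb_get_rxhash() wrapper (in
 * skbuff.h), which invokes this function only when no hash has been
 * computed yet:
 *
 *	hash = skb_get_rxhash(skb);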
2629 */ 2630 void __skb_get_rxhash(struct sk_buff *skb) 2631 { 2632 struct flow_keys keys; 2633 u32 hash; 2634 2635 if (!skb_flow_dissect(skb, &keys)) 2636 return; 2637 2638 if (keys.ports) { 2639 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) 2640 swap(keys.port16[0], keys.port16[1]); 2641 skb->l4_rxhash = 1; 2642 } 2643 2644 /* get a consistent hash (same value on both flow directions) */ 2645 if ((__force u32)keys.dst < (__force u32)keys.src) 2646 swap(keys.dst, keys.src); 2647 2648 hash = jhash_3words((__force u32)keys.dst, 2649 (__force u32)keys.src, 2650 (__force u32)keys.ports, hashrnd); 2651 if (!hash) 2652 hash = 1; 2653 2654 skb->rxhash = hash; 2655 } 2656 EXPORT_SYMBOL(__skb_get_rxhash); 2657 2658 #ifdef CONFIG_RPS 2659 2660 /* One global table that all flow-based protocols share. */ 2661 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2662 EXPORT_SYMBOL(rps_sock_flow_table); 2663 2664 struct static_key rps_needed __read_mostly; 2665 2666 static struct rps_dev_flow * 2667 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2668 struct rps_dev_flow *rflow, u16 next_cpu) 2669 { 2670 if (next_cpu != RPS_NO_CPU) { 2671 #ifdef CONFIG_RFS_ACCEL 2672 struct netdev_rx_queue *rxqueue; 2673 struct rps_dev_flow_table *flow_table; 2674 struct rps_dev_flow *old_rflow; 2675 u32 flow_id; 2676 u16 rxq_index; 2677 int rc; 2678 2679 /* Should we steer this flow to a different hardware queue? */ 2680 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2681 !(dev->features & NETIF_F_NTUPLE)) 2682 goto out; 2683 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2684 if (rxq_index == skb_get_rx_queue(skb)) 2685 goto out; 2686 2687 rxqueue = dev->_rx + rxq_index; 2688 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2689 if (!flow_table) 2690 goto out; 2691 flow_id = skb->rxhash & flow_table->mask; 2692 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 2693 rxq_index, flow_id); 2694 if (rc < 0) 2695 goto out; 2696 old_rflow = rflow; 2697 rflow = &flow_table->flows[flow_id]; 2698 rflow->filter = rc; 2699 if (old_rflow->filter == rflow->filter) 2700 old_rflow->filter = RPS_NO_FILTER; 2701 out: 2702 #endif 2703 rflow->last_qtail = 2704 per_cpu(softnet_data, next_cpu).input_queue_head; 2705 } 2706 2707 rflow->cpu = next_cpu; 2708 return rflow; 2709 } 2710 2711 /* 2712 * get_rps_cpu is called from netif_receive_skb and returns the target 2713 * CPU from the RPS map of the receiving queue for a given skb. 2714 * rcu_read_lock must be held on entry. 
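 *
 * The calling pattern (see netif_rx() and netif_receive_skb() below) is:
 *
 *	cpu = get_rps_cpu(skb->dev, skb, &rflow);
 *	if (cpu >= 0)
 *		enqueue_to_backlog(skb, cpu, &rflow->last_qtail);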
2715 */ 2716 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2717 struct rps_dev_flow **rflowp) 2718 { 2719 struct netdev_rx_queue *rxqueue; 2720 struct rps_map *map; 2721 struct rps_dev_flow_table *flow_table; 2722 struct rps_sock_flow_table *sock_flow_table; 2723 int cpu = -1; 2724 u16 tcpu; 2725 2726 if (skb_rx_queue_recorded(skb)) { 2727 u16 index = skb_get_rx_queue(skb); 2728 if (unlikely(index >= dev->real_num_rx_queues)) { 2729 WARN_ONCE(dev->real_num_rx_queues > 1, 2730 "%s received packet on queue %u, but number " 2731 "of RX queues is %u\n", 2732 dev->name, index, dev->real_num_rx_queues); 2733 goto done; 2734 } 2735 rxqueue = dev->_rx + index; 2736 } else 2737 rxqueue = dev->_rx; 2738 2739 map = rcu_dereference(rxqueue->rps_map); 2740 if (map) { 2741 if (map->len == 1 && 2742 !rcu_access_pointer(rxqueue->rps_flow_table)) { 2743 tcpu = map->cpus[0]; 2744 if (cpu_online(tcpu)) 2745 cpu = tcpu; 2746 goto done; 2747 } 2748 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 2749 goto done; 2750 } 2751 2752 skb_reset_network_header(skb); 2753 if (!skb_get_rxhash(skb)) 2754 goto done; 2755 2756 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2757 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2758 if (flow_table && sock_flow_table) { 2759 u16 next_cpu; 2760 struct rps_dev_flow *rflow; 2761 2762 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 2763 tcpu = rflow->cpu; 2764 2765 next_cpu = sock_flow_table->ents[skb->rxhash & 2766 sock_flow_table->mask]; 2767 2768 /* 2769 * If the desired CPU (where last recvmsg was done) is 2770 * different from current CPU (one in the rx-queue flow 2771 * table entry), switch if one of the following holds: 2772 * - Current CPU is unset (equal to RPS_NO_CPU). 2773 * - Current CPU is offline. 2774 * - The current CPU's queue tail has advanced beyond the 2775 * last packet that was enqueued using this table entry. 2776 * This guarantees that all previous packets for the flow 2777 * have been dequeued, thus preserving in order delivery. 2778 */ 2779 if (unlikely(tcpu != next_cpu) && 2780 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2781 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2782 rflow->last_qtail)) >= 0)) 2783 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 2784 2785 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2786 *rflowp = rflow; 2787 cpu = tcpu; 2788 goto done; 2789 } 2790 } 2791 2792 if (map) { 2793 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2794 2795 if (cpu_online(tcpu)) { 2796 cpu = tcpu; 2797 goto done; 2798 } 2799 } 2800 2801 done: 2802 return cpu; 2803 } 2804 2805 #ifdef CONFIG_RFS_ACCEL 2806 2807 /** 2808 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 2809 * @dev: Device on which the filter was set 2810 * @rxq_index: RX queue index 2811 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 2812 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 2813 * 2814 * Drivers that implement ndo_rx_flow_steer() should periodically call 2815 * this function for each installed filter and remove the filters for 2816 * which it returns %true. 
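 *
 * A (hypothetical) driver-side sketch, run from a periodic work item;
 * my_remove_filter() and the flt[] table are driver inventions here:
 *
 *	for (i = 0; i < n; i++)
 *		if (rps_may_expire_flow(dev, flt[i].rxq, flt[i].flow_id, i))
 *			my_remove_filter(dev, i);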
2817 */ 2818 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 2819 u32 flow_id, u16 filter_id) 2820 { 2821 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 2822 struct rps_dev_flow_table *flow_table; 2823 struct rps_dev_flow *rflow; 2824 bool expire = true; 2825 int cpu; 2826 2827 rcu_read_lock(); 2828 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2829 if (flow_table && flow_id <= flow_table->mask) { 2830 rflow = &flow_table->flows[flow_id]; 2831 cpu = ACCESS_ONCE(rflow->cpu); 2832 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 2833 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 2834 rflow->last_qtail) < 2835 (int)(10 * flow_table->mask))) 2836 expire = false; 2837 } 2838 rcu_read_unlock(); 2839 return expire; 2840 } 2841 EXPORT_SYMBOL(rps_may_expire_flow); 2842 2843 #endif /* CONFIG_RFS_ACCEL */ 2844 2845 /* Called from hardirq (IPI) context */ 2846 static void rps_trigger_softirq(void *data) 2847 { 2848 struct softnet_data *sd = data; 2849 2850 ____napi_schedule(sd, &sd->backlog); 2851 sd->received_rps++; 2852 } 2853 2854 #endif /* CONFIG_RPS */ 2855 2856 /* 2857 * Check whether this softnet_data structure belongs to another CPU. 2858 * If it does, queue it to our IPI list and return 1; 2859 * if not, return 0. 2860 */ 2861 static int rps_ipi_queued(struct softnet_data *sd) 2862 { 2863 #ifdef CONFIG_RPS 2864 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 2865 2866 if (sd != mysd) { 2867 sd->rps_ipi_next = mysd->rps_ipi_list; 2868 mysd->rps_ipi_list = sd; 2869 2870 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2871 return 1; 2872 } 2873 #endif /* CONFIG_RPS */ 2874 return 0; 2875 } 2876 2877 /* 2878 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog 2879 * queue (which may be a remote CPU's queue). 2880 */ 2881 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 2882 unsigned int *qtail) 2883 { 2884 struct softnet_data *sd; 2885 unsigned long flags; 2886 2887 sd = &per_cpu(softnet_data, cpu); 2888 2889 local_irq_save(flags); 2890 2891 rps_lock(sd); 2892 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 2893 if (skb_queue_len(&sd->input_pkt_queue)) { 2894 enqueue: 2895 __skb_queue_tail(&sd->input_pkt_queue, skb); 2896 input_queue_tail_incr_save(sd, qtail); 2897 rps_unlock(sd); 2898 local_irq_restore(flags); 2899 return NET_RX_SUCCESS; 2900 } 2901 2902 /* Schedule NAPI for the backlog device. 2903 * We can use a non-atomic operation since we own the queue lock. 2904 */ 2905 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 2906 if (!rps_ipi_queued(sd)) 2907 ____napi_schedule(sd, &sd->backlog); 2908 } 2909 goto enqueue; 2910 } 2911 2912 sd->dropped++; 2913 rps_unlock(sd); 2914 2915 local_irq_restore(flags); 2916 2917 atomic_long_inc(&skb->dev->rx_dropped); 2918 kfree_skb(skb); 2919 return NET_RX_DROP; 2920 } 2921 2922 /** 2923 * netif_rx - post buffer to the network code 2924 * @skb: buffer to post 2925 * 2926 * This function receives a packet from a device driver and queues it for 2927 * the upper (protocol) levels to process. It always succeeds. The buffer 2928 * may be dropped during processing for congestion control or by the 2929 * protocol layers.
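 *
 * A (hypothetical) non-NAPI driver receive interrupt boils down to:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);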
2930 * 2931 * return values: 2932 * NET_RX_SUCCESS (no congestion) 2933 * NET_RX_DROP (packet was dropped) 2934 * 2935 */ 2936 2937 int netif_rx(struct sk_buff *skb) 2938 { 2939 int ret; 2940 2941 /* if netpoll wants it, pretend we never saw it */ 2942 if (netpoll_rx(skb)) 2943 return NET_RX_DROP; 2944 2945 net_timestamp_check(netdev_tstamp_prequeue, skb); 2946 2947 trace_netif_rx(skb); 2948 #ifdef CONFIG_RPS 2949 if (static_key_false(&rps_needed)) { 2950 struct rps_dev_flow voidflow, *rflow = &voidflow; 2951 int cpu; 2952 2953 preempt_disable(); 2954 rcu_read_lock(); 2955 2956 cpu = get_rps_cpu(skb->dev, skb, &rflow); 2957 if (cpu < 0) 2958 cpu = smp_processor_id(); 2959 2960 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2961 2962 rcu_read_unlock(); 2963 preempt_enable(); 2964 } else 2965 #endif 2966 { 2967 unsigned int qtail; 2968 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 2969 put_cpu(); 2970 } 2971 return ret; 2972 } 2973 EXPORT_SYMBOL(netif_rx); 2974 2975 int netif_rx_ni(struct sk_buff *skb) 2976 { 2977 int err; 2978 2979 preempt_disable(); 2980 err = netif_rx(skb); 2981 if (local_softirq_pending()) 2982 do_softirq(); 2983 preempt_enable(); 2984 2985 return err; 2986 } 2987 EXPORT_SYMBOL(netif_rx_ni); 2988 2989 static void net_tx_action(struct softirq_action *h) 2990 { 2991 struct softnet_data *sd = &__get_cpu_var(softnet_data); 2992 2993 if (sd->completion_queue) { 2994 struct sk_buff *clist; 2995 2996 local_irq_disable(); 2997 clist = sd->completion_queue; 2998 sd->completion_queue = NULL; 2999 local_irq_enable(); 3000 3001 while (clist) { 3002 struct sk_buff *skb = clist; 3003 clist = clist->next; 3004 3005 WARN_ON(atomic_read(&skb->users)); 3006 trace_kfree_skb(skb, net_tx_action); 3007 __kfree_skb(skb); 3008 } 3009 } 3010 3011 if (sd->output_queue) { 3012 struct Qdisc *head; 3013 3014 local_irq_disable(); 3015 head = sd->output_queue; 3016 sd->output_queue = NULL; 3017 sd->output_queue_tailp = &sd->output_queue; 3018 local_irq_enable(); 3019 3020 while (head) { 3021 struct Qdisc *q = head; 3022 spinlock_t *root_lock; 3023 3024 head = head->next_sched; 3025 3026 root_lock = qdisc_lock(q); 3027 if (spin_trylock(root_lock)) { 3028 smp_mb__before_clear_bit(); 3029 clear_bit(__QDISC_STATE_SCHED, 3030 &q->state); 3031 qdisc_run(q); 3032 spin_unlock(root_lock); 3033 } else { 3034 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3035 &q->state)) { 3036 __netif_reschedule(q); 3037 } else { 3038 smp_mb__before_clear_bit(); 3039 clear_bit(__QDISC_STATE_SCHED, 3040 &q->state); 3041 } 3042 } 3043 } 3044 } 3045 } 3046 3047 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 3048 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 3049 /* This hook is defined here for ATM LANE */ 3050 int (*br_fdb_test_addr_hook)(struct net_device *dev, 3051 unsigned char *addr) __read_mostly; 3052 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 3053 #endif 3054 3055 #ifdef CONFIG_NET_CLS_ACT 3056 /* TODO: Maybe we should just force sch_ingress to be compiled in 3057 * whenever CONFIG_NET_CLS_ACT is? Otherwise we execute a few useless 3058 * instructions (a compare and two extra stores) when it is not enabled 3059 * but CONFIG_NET_CLS_ACT is set. 3060 * NOTE: This doesn't stop any functionality; if you don't have 3061 * the ingress scheduler, you just can't add policies on ingress.
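 * (Such policies are installed from userspace, e.g. with
 * "tc qdisc add dev eth0 ingress" plus a filter; nothing in this file
 * installs them.)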
3062 * 3063 */ 3064 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3065 { 3066 struct net_device *dev = skb->dev; 3067 u32 ttl = G_TC_RTTL(skb->tc_verd); 3068 int result = TC_ACT_OK; 3069 struct Qdisc *q; 3070 3071 if (unlikely(MAX_RED_LOOP < ttl++)) { 3072 net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n", 3073 skb->skb_iif, dev->ifindex); 3074 return TC_ACT_SHOT; 3075 } 3076 3077 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3078 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3079 3080 q = rxq->qdisc; 3081 if (q != &noop_qdisc) { 3082 spin_lock(qdisc_lock(q)); 3083 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3084 result = qdisc_enqueue_root(skb, q); 3085 spin_unlock(qdisc_lock(q)); 3086 } 3087 3088 return result; 3089 } 3090 3091 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3092 struct packet_type **pt_prev, 3093 int *ret, struct net_device *orig_dev) 3094 { 3095 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3096 3097 if (!rxq || rxq->qdisc == &noop_qdisc) 3098 goto out; 3099 3100 if (*pt_prev) { 3101 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3102 *pt_prev = NULL; 3103 } 3104 3105 switch (ing_filter(skb, rxq)) { 3106 case TC_ACT_SHOT: 3107 case TC_ACT_STOLEN: 3108 kfree_skb(skb); 3109 return NULL; 3110 } 3111 3112 out: 3113 skb->tc_verd = 0; 3114 return skb; 3115 } 3116 #endif 3117 3118 /** 3119 * netdev_rx_handler_register - register receive handler 3120 * @dev: device to register a handler for 3121 * @rx_handler: receive handler to register 3122 * @rx_handler_data: data pointer that is used by rx handler 3123 * 3124 * Register a receive handler for a device. This handler will then be 3125 * called from __netif_receive_skb. A negative errno code is returned 3126 * on a failure. 3127 * 3128 * The caller must hold the rtnl_mutex. 3129 * 3130 * For a general description of rx_handler, see enum rx_handler_result. 3131 */ 3132 int netdev_rx_handler_register(struct net_device *dev, 3133 rx_handler_func_t *rx_handler, 3134 void *rx_handler_data) 3135 { 3136 ASSERT_RTNL(); 3137 3138 if (dev->rx_handler) 3139 return -EBUSY; 3140 3141 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3142 rcu_assign_pointer(dev->rx_handler, rx_handler); 3143 3144 return 0; 3145 } 3146 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3147 3148 /** 3149 * netdev_rx_handler_unregister - unregister receive handler 3150 * @dev: device to unregister a handler from 3151 * 3152 * Unregister a receive handler from a device. 3153 * 3154 * The caller must hold the rtnl_mutex. 3155 */ 3156 void netdev_rx_handler_unregister(struct net_device *dev) 3157 { 3158 3159 ASSERT_RTNL(); 3160 RCU_INIT_POINTER(dev->rx_handler, NULL); 3161 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3162 } 3163 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3164 3165 /* 3166 * Limit the use of PFMEMALLOC reserves to those protocols that implement 3167 * the special handling of PFMEMALLOC skbs.
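 *
 * (A protocol opts a socket in with sk_set_memalloc() -- the
 * swap-over-NFS transport socket does this, for instance:
 *
 *	sk_set_memalloc(sk);
 *
 * and only such SOCK_MEMALLOC sockets may consume PFMEMALLOC skbs.)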
3168 */ 3169 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 3170 { 3171 switch (skb->protocol) { 3172 case __constant_htons(ETH_P_ARP): 3173 case __constant_htons(ETH_P_IP): 3174 case __constant_htons(ETH_P_IPV6): 3175 case __constant_htons(ETH_P_8021Q): 3176 return true; 3177 default: 3178 return false; 3179 } 3180 } 3181 3182 static int __netif_receive_skb(struct sk_buff *skb) 3183 { 3184 struct packet_type *ptype, *pt_prev; 3185 rx_handler_func_t *rx_handler; 3186 struct net_device *orig_dev; 3187 struct net_device *null_or_dev; 3188 bool deliver_exact = false; 3189 int ret = NET_RX_DROP; 3190 __be16 type; 3191 unsigned long pflags = current->flags; 3192 3193 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3194 3195 trace_netif_receive_skb(skb); 3196 3197 /* 3198 * PFMEMALLOC skbs are special, they should 3199 * - be delivered to SOCK_MEMALLOC sockets only 3200 * - stay away from userspace 3201 * - have bounded memory usage 3202 * 3203 * Use PF_MEMALLOC as this saves us from propagating the allocation 3204 * context down to all allocation sites. 3205 */ 3206 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 3207 current->flags |= PF_MEMALLOC; 3208 3209 /* if we've gotten here through NAPI, check netpoll */ 3210 if (netpoll_receive_skb(skb)) 3211 goto out; 3212 3213 orig_dev = skb->dev; 3214 3215 skb_reset_network_header(skb); 3216 skb_reset_transport_header(skb); 3217 skb_reset_mac_len(skb); 3218 3219 pt_prev = NULL; 3220 3221 rcu_read_lock(); 3222 3223 another_round: 3224 skb->skb_iif = skb->dev->ifindex; 3225 3226 __this_cpu_inc(softnet_data.processed); 3227 3228 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { 3229 skb = vlan_untag(skb); 3230 if (unlikely(!skb)) 3231 goto unlock; 3232 } 3233 3234 #ifdef CONFIG_NET_CLS_ACT 3235 if (skb->tc_verd & TC_NCLS) { 3236 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3237 goto ncls; 3238 } 3239 #endif 3240 3241 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 3242 goto skip_taps; 3243 3244 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3245 if (!ptype->dev || ptype->dev == skb->dev) { 3246 if (pt_prev) 3247 ret = deliver_skb(skb, pt_prev, orig_dev); 3248 pt_prev = ptype; 3249 } 3250 } 3251 3252 skip_taps: 3253 #ifdef CONFIG_NET_CLS_ACT 3254 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3255 if (!skb) 3256 goto unlock; 3257 ncls: 3258 #endif 3259 3260 if (sk_memalloc_socks() && skb_pfmemalloc(skb) 3261 && !skb_pfmemalloc_protocol(skb)) 3262 goto drop; 3263 3264 rx_handler = rcu_dereference(skb->dev->rx_handler); 3265 if (vlan_tx_tag_present(skb)) { 3266 if (pt_prev) { 3267 ret = deliver_skb(skb, pt_prev, orig_dev); 3268 pt_prev = NULL; 3269 } 3270 if (vlan_do_receive(&skb, !rx_handler)) 3271 goto another_round; 3272 else if (unlikely(!skb)) 3273 goto unlock; 3274 } 3275 3276 if (rx_handler) { 3277 if (pt_prev) { 3278 ret = deliver_skb(skb, pt_prev, orig_dev); 3279 pt_prev = NULL; 3280 } 3281 switch (rx_handler(&skb)) { 3282 case RX_HANDLER_CONSUMED: 3283 goto unlock; 3284 case RX_HANDLER_ANOTHER: 3285 goto another_round; 3286 case RX_HANDLER_EXACT: 3287 deliver_exact = true; 3288 case RX_HANDLER_PASS: 3289 break; 3290 default: 3291 BUG(); 3292 } 3293 } 3294 3295 /* deliver only exact match when indicated */ 3296 null_or_dev = deliver_exact ? 
skb->dev : NULL; 3297 3298 type = skb->protocol; 3299 list_for_each_entry_rcu(ptype, 3300 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3301 if (ptype->type == type && 3302 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3303 ptype->dev == orig_dev)) { 3304 if (pt_prev) 3305 ret = deliver_skb(skb, pt_prev, orig_dev); 3306 pt_prev = ptype; 3307 } 3308 } 3309 3310 if (pt_prev) { 3311 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3312 ret = -ENOMEM; 3313 else 3314 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3315 } else { 3316 drop: 3317 atomic_long_inc(&skb->dev->rx_dropped); 3318 kfree_skb(skb); 3319 /* Jamal, now you will not be able to escape explaining 3320 * to me how you were going to use this. :-) 3321 */ 3322 ret = NET_RX_DROP; 3323 } 3324 3325 unlock: 3326 rcu_read_unlock(); 3327 out: 3328 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3329 return ret; 3330 } 3331 3332 /** 3333 * netif_receive_skb - process receive buffer from network 3334 * @skb: buffer to process 3335 * 3336 * netif_receive_skb() is the main receive data processing function. 3337 * It always succeeds. The buffer may be dropped during processing 3338 * for congestion control or by the protocol layers. 3339 * 3340 * This function may only be called from softirq context and interrupts 3341 * should be enabled. 3342 * 3343 * Return values (usually ignored): 3344 * NET_RX_SUCCESS: no congestion 3345 * NET_RX_DROP: packet was dropped 3346 */ 3347 int netif_receive_skb(struct sk_buff *skb) 3348 { 3349 net_timestamp_check(netdev_tstamp_prequeue, skb); 3350 3351 if (skb_defer_rx_timestamp(skb)) 3352 return NET_RX_SUCCESS; 3353 3354 #ifdef CONFIG_RPS 3355 if (static_key_false(&rps_needed)) { 3356 struct rps_dev_flow voidflow, *rflow = &voidflow; 3357 int cpu, ret; 3358 3359 rcu_read_lock(); 3360 3361 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3362 3363 if (cpu >= 0) { 3364 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3365 rcu_read_unlock(); 3366 return ret; 3367 } 3368 rcu_read_unlock(); 3369 } 3370 #endif 3371 return __netif_receive_skb(skb); 3372 } 3373 EXPORT_SYMBOL(netif_receive_skb); 3374 3375 /* Network device is going away, flush any packets still pending. 3376 * Called with irqs disabled.
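 *
 * (The unregister path elsewhere in this file runs this on every CPU via
 *
 *	on_each_cpu(flush_backlog, dev, 1);
 *
 * and the IPI handler context is what guarantees irqs are off here.)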
3377 */ 3378 static void flush_backlog(void *arg) 3379 { 3380 struct net_device *dev = arg; 3381 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3382 struct sk_buff *skb, *tmp; 3383 3384 rps_lock(sd); 3385 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3386 if (skb->dev == dev) { 3387 __skb_unlink(skb, &sd->input_pkt_queue); 3388 kfree_skb(skb); 3389 input_queue_head_incr(sd); 3390 } 3391 } 3392 rps_unlock(sd); 3393 3394 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3395 if (skb->dev == dev) { 3396 __skb_unlink(skb, &sd->process_queue); 3397 kfree_skb(skb); 3398 input_queue_head_incr(sd); 3399 } 3400 } 3401 } 3402 3403 static int napi_gro_complete(struct sk_buff *skb) 3404 { 3405 struct packet_type *ptype; 3406 __be16 type = skb->protocol; 3407 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3408 int err = -ENOENT; 3409 3410 if (NAPI_GRO_CB(skb)->count == 1) { 3411 skb_shinfo(skb)->gso_size = 0; 3412 goto out; 3413 } 3414 3415 rcu_read_lock(); 3416 list_for_each_entry_rcu(ptype, head, list) { 3417 if (ptype->type != type || ptype->dev || !ptype->gro_complete) 3418 continue; 3419 3420 err = ptype->gro_complete(skb); 3421 break; 3422 } 3423 rcu_read_unlock(); 3424 3425 if (err) { 3426 WARN_ON(&ptype->list == head); 3427 kfree_skb(skb); 3428 return NET_RX_SUCCESS; 3429 } 3430 3431 out: 3432 return netif_receive_skb(skb); 3433 } 3434 3435 inline void napi_gro_flush(struct napi_struct *napi) 3436 { 3437 struct sk_buff *skb, *next; 3438 3439 for (skb = napi->gro_list; skb; skb = next) { 3440 next = skb->next; 3441 skb->next = NULL; 3442 napi_gro_complete(skb); 3443 } 3444 3445 napi->gro_count = 0; 3446 napi->gro_list = NULL; 3447 } 3448 EXPORT_SYMBOL(napi_gro_flush); 3449 3450 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3451 { 3452 struct sk_buff **pp = NULL; 3453 struct packet_type *ptype; 3454 __be16 type = skb->protocol; 3455 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3456 int same_flow; 3457 int mac_len; 3458 enum gro_result ret; 3459 3460 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3461 goto normal; 3462 3463 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3464 goto normal; 3465 3466 rcu_read_lock(); 3467 list_for_each_entry_rcu(ptype, head, list) { 3468 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 3469 continue; 3470 3471 skb_set_network_header(skb, skb_gro_offset(skb)); 3472 mac_len = skb->network_header - skb->mac_header; 3473 skb->mac_len = mac_len; 3474 NAPI_GRO_CB(skb)->same_flow = 0; 3475 NAPI_GRO_CB(skb)->flush = 0; 3476 NAPI_GRO_CB(skb)->free = 0; 3477 3478 pp = ptype->gro_receive(&napi->gro_list, skb); 3479 break; 3480 } 3481 rcu_read_unlock(); 3482 3483 if (&ptype->list == head) 3484 goto normal; 3485 3486 same_flow = NAPI_GRO_CB(skb)->same_flow; 3487 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 3488 3489 if (pp) { 3490 struct sk_buff *nskb = *pp; 3491 3492 *pp = nskb->next; 3493 nskb->next = NULL; 3494 napi_gro_complete(nskb); 3495 napi->gro_count--; 3496 } 3497 3498 if (same_flow) 3499 goto ok; 3500 3501 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) 3502 goto normal; 3503 3504 napi->gro_count++; 3505 NAPI_GRO_CB(skb)->count = 1; 3506 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3507 skb->next = napi->gro_list; 3508 napi->gro_list = skb; 3509 ret = GRO_HELD; 3510 3511 pull: 3512 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3513 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3514 3515 BUG_ON(skb->end - skb->tail < grow); 3516 3517 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3518 3519 skb->tail += grow; 3520 skb->data_len -= grow; 3521 3522 skb_shinfo(skb)->frags[0].page_offset += grow; 3523 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3524 3525 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3526 skb_frag_unref(skb, 0); 3527 memmove(skb_shinfo(skb)->frags, 3528 skb_shinfo(skb)->frags + 1, 3529 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3530 } 3531 } 3532 3533 ok: 3534 return ret; 3535 3536 normal: 3537 ret = GRO_NORMAL; 3538 goto pull; 3539 } 3540 EXPORT_SYMBOL(dev_gro_receive); 3541 3542 static inline gro_result_t 3543 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3544 { 3545 struct sk_buff *p; 3546 unsigned int maclen = skb->dev->hard_header_len; 3547 3548 for (p = napi->gro_list; p; p = p->next) { 3549 unsigned long diffs; 3550 3551 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3552 diffs |= p->vlan_tci ^ skb->vlan_tci; 3553 if (maclen == ETH_HLEN) 3554 diffs |= compare_ether_header(skb_mac_header(p), 3555 skb_gro_mac_header(skb)); 3556 else if (!diffs) 3557 diffs = memcmp(skb_mac_header(p), 3558 skb_gro_mac_header(skb), 3559 maclen); 3560 NAPI_GRO_CB(p)->same_flow = !diffs; 3561 NAPI_GRO_CB(p)->flush = 0; 3562 } 3563 3564 return dev_gro_receive(napi, skb); 3565 } 3566 3567 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3568 { 3569 switch (ret) { 3570 case GRO_NORMAL: 3571 if (netif_receive_skb(skb)) 3572 ret = GRO_DROP; 3573 break; 3574 3575 case GRO_DROP: 3576 kfree_skb(skb); 3577 break; 3578 3579 case GRO_MERGED_FREE: 3580 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 3581 kmem_cache_free(skbuff_head_cache, skb); 3582 else 3583 __kfree_skb(skb); 3584 break; 3585 3586 case GRO_HELD: 3587 case GRO_MERGED: 3588 break; 3589 } 3590 3591 return ret; 3592 } 3593 EXPORT_SYMBOL(napi_skb_finish); 3594 3595 void skb_gro_reset_offset(struct sk_buff *skb) 3596 { 3597 NAPI_GRO_CB(skb)->data_offset = 0; 3598 NAPI_GRO_CB(skb)->frag0 = NULL; 3599 NAPI_GRO_CB(skb)->frag0_len = 0; 3600 3601 if (skb->mac_header == skb->tail && 3602 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { 3603 NAPI_GRO_CB(skb)->frag0 = 3604 skb_frag_address(&skb_shinfo(skb)->frags[0]); 3605 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); 3606 } 3607 } 3608 EXPORT_SYMBOL(skb_gro_reset_offset); 3609 3610 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3611 { 3612 skb_gro_reset_offset(skb); 3613 3614 return napi_skb_finish(__napi_gro_receive(napi, skb), skb); 3615 } 3616 EXPORT_SYMBOL(napi_gro_receive); 3617 3618 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3619 { 3620 __skb_pull(skb, skb_headlen(skb)); 3621 /* restore the reserve we had after netdev_alloc_skb_ip_align() 
*/ 3622 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 3623 skb->vlan_tci = 0; 3624 skb->dev = napi->dev; 3625 skb->skb_iif = 0; 3626 3627 napi->skb = skb; 3628 } 3629 3630 struct sk_buff *napi_get_frags(struct napi_struct *napi) 3631 { 3632 struct sk_buff *skb = napi->skb; 3633 3634 if (!skb) { 3635 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 3636 if (skb) 3637 napi->skb = skb; 3638 } 3639 return skb; 3640 } 3641 EXPORT_SYMBOL(napi_get_frags); 3642 3643 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3644 gro_result_t ret) 3645 { 3646 switch (ret) { 3647 case GRO_NORMAL: 3648 case GRO_HELD: 3649 skb->protocol = eth_type_trans(skb, skb->dev); 3650 3651 if (ret == GRO_HELD) 3652 skb_gro_pull(skb, -ETH_HLEN); 3653 else if (netif_receive_skb(skb)) 3654 ret = GRO_DROP; 3655 break; 3656 3657 case GRO_DROP: 3658 case GRO_MERGED_FREE: 3659 napi_reuse_skb(napi, skb); 3660 break; 3661 3662 case GRO_MERGED: 3663 break; 3664 } 3665 3666 return ret; 3667 } 3668 EXPORT_SYMBOL(napi_frags_finish); 3669 3670 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3671 { 3672 struct sk_buff *skb = napi->skb; 3673 struct ethhdr *eth; 3674 unsigned int hlen; 3675 unsigned int off; 3676 3677 napi->skb = NULL; 3678 3679 skb_reset_mac_header(skb); 3680 skb_gro_reset_offset(skb); 3681 3682 off = skb_gro_offset(skb); 3683 hlen = off + sizeof(*eth); 3684 eth = skb_gro_header_fast(skb, off); 3685 if (skb_gro_header_hard(skb, hlen)) { 3686 eth = skb_gro_header_slow(skb, hlen, off); 3687 if (unlikely(!eth)) { 3688 napi_reuse_skb(napi, skb); 3689 skb = NULL; 3690 goto out; 3691 } 3692 } 3693 3694 skb_gro_pull(skb, sizeof(*eth)); 3695 3696 /* 3697 * This works because the only protocols we care about don't require 3698 * special handling. We'll fix it up properly at the end. 3699 */ 3700 skb->protocol = eth->h_proto; 3701 3702 out: 3703 return skb; 3704 } 3705 3706 gro_result_t napi_gro_frags(struct napi_struct *napi) 3707 { 3708 struct sk_buff *skb = napi_frags_skb(napi); 3709 3710 if (!skb) 3711 return GRO_DROP; 3712 3713 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 3714 } 3715 EXPORT_SYMBOL(napi_gro_frags); 3716 3717 /* 3718 * net_rps_action sends any pending IPIs for RPS. 3719 * Note: called with local irq disabled, but exits with local irq enabled. 3720 */ 3721 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 3722 { 3723 #ifdef CONFIG_RPS 3724 struct softnet_data *remsd = sd->rps_ipi_list; 3725 3726 if (remsd) { 3727 sd->rps_ipi_list = NULL; 3728 3729 local_irq_enable(); 3730 3731 /* Send pending IPIs to kick RPS processing on remote cpus. */ 3732 while (remsd) { 3733 struct softnet_data *next = remsd->rps_ipi_next; 3734 3735 if (cpu_online(remsd->cpu)) 3736 __smp_call_function_single(remsd->cpu, 3737 &remsd->csd, 0); 3738 remsd = next; 3739 } 3740 } else 3741 #endif 3742 local_irq_enable(); 3743 } 3744 3745 static int process_backlog(struct napi_struct *napi, int quota) 3746 { 3747 int work = 0; 3748 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 3749 3750 #ifdef CONFIG_RPS 3751 /* Check if we have pending IPIs; it's better to send them now 3752 * than to wait for net_rx_action() to end.
3753 */ 3754 if (sd->rps_ipi_list) { 3755 local_irq_disable(); 3756 net_rps_action_and_irq_enable(sd); 3757 } 3758 #endif 3759 napi->weight = weight_p; 3760 local_irq_disable(); 3761 while (work < quota) { 3762 struct sk_buff *skb; 3763 unsigned int qlen; 3764 3765 while ((skb = __skb_dequeue(&sd->process_queue))) { 3766 local_irq_enable(); 3767 __netif_receive_skb(skb); 3768 local_irq_disable(); 3769 input_queue_head_incr(sd); 3770 if (++work >= quota) { 3771 local_irq_enable(); 3772 return work; 3773 } 3774 } 3775 3776 rps_lock(sd); 3777 qlen = skb_queue_len(&sd->input_pkt_queue); 3778 if (qlen) 3779 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3780 &sd->process_queue); 3781 3782 if (qlen < quota - work) { 3783 /* 3784 * Inline a custom version of __napi_complete(). 3785 * Only the current cpu owns and manipulates this napi, 3786 * and NAPI_STATE_SCHED is the only possible flag set on backlog, 3787 * so we can use a plain write instead of clear_bit() 3788 * and we don't need an smp_mb() memory barrier. 3789 */ 3790 list_del(&napi->poll_list); 3791 napi->state = 0; 3792 3793 quota = work + qlen; 3794 } 3795 rps_unlock(sd); 3796 } 3797 local_irq_enable(); 3798 3799 return work; 3800 } 3801 3802 /** 3803 * __napi_schedule - schedule for receive 3804 * @n: entry to schedule 3805 * 3806 * The entry's receive function will be scheduled to run. 3807 */ 3808 void __napi_schedule(struct napi_struct *n) 3809 { 3810 unsigned long flags; 3811 3812 local_irq_save(flags); 3813 ____napi_schedule(&__get_cpu_var(softnet_data), n); 3814 local_irq_restore(flags); 3815 } 3816 EXPORT_SYMBOL(__napi_schedule); 3817 3818 void __napi_complete(struct napi_struct *n) 3819 { 3820 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 3821 BUG_ON(n->gro_list); 3822 3823 list_del(&n->poll_list); 3824 smp_mb__before_clear_bit(); 3825 clear_bit(NAPI_STATE_SCHED, &n->state); 3826 } 3827 EXPORT_SYMBOL(__napi_complete); 3828 3829 void napi_complete(struct napi_struct *n) 3830 { 3831 unsigned long flags; 3832 3833 /* 3834 * don't let napi dequeue from the cpu poll list 3835 * just in case it's running on a different cpu 3836 */ 3837 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 3838 return; 3839 3840 napi_gro_flush(n); 3841 local_irq_save(flags); 3842 __napi_complete(n); 3843 local_irq_restore(flags); 3844 } 3845 EXPORT_SYMBOL(napi_complete); 3846 3847 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 3848 int (*poll)(struct napi_struct *, int), int weight) 3849 { 3850 INIT_LIST_HEAD(&napi->poll_list); 3851 napi->gro_count = 0; 3852 napi->gro_list = NULL; 3853 napi->skb = NULL; 3854 napi->poll = poll; 3855 napi->weight = weight; 3856 list_add(&napi->dev_list, &dev->napi_list); 3857 napi->dev = dev; 3858 #ifdef CONFIG_NETPOLL 3859 spin_lock_init(&napi->poll_lock); 3860 napi->poll_owner = -1; 3861 #endif 3862 set_bit(NAPI_STATE_SCHED, &napi->state); 3863 } 3864 EXPORT_SYMBOL(netif_napi_add); 3865 3866 void netif_napi_del(struct napi_struct *napi) 3867 { 3868 struct sk_buff *skb, *next; 3869 3870 list_del_init(&napi->dev_list); 3871 napi_free_frags(napi); 3872 3873 for (skb = napi->gro_list; skb; skb = next) { 3874 next = skb->next; 3875 skb->next = NULL; 3876 kfree_skb(skb); 3877 } 3878 3879 napi->gro_list = NULL; 3880 napi->gro_count = 0; 3881 } 3882 EXPORT_SYMBOL(netif_napi_del); 3883 3884 static void net_rx_action(struct softirq_action *h) 3885 { 3886 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3887 unsigned long time_limit = jiffies + 2; 3888 int budget = netdev_budget; 3889 void *have; 3890
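	/*
	 * Each n->poll() invoked below is a driver callback registered
	 * with netif_napi_add().  A (hypothetical) driver poll routine
	 * has this shape -- the mydrv_* names are illustrative only:
	 *
	 *	static int mydrv_poll(struct napi_struct *napi, int budget)
	 *	{
	 *		int done = mydrv_clean_rx(napi, budget);
	 *		if (done < budget)
	 *			napi_complete(napi);
	 *		return done;
	 *	}
	 */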
3891 local_irq_disable(); 3892 3893 while (!list_empty(&sd->poll_list)) { 3894 struct napi_struct *n; 3895 int work, weight; 3896 3897 /* If the softirq window is exhausted then punt. 3898 * Allow this to run for 2 jiffies, which will allow 3899 * an average latency of 1.5/HZ. 3900 */ 3901 if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) 3902 goto softnet_break; 3903 3904 local_irq_enable(); 3905 3906 /* Even though interrupts have been re-enabled, this 3907 * access is safe because interrupts can only add new 3908 * entries to the tail of this list, and only ->poll() 3909 * calls can remove this head entry from the list. 3910 */ 3911 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 3912 3913 have = netpoll_poll_lock(n); 3914 3915 weight = n->weight; 3916 3917 /* This NAPI_STATE_SCHED test is for avoiding a race 3918 * with netpoll's poll_napi(). Only the entity which 3919 * obtains the lock and sees NAPI_STATE_SCHED set will 3920 * actually make the ->poll() call. Therefore we avoid 3921 * accidentally calling ->poll() when NAPI is not scheduled. 3922 */ 3923 work = 0; 3924 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 3925 work = n->poll(n, weight); 3926 trace_napi_poll(n); 3927 } 3928 3929 WARN_ON_ONCE(work > weight); 3930 3931 budget -= work; 3932 3933 local_irq_disable(); 3934 3935 /* Drivers must not modify the NAPI state if they 3936 * consume the entire weight. In such cases this code 3937 * still "owns" the NAPI instance and therefore can 3938 * move the instance around on the list at-will. 3939 */ 3940 if (unlikely(work == weight)) { 3941 if (unlikely(napi_disable_pending(n))) { 3942 local_irq_enable(); 3943 napi_complete(n); 3944 local_irq_disable(); 3945 } else 3946 list_move_tail(&n->poll_list, &sd->poll_list); 3947 } 3948 3949 netpoll_poll_unlock(have); 3950 } 3951 out: 3952 net_rps_action_and_irq_enable(sd); 3953 3954 #ifdef CONFIG_NET_DMA 3955 /* 3956 * There may not be any more sk_buffs coming right now, so push 3957 * any pending DMA copies to hardware 3958 */ 3959 dma_issue_pending_all(); 3960 #endif 3961 3962 return; 3963 3964 softnet_break: 3965 sd->time_squeeze++; 3966 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3967 goto out; 3968 } 3969 3970 static gifconf_func_t *gifconf_list[NPROTO]; 3971 3972 /** 3973 * register_gifconf - register a SIOCGIF handler 3974 * @family: Address family 3975 * @gifconf: Function handler 3976 * 3977 * Register protocol-dependent address dumping routines. The handler 3978 * that is passed must not be freed or reused until it has been replaced 3979 * by another handler. 3980 */ 3981 int register_gifconf(unsigned int family, gifconf_func_t *gifconf) 3982 { 3983 if (family >= NPROTO) 3984 return -EINVAL; 3985 gifconf_list[family] = gifconf; 3986 return 0; 3987 } 3988 EXPORT_SYMBOL(register_gifconf); 3989 3990 3991 /* 3992 * Map an interface index to its name (SIOCGIFNAME) 3993 */ 3994 3995 /* 3996 * We need this ioctl for efficient implementation of the 3997 * if_indextoname() function required by the IPv6 API. Without 3998 * it, we would have to search all the interfaces to find a 3999 * match. --pb 4000 */ 4001 4002 static int dev_ifname(struct net *net, struct ifreq __user *arg) 4003 { 4004 struct net_device *dev; 4005 struct ifreq ifr; 4006 4007 /* 4008 * Fetch the caller's info block.
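 *
 * (Userspace reaches this through the SIOCGIFNAME ioctl; an
 * illustrative caller, not part of the kernel:
 *
 *	struct ifreq ifr;
 *	ifr.ifr_ifindex = idx;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%s\n", ifr.ifr_name);
 * )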
4009 */ 4010 4011 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 4012 return -EFAULT; 4013 4014 rcu_read_lock(); 4015 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 4016 if (!dev) { 4017 rcu_read_unlock(); 4018 return -ENODEV; 4019 } 4020 4021 strcpy(ifr.ifr_name, dev->name); 4022 rcu_read_unlock(); 4023 4024 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 4025 return -EFAULT; 4026 return 0; 4027 } 4028 4029 /* 4030 * Perform a SIOCGIFCONF call. This structure will change 4031 * size eventually, and there is nothing I can do about it. 4032 * Thus we will need a 'compatibility mode'. 4033 */ 4034 4035 static int dev_ifconf(struct net *net, char __user *arg) 4036 { 4037 struct ifconf ifc; 4038 struct net_device *dev; 4039 char __user *pos; 4040 int len; 4041 int total; 4042 int i; 4043 4044 /* 4045 * Fetch the caller's info block. 4046 */ 4047 4048 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) 4049 return -EFAULT; 4050 4051 pos = ifc.ifc_buf; 4052 len = ifc.ifc_len; 4053 4054 /* 4055 * Loop over the interfaces, and write an info block for each. 4056 */ 4057 4058 total = 0; 4059 for_each_netdev(net, dev) { 4060 for (i = 0; i < NPROTO; i++) { 4061 if (gifconf_list[i]) { 4062 int done; 4063 if (!pos) 4064 done = gifconf_list[i](dev, NULL, 0); 4065 else 4066 done = gifconf_list[i](dev, pos + total, 4067 len - total); 4068 if (done < 0) 4069 return -EFAULT; 4070 total += done; 4071 } 4072 } 4073 } 4074 4075 /* 4076 * All done. Write the updated control block back to the caller. 4077 */ 4078 ifc.ifc_len = total; 4079 4080 /* 4081 * Both BSD and Solaris return 0 here, so we do too. 4082 */ 4083 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; 4084 } 4085 4086 #ifdef CONFIG_PROC_FS 4087 4088 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) 4089 4090 #define get_bucket(x) ((x) >> BUCKET_SPACE) 4091 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) 4092 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 4093 4094 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos) 4095 { 4096 struct net *net = seq_file_net(seq); 4097 struct net_device *dev; 4098 struct hlist_node *p; 4099 struct hlist_head *h; 4100 unsigned int count = 0, offset = get_offset(*pos); 4101 4102 h = &net->dev_name_head[get_bucket(*pos)]; 4103 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4104 if (++count == offset) 4105 return dev; 4106 } 4107 4108 return NULL; 4109 } 4110 4111 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) 4112 { 4113 struct net_device *dev; 4114 unsigned int bucket; 4115 4116 do { 4117 dev = dev_from_same_bucket(seq, pos); 4118 if (dev) 4119 return dev; 4120 4121 bucket = get_bucket(*pos) + 1; 4122 *pos = set_bucket_offset(bucket, 1); 4123 } while (bucket < NETDEV_HASHENTRIES); 4124 4125 return NULL; 4126 } 4127 4128 /* 4129 * This is invoked by the /proc filesystem handler to display a device 4130 * in detail. 
4131 */ 4132 void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4133 __acquires(RCU) 4134 { 4135 rcu_read_lock(); 4136 if (!*pos) 4137 return SEQ_START_TOKEN; 4138 4139 if (get_bucket(*pos) >= NETDEV_HASHENTRIES) 4140 return NULL; 4141 4142 return dev_from_bucket(seq, pos); 4143 } 4144 4145 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4146 { 4147 ++*pos; 4148 return dev_from_bucket(seq, pos); 4149 } 4150 4151 void dev_seq_stop(struct seq_file *seq, void *v) 4152 __releases(RCU) 4153 { 4154 rcu_read_unlock(); 4155 } 4156 4157 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 4158 { 4159 struct rtnl_link_stats64 temp; 4160 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 4161 4162 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 4163 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", 4164 dev->name, stats->rx_bytes, stats->rx_packets, 4165 stats->rx_errors, 4166 stats->rx_dropped + stats->rx_missed_errors, 4167 stats->rx_fifo_errors, 4168 stats->rx_length_errors + stats->rx_over_errors + 4169 stats->rx_crc_errors + stats->rx_frame_errors, 4170 stats->rx_compressed, stats->multicast, 4171 stats->tx_bytes, stats->tx_packets, 4172 stats->tx_errors, stats->tx_dropped, 4173 stats->tx_fifo_errors, stats->collisions, 4174 stats->tx_carrier_errors + 4175 stats->tx_aborted_errors + 4176 stats->tx_window_errors + 4177 stats->tx_heartbeat_errors, 4178 stats->tx_compressed); 4179 } 4180 4181 /* 4182 * Called from the PROCfs module. This now uses the new arbitrary sized 4183 * /proc/net interface to create /proc/net/dev 4184 */ 4185 static int dev_seq_show(struct seq_file *seq, void *v) 4186 { 4187 if (v == SEQ_START_TOKEN) 4188 seq_puts(seq, "Inter-| Receive " 4189 " | Transmit\n" 4190 " face |bytes packets errs drop fifo frame " 4191 "compressed multicast|bytes packets errs " 4192 "drop fifo colls carrier compressed\n"); 4193 else 4194 dev_seq_printf_stats(seq, v); 4195 return 0; 4196 } 4197 4198 static struct softnet_data *softnet_get_online(loff_t *pos) 4199 { 4200 struct softnet_data *sd = NULL; 4201 4202 while (*pos < nr_cpu_ids) 4203 if (cpu_online(*pos)) { 4204 sd = &per_cpu(softnet_data, *pos); 4205 break; 4206 } else 4207 ++*pos; 4208 return sd; 4209 } 4210 4211 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) 4212 { 4213 return softnet_get_online(pos); 4214 } 4215 4216 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4217 { 4218 ++*pos; 4219 return softnet_get_online(pos); 4220 } 4221 4222 static void softnet_seq_stop(struct seq_file *seq, void *v) 4223 { 4224 } 4225 4226 static int softnet_seq_show(struct seq_file *seq, void *v) 4227 { 4228 struct softnet_data *sd = v; 4229 4230 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 4231 sd->processed, sd->dropped, sd->time_squeeze, 0, 4232 0, 0, 0, 0, /* was fastroute */ 4233 sd->cpu_collision, sd->received_rps); 4234 return 0; 4235 } 4236 4237 static const struct seq_operations dev_seq_ops = { 4238 .start = dev_seq_start, 4239 .next = dev_seq_next, 4240 .stop = dev_seq_stop, 4241 .show = dev_seq_show, 4242 }; 4243 4244 static int dev_seq_open(struct inode *inode, struct file *file) 4245 { 4246 return seq_open_net(inode, file, &dev_seq_ops, 4247 sizeof(struct seq_net_private)); 4248 } 4249 4250 static const struct file_operations dev_seq_fops = { 4251 .owner = THIS_MODULE, 4252 .open = dev_seq_open, 4253 .read = seq_read, 4254 .llseek = seq_lseek, 4255 .release = seq_release_net, 4256 }; 
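/*
 * For reference, a minimal sketch of how another subsystem could expose
 * a file under /proc/net following the same pattern as dev_seq_fops
 * above. The "foo" names are hypothetical and not part of this file;
 * a subsystem without per-namespace state could use single_open():
 *
 *	static int foo_seq_show(struct seq_file *seq, void *v)
 *	{
 *		seq_puts(seq, "example\n");
 *		return 0;
 *	}
 *
 *	static int foo_seq_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, foo_seq_show, NULL);
 *	}
 *
 *	static const struct file_operations foo_seq_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = foo_seq_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 *
 * registered with proc_net_fops_create(net, "foo", S_IRUGO, &foo_seq_fops).
 */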
4257 4258 static const struct seq_operations softnet_seq_ops = { 4259 .start = softnet_seq_start, 4260 .next = softnet_seq_next, 4261 .stop = softnet_seq_stop, 4262 .show = softnet_seq_show, 4263 }; 4264 4265 static int softnet_seq_open(struct inode *inode, struct file *file) 4266 { 4267 return seq_open(file, &softnet_seq_ops); 4268 } 4269 4270 static const struct file_operations softnet_seq_fops = { 4271 .owner = THIS_MODULE, 4272 .open = softnet_seq_open, 4273 .read = seq_read, 4274 .llseek = seq_lseek, 4275 .release = seq_release, 4276 }; 4277 4278 static void *ptype_get_idx(loff_t pos) 4279 { 4280 struct packet_type *pt = NULL; 4281 loff_t i = 0; 4282 int t; 4283 4284 list_for_each_entry_rcu(pt, &ptype_all, list) { 4285 if (i == pos) 4286 return pt; 4287 ++i; 4288 } 4289 4290 for (t = 0; t < PTYPE_HASH_SIZE; t++) { 4291 list_for_each_entry_rcu(pt, &ptype_base[t], list) { 4292 if (i == pos) 4293 return pt; 4294 ++i; 4295 } 4296 } 4297 return NULL; 4298 } 4299 4300 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) 4301 __acquires(RCU) 4302 { 4303 rcu_read_lock(); 4304 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; 4305 } 4306 4307 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4308 { 4309 struct packet_type *pt; 4310 struct list_head *nxt; 4311 int hash; 4312 4313 ++*pos; 4314 if (v == SEQ_START_TOKEN) 4315 return ptype_get_idx(0); 4316 4317 pt = v; 4318 nxt = pt->list.next; 4319 if (pt->type == htons(ETH_P_ALL)) { 4320 if (nxt != &ptype_all) 4321 goto found; 4322 hash = 0; 4323 nxt = ptype_base[0].next; 4324 } else 4325 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 4326 4327 while (nxt == &ptype_base[hash]) { 4328 if (++hash >= PTYPE_HASH_SIZE) 4329 return NULL; 4330 nxt = ptype_base[hash].next; 4331 } 4332 found: 4333 return list_entry(nxt, struct packet_type, list); 4334 } 4335 4336 static void ptype_seq_stop(struct seq_file *seq, void *v) 4337 __releases(RCU) 4338 { 4339 rcu_read_unlock(); 4340 } 4341 4342 static int ptype_seq_show(struct seq_file *seq, void *v) 4343 { 4344 struct packet_type *pt = v; 4345 4346 if (v == SEQ_START_TOKEN) 4347 seq_puts(seq, "Type Device Function\n"); 4348 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 4349 if (pt->type == htons(ETH_P_ALL)) 4350 seq_puts(seq, "ALL "); 4351 else 4352 seq_printf(seq, "%04x", ntohs(pt->type)); 4353 4354 seq_printf(seq, " %-8s %pF\n", 4355 pt->dev ? 
pt->dev->name : "", pt->func); 4356 } 4357 4358 return 0; 4359 } 4360 4361 static const struct seq_operations ptype_seq_ops = { 4362 .start = ptype_seq_start, 4363 .next = ptype_seq_next, 4364 .stop = ptype_seq_stop, 4365 .show = ptype_seq_show, 4366 }; 4367 4368 static int ptype_seq_open(struct inode *inode, struct file *file) 4369 { 4370 return seq_open_net(inode, file, &ptype_seq_ops, 4371 sizeof(struct seq_net_private)); 4372 } 4373 4374 static const struct file_operations ptype_seq_fops = { 4375 .owner = THIS_MODULE, 4376 .open = ptype_seq_open, 4377 .read = seq_read, 4378 .llseek = seq_lseek, 4379 .release = seq_release_net, 4380 }; 4381 4382 4383 static int __net_init dev_proc_net_init(struct net *net) 4384 { 4385 int rc = -ENOMEM; 4386 4387 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) 4388 goto out; 4389 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) 4390 goto out_dev; 4391 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) 4392 goto out_softnet; 4393 4394 if (wext_proc_init(net)) 4395 goto out_ptype; 4396 rc = 0; 4397 out: 4398 return rc; 4399 out_ptype: 4400 proc_net_remove(net, "ptype"); 4401 out_softnet: 4402 proc_net_remove(net, "softnet_stat"); 4403 out_dev: 4404 proc_net_remove(net, "dev"); 4405 goto out; 4406 } 4407 4408 static void __net_exit dev_proc_net_exit(struct net *net) 4409 { 4410 wext_proc_exit(net); 4411 4412 proc_net_remove(net, "ptype"); 4413 proc_net_remove(net, "softnet_stat"); 4414 proc_net_remove(net, "dev"); 4415 } 4416 4417 static struct pernet_operations __net_initdata dev_proc_ops = { 4418 .init = dev_proc_net_init, 4419 .exit = dev_proc_net_exit, 4420 }; 4421 4422 static int __init dev_proc_init(void) 4423 { 4424 return register_pernet_subsys(&dev_proc_ops); 4425 } 4426 #else 4427 #define dev_proc_init() 0 4428 #endif /* CONFIG_PROC_FS */ 4429 4430 4431 /** 4432 * netdev_set_master - set up master pointer 4433 * @slave: slave device 4434 * @master: new master device 4435 * 4436 * Changes the master device of the slave. Pass %NULL to break the 4437 * bonding. The caller must hold the RTNL semaphore. On a failure 4438 * a negative errno code is returned. On success the reference counts 4439 * are adjusted and the function returns zero. 4440 */ 4441 int netdev_set_master(struct net_device *slave, struct net_device *master) 4442 { 4443 struct net_device *old = slave->master; 4444 4445 ASSERT_RTNL(); 4446 4447 if (master) { 4448 if (old) 4449 return -EBUSY; 4450 dev_hold(master); 4451 } 4452 4453 slave->master = master; 4454 4455 if (old) 4456 dev_put(old); 4457 return 0; 4458 } 4459 EXPORT_SYMBOL(netdev_set_master); 4460 4461 /** 4462 * netdev_set_bond_master - set up bonding master/slave pair 4463 * @slave: slave device 4464 * @master: new master device 4465 * 4466 * Changes the master device of the slave. Pass %NULL to break the 4467 * bonding. The caller must hold the RTNL semaphore. On a failure 4468 * a negative errno code is returned. On success %RTM_NEWLINK is sent 4469 * to the routing socket and the function returns zero. 
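 *
 * A sketched enslave/release sequence from a bonding-style driver
 * (device names hypothetical), with the RTNL semaphore already held:
 *
 *	err = netdev_set_bond_master(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_set_bond_master(slave_dev, NULL);	to break the bond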
4470 */
4471 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4472 {
4473 int err;
4474
4475 ASSERT_RTNL();
4476
4477 err = netdev_set_master(slave, master);
4478 if (err)
4479 return err;
4480 if (master)
4481 slave->flags |= IFF_SLAVE;
4482 else
4483 slave->flags &= ~IFF_SLAVE;
4484
4485 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4486 return 0;
4487 }
4488 EXPORT_SYMBOL(netdev_set_bond_master);
4489
4490 static void dev_change_rx_flags(struct net_device *dev, int flags)
4491 {
4492 const struct net_device_ops *ops = dev->netdev_ops;
4493
4494 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4495 ops->ndo_change_rx_flags(dev, flags);
4496 }
4497
4498 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4499 {
4500 unsigned int old_flags = dev->flags;
4501 uid_t uid;
4502 gid_t gid;
4503
4504 ASSERT_RTNL();
4505
4506 dev->flags |= IFF_PROMISC;
4507 dev->promiscuity += inc;
4508 if (dev->promiscuity == 0) {
4509 /*
4510 * Avoid overflow.
4511 * If inc would overflow the counter, leave promisc untouched and return an error.
4512 */
4513 if (inc < 0)
4514 dev->flags &= ~IFF_PROMISC;
4515 else {
4516 dev->promiscuity -= inc;
4517 pr_warn("%s: promiscuity counter overflow, promiscuous mode not set; the promiscuity feature of this device may be broken\n",
4518 dev->name);
4519 return -EOVERFLOW;
4520 }
4521 }
4522 if (dev->flags != old_flags) {
4523 pr_info("device %s %s promiscuous mode\n",
4524 dev->name,
4525 dev->flags & IFF_PROMISC ? "entered" : "left");
4526 if (audit_enabled) {
4527 current_uid_gid(&uid, &gid);
4528 audit_log(current->audit_context, GFP_ATOMIC,
4529 AUDIT_ANOM_PROMISCUOUS,
4530 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4531 dev->name, (dev->flags & IFF_PROMISC),
4532 (old_flags & IFF_PROMISC),
4533 audit_get_loginuid(current),
4534 uid, gid,
4535 audit_get_sessionid(current));
4536 }
4537
4538 dev_change_rx_flags(dev, IFF_PROMISC);
4539 }
4540 return 0;
4541 }
4542
4543 /**
4544 * dev_set_promiscuity - update promiscuity count on a device
4545 * @dev: device
4546 * @inc: modifier
4547 *
4548 * Add or remove promiscuity from a device. While the count in the device
4549 * remains above zero the interface remains promiscuous. Once it hits zero
4550 * the device reverts back to normal filtering operation. A negative inc
4551 * value is used to drop promiscuity on the device.
4552 * Return 0 if successful or a negative errno code on error.
4553 */
4554 int dev_set_promiscuity(struct net_device *dev, int inc)
4555 {
4556 unsigned int old_flags = dev->flags;
4557 int err;
4558
4559 err = __dev_set_promiscuity(dev, inc);
4560 if (err < 0)
4561 return err;
4562 if (dev->flags != old_flags)
4563 dev_set_rx_mode(dev);
4564 return err;
4565 }
4566 EXPORT_SYMBOL(dev_set_promiscuity);
4567
4568 /**
4569 * dev_set_allmulti - update allmulti count on a device
4570 * @dev: device
4571 * @inc: modifier
4572 *
4573 * Add or remove reception of all multicast frames to a device. While the
4574 * count in the device remains above zero the interface remains listening
4575 * to all multicast frames. Once it hits zero the device reverts back to
4576 * normal filtering operation. A negative @inc value is used to drop the
4577 * counter when releasing a resource needing all multicasts.
4578 * Return 0 if successful or a negative errno code on error.
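 *
 * A sketched use from a driver that must see every multicast frame
 * while some resource is active (illustrative only):
 *
 *	dev_set_allmulti(dev, 1);	take an allmulti reference
 *	...
 *	dev_set_allmulti(dev, -1);	drop it when done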
4579 */
4580
4581 int dev_set_allmulti(struct net_device *dev, int inc)
4582 {
4583 unsigned int old_flags = dev->flags;
4584
4585 ASSERT_RTNL();
4586
4587 dev->flags |= IFF_ALLMULTI;
4588 dev->allmulti += inc;
4589 if (dev->allmulti == 0) {
4590 /*
4591 * Avoid overflow.
4592 * If inc would overflow the counter, leave allmulti untouched and return an error.
4593 */
4594 if (inc < 0)
4595 dev->flags &= ~IFF_ALLMULTI;
4596 else {
4597 dev->allmulti -= inc;
4598 pr_warn("%s: allmulti counter overflow, allmulti not set; the allmulti feature of this device may be broken\n",
4599 dev->name);
4600 return -EOVERFLOW;
4601 }
4602 }
4603 if (dev->flags ^ old_flags) {
4604 dev_change_rx_flags(dev, IFF_ALLMULTI);
4605 dev_set_rx_mode(dev);
4606 }
4607 return 0;
4608 }
4609 EXPORT_SYMBOL(dev_set_allmulti);
4610
4611 /*
4612 * Upload unicast and multicast address lists to device and
4613 * configure RX filtering. When the device doesn't support unicast
4614 * filtering it is put in promiscuous mode while unicast addresses
4615 * are present.
4616 */
4617 void __dev_set_rx_mode(struct net_device *dev)
4618 {
4619 const struct net_device_ops *ops = dev->netdev_ops;
4620
4621 /* dev_open will call this function so the list will stay sane. */
4622 if (!(dev->flags&IFF_UP))
4623 return;
4624
4625 if (!netif_device_present(dev))
4626 return;
4627
4628 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4629 /* Unicast address changes may only happen under the rtnl,
4630 * therefore calling __dev_set_promiscuity here is safe.
4631 */
4632 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4633 __dev_set_promiscuity(dev, 1);
4634 dev->uc_promisc = true;
4635 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4636 __dev_set_promiscuity(dev, -1);
4637 dev->uc_promisc = false;
4638 }
4639 }
4640
4641 if (ops->ndo_set_rx_mode)
4642 ops->ndo_set_rx_mode(dev);
4643 }
4644
4645 void dev_set_rx_mode(struct net_device *dev)
4646 {
4647 netif_addr_lock_bh(dev);
4648 __dev_set_rx_mode(dev);
4649 netif_addr_unlock_bh(dev);
4650 }
4651
4652 /**
4653 * dev_get_flags - get flags reported to userspace
4654 * @dev: device
4655 *
4656 * Get the combination of flag bits exported through APIs to userspace.
4657 */
4658 unsigned int dev_get_flags(const struct net_device *dev)
4659 {
4660 unsigned int flags;
4661
4662 flags = (dev->flags & ~(IFF_PROMISC |
4663 IFF_ALLMULTI |
4664 IFF_RUNNING |
4665 IFF_LOWER_UP |
4666 IFF_DORMANT)) |
4667 (dev->gflags & (IFF_PROMISC |
4668 IFF_ALLMULTI));
4669
4670 if (netif_running(dev)) {
4671 if (netif_oper_up(dev))
4672 flags |= IFF_RUNNING;
4673 if (netif_carrier_ok(dev))
4674 flags |= IFF_LOWER_UP;
4675 if (netif_dormant(dev))
4676 flags |= IFF_DORMANT;
4677 }
4678
4679 return flags;
4680 }
4681 EXPORT_SYMBOL(dev_get_flags);
4682
4683 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4684 {
4685 unsigned int old_flags = dev->flags;
4686 int ret;
4687
4688 ASSERT_RTNL();
4689
4690 /*
4691 * Set the flags on our device.
4692 */
4693
4694 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4695 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4696 IFF_AUTOMEDIA)) |
4697 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4698 IFF_ALLMULTI));
4699
4700 /*
4701 * Load in the correct multicast list now the flags have changed.
4702 */
4703
4704 if ((old_flags ^ flags) & IFF_MULTICAST)
4705 dev_change_rx_flags(dev, IFF_MULTICAST);
4706
4707 dev_set_rx_mode(dev);
4708
4709 /*
4710 * Have we downed the interface?
We handle IFF_UP ourselves
4711 * according to user attempts to set it, rather than blindly
4712 * setting it.
4713 */
4714
4715 ret = 0;
4716 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different? */
4717 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4718
4719 if (!ret)
4720 dev_set_rx_mode(dev);
4721 }
4722
4723 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4724 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4725
4726 dev->gflags ^= IFF_PROMISC;
4727 dev_set_promiscuity(dev, inc);
4728 }
4729
4730 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4731 is important. Some (broken) drivers set IFF_PROMISC when
4732 IFF_ALLMULTI is requested, without asking us and without reporting.
4733 */
4734 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4735 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4736
4737 dev->gflags ^= IFF_ALLMULTI;
4738 dev_set_allmulti(dev, inc);
4739 }
4740
4741 return ret;
4742 }
4743
4744 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4745 {
4746 unsigned int changes = dev->flags ^ old_flags;
4747
4748 if (changes & IFF_UP) {
4749 if (dev->flags & IFF_UP)
4750 call_netdevice_notifiers(NETDEV_UP, dev);
4751 else
4752 call_netdevice_notifiers(NETDEV_DOWN, dev);
4753 }
4754
4755 if (dev->flags & IFF_UP &&
4756 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4757 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4758 }
4759
4760 /**
4761 * dev_change_flags - change device settings
4762 * @dev: device
4763 * @flags: device state flags
4764 *
4765 * Change settings on a device based on its state flags. The flags are
4766 * in the format exported to userspace.
4767 */
4768 int dev_change_flags(struct net_device *dev, unsigned int flags)
4769 {
4770 int ret;
4771 unsigned int changes, old_flags = dev->flags;
4772
4773 ret = __dev_change_flags(dev, flags);
4774 if (ret < 0)
4775 return ret;
4776
4777 changes = old_flags ^ dev->flags;
4778 if (changes)
4779 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4780
4781 __dev_notify_flags(dev, old_flags);
4782 return ret;
4783 }
4784 EXPORT_SYMBOL(dev_change_flags);
4785
4786 /**
4787 * dev_set_mtu - Change maximum transfer unit
4788 * @dev: device
4789 * @new_mtu: new transfer unit
4790 *
4791 * Change the maximum transfer size of the network device.
4792 */
4793 int dev_set_mtu(struct net_device *dev, int new_mtu)
4794 {
4795 const struct net_device_ops *ops = dev->netdev_ops;
4796 int err;
4797
4798 if (new_mtu == dev->mtu)
4799 return 0;
4800
4801 /* MTU must be positive.
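 * (For example, a SIOCSIFMTU request carrying ifr_mtu = 1500, the
 * conventional Ethernet default, passes this check; negative values
 * are rejected just below.)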
*/ 4802 if (new_mtu < 0) 4803 return -EINVAL; 4804 4805 if (!netif_device_present(dev)) 4806 return -ENODEV; 4807 4808 err = 0; 4809 if (ops->ndo_change_mtu) 4810 err = ops->ndo_change_mtu(dev, new_mtu); 4811 else 4812 dev->mtu = new_mtu; 4813 4814 if (!err && dev->flags & IFF_UP) 4815 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4816 return err; 4817 } 4818 EXPORT_SYMBOL(dev_set_mtu); 4819 4820 /** 4821 * dev_set_group - Change group this device belongs to 4822 * @dev: device 4823 * @new_group: group this device should belong to 4824 */ 4825 void dev_set_group(struct net_device *dev, int new_group) 4826 { 4827 dev->group = new_group; 4828 } 4829 EXPORT_SYMBOL(dev_set_group); 4830 4831 /** 4832 * dev_set_mac_address - Change Media Access Control Address 4833 * @dev: device 4834 * @sa: new address 4835 * 4836 * Change the hardware (MAC) address of the device 4837 */ 4838 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 4839 { 4840 const struct net_device_ops *ops = dev->netdev_ops; 4841 int err; 4842 4843 if (!ops->ndo_set_mac_address) 4844 return -EOPNOTSUPP; 4845 if (sa->sa_family != dev->type) 4846 return -EINVAL; 4847 if (!netif_device_present(dev)) 4848 return -ENODEV; 4849 err = ops->ndo_set_mac_address(dev, sa); 4850 if (!err) 4851 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4852 add_device_randomness(dev->dev_addr, dev->addr_len); 4853 return err; 4854 } 4855 EXPORT_SYMBOL(dev_set_mac_address); 4856 4857 /* 4858 * Perform the SIOCxIFxxx calls, inside rcu_read_lock() 4859 */ 4860 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) 4861 { 4862 int err; 4863 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); 4864 4865 if (!dev) 4866 return -ENODEV; 4867 4868 switch (cmd) { 4869 case SIOCGIFFLAGS: /* Get interface flags */ 4870 ifr->ifr_flags = (short) dev_get_flags(dev); 4871 return 0; 4872 4873 case SIOCGIFMETRIC: /* Get the metric on the interface 4874 (currently unused) */ 4875 ifr->ifr_metric = 0; 4876 return 0; 4877 4878 case SIOCGIFMTU: /* Get the MTU of a device */ 4879 ifr->ifr_mtu = dev->mtu; 4880 return 0; 4881 4882 case SIOCGIFHWADDR: 4883 if (!dev->addr_len) 4884 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); 4885 else 4886 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, 4887 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4888 ifr->ifr_hwaddr.sa_family = dev->type; 4889 return 0; 4890 4891 case SIOCGIFSLAVE: 4892 err = -EINVAL; 4893 break; 4894 4895 case SIOCGIFMAP: 4896 ifr->ifr_map.mem_start = dev->mem_start; 4897 ifr->ifr_map.mem_end = dev->mem_end; 4898 ifr->ifr_map.base_addr = dev->base_addr; 4899 ifr->ifr_map.irq = dev->irq; 4900 ifr->ifr_map.dma = dev->dma; 4901 ifr->ifr_map.port = dev->if_port; 4902 return 0; 4903 4904 case SIOCGIFINDEX: 4905 ifr->ifr_ifindex = dev->ifindex; 4906 return 0; 4907 4908 case SIOCGIFTXQLEN: 4909 ifr->ifr_qlen = dev->tx_queue_len; 4910 return 0; 4911 4912 default: 4913 /* dev_ioctl() should ensure this case 4914 * is never reached 4915 */ 4916 WARN_ON(1); 4917 err = -ENOTTY; 4918 break; 4919 4920 } 4921 return err; 4922 } 4923 4924 /* 4925 * Perform the SIOCxIFxxx calls, inside rtnl_lock() 4926 */ 4927 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) 4928 { 4929 int err; 4930 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); 4931 const struct net_device_ops *ops; 4932 4933 if (!dev) 4934 return -ENODEV; 4935 4936 ops = dev->netdev_ops; 4937 4938 switch (cmd) { 4939 case SIOCSIFFLAGS: /* 
Set interface flags */ 4940 return dev_change_flags(dev, ifr->ifr_flags); 4941 4942 case SIOCSIFMETRIC: /* Set the metric on the interface 4943 (currently unused) */ 4944 return -EOPNOTSUPP; 4945 4946 case SIOCSIFMTU: /* Set the MTU of a device */ 4947 return dev_set_mtu(dev, ifr->ifr_mtu); 4948 4949 case SIOCSIFHWADDR: 4950 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 4951 4952 case SIOCSIFHWBROADCAST: 4953 if (ifr->ifr_hwaddr.sa_family != dev->type) 4954 return -EINVAL; 4955 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, 4956 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4957 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4958 return 0; 4959 4960 case SIOCSIFMAP: 4961 if (ops->ndo_set_config) { 4962 if (!netif_device_present(dev)) 4963 return -ENODEV; 4964 return ops->ndo_set_config(dev, &ifr->ifr_map); 4965 } 4966 return -EOPNOTSUPP; 4967 4968 case SIOCADDMULTI: 4969 if (!ops->ndo_set_rx_mode || 4970 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 4971 return -EINVAL; 4972 if (!netif_device_present(dev)) 4973 return -ENODEV; 4974 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); 4975 4976 case SIOCDELMULTI: 4977 if (!ops->ndo_set_rx_mode || 4978 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 4979 return -EINVAL; 4980 if (!netif_device_present(dev)) 4981 return -ENODEV; 4982 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); 4983 4984 case SIOCSIFTXQLEN: 4985 if (ifr->ifr_qlen < 0) 4986 return -EINVAL; 4987 dev->tx_queue_len = ifr->ifr_qlen; 4988 return 0; 4989 4990 case SIOCSIFNAME: 4991 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 4992 return dev_change_name(dev, ifr->ifr_newname); 4993 4994 case SIOCSHWTSTAMP: 4995 err = net_hwtstamp_validate(ifr); 4996 if (err) 4997 return err; 4998 /* fall through */ 4999 5000 /* 5001 * Unknown or private ioctl 5002 */ 5003 default: 5004 if ((cmd >= SIOCDEVPRIVATE && 5005 cmd <= SIOCDEVPRIVATE + 15) || 5006 cmd == SIOCBONDENSLAVE || 5007 cmd == SIOCBONDRELEASE || 5008 cmd == SIOCBONDSETHWADDR || 5009 cmd == SIOCBONDSLAVEINFOQUERY || 5010 cmd == SIOCBONDINFOQUERY || 5011 cmd == SIOCBONDCHANGEACTIVE || 5012 cmd == SIOCGMIIPHY || 5013 cmd == SIOCGMIIREG || 5014 cmd == SIOCSMIIREG || 5015 cmd == SIOCBRADDIF || 5016 cmd == SIOCBRDELIF || 5017 cmd == SIOCSHWTSTAMP || 5018 cmd == SIOCWANDEV) { 5019 err = -EOPNOTSUPP; 5020 if (ops->ndo_do_ioctl) { 5021 if (netif_device_present(dev)) 5022 err = ops->ndo_do_ioctl(dev, ifr, cmd); 5023 else 5024 err = -ENODEV; 5025 } 5026 } else 5027 err = -EINVAL; 5028 5029 } 5030 return err; 5031 } 5032 5033 /* 5034 * This function handles all "interface"-type I/O control requests. The actual 5035 * 'doing' part of this is dev_ifsioc above. 5036 */ 5037 5038 /** 5039 * dev_ioctl - network device ioctl 5040 * @net: the applicable net namespace 5041 * @cmd: command to issue 5042 * @arg: pointer to a struct ifreq in user space 5043 * 5044 * Issue ioctl functions to devices. This is normally called by the 5045 * user space syscall interfaces but can sometimes be useful for 5046 * other purposes. The return value is the return from the syscall if 5047 * positive or a negative errno code on error. 5048 */ 5049 5050 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) 5051 { 5052 struct ifreq ifr; 5053 int ret; 5054 char *colon; 5055 5056 /* One special case: SIOCGIFCONF takes ifconf argument 5057 and requires shared lock, because it sleeps writing 5058 to user space. 
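
 A sketch of the userspace side, for context (hypothetical caller):

 	char buf[4096];
 	struct ifconf ifc = { .ifc_len = sizeof(buf), .ifc_buf = buf };
 	ioctl(fd, SIOCGIFCONF, &ifc);

 after which ifc.ifc_len holds the number of bytes actually written.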
5059 */ 5060 5061 if (cmd == SIOCGIFCONF) { 5062 rtnl_lock(); 5063 ret = dev_ifconf(net, (char __user *) arg); 5064 rtnl_unlock(); 5065 return ret; 5066 } 5067 if (cmd == SIOCGIFNAME) 5068 return dev_ifname(net, (struct ifreq __user *)arg); 5069 5070 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 5071 return -EFAULT; 5072 5073 ifr.ifr_name[IFNAMSIZ-1] = 0; 5074 5075 colon = strchr(ifr.ifr_name, ':'); 5076 if (colon) 5077 *colon = 0; 5078 5079 /* 5080 * See which interface the caller is talking about. 5081 */ 5082 5083 switch (cmd) { 5084 /* 5085 * These ioctl calls: 5086 * - can be done by all. 5087 * - atomic and do not require locking. 5088 * - return a value 5089 */ 5090 case SIOCGIFFLAGS: 5091 case SIOCGIFMETRIC: 5092 case SIOCGIFMTU: 5093 case SIOCGIFHWADDR: 5094 case SIOCGIFSLAVE: 5095 case SIOCGIFMAP: 5096 case SIOCGIFINDEX: 5097 case SIOCGIFTXQLEN: 5098 dev_load(net, ifr.ifr_name); 5099 rcu_read_lock(); 5100 ret = dev_ifsioc_locked(net, &ifr, cmd); 5101 rcu_read_unlock(); 5102 if (!ret) { 5103 if (colon) 5104 *colon = ':'; 5105 if (copy_to_user(arg, &ifr, 5106 sizeof(struct ifreq))) 5107 ret = -EFAULT; 5108 } 5109 return ret; 5110 5111 case SIOCETHTOOL: 5112 dev_load(net, ifr.ifr_name); 5113 rtnl_lock(); 5114 ret = dev_ethtool(net, &ifr); 5115 rtnl_unlock(); 5116 if (!ret) { 5117 if (colon) 5118 *colon = ':'; 5119 if (copy_to_user(arg, &ifr, 5120 sizeof(struct ifreq))) 5121 ret = -EFAULT; 5122 } 5123 return ret; 5124 5125 /* 5126 * These ioctl calls: 5127 * - require superuser power. 5128 * - require strict serialization. 5129 * - return a value 5130 */ 5131 case SIOCGMIIPHY: 5132 case SIOCGMIIREG: 5133 case SIOCSIFNAME: 5134 if (!capable(CAP_NET_ADMIN)) 5135 return -EPERM; 5136 dev_load(net, ifr.ifr_name); 5137 rtnl_lock(); 5138 ret = dev_ifsioc(net, &ifr, cmd); 5139 rtnl_unlock(); 5140 if (!ret) { 5141 if (colon) 5142 *colon = ':'; 5143 if (copy_to_user(arg, &ifr, 5144 sizeof(struct ifreq))) 5145 ret = -EFAULT; 5146 } 5147 return ret; 5148 5149 /* 5150 * These ioctl calls: 5151 * - require superuser power. 5152 * - require strict serialization. 5153 * - do not return a value 5154 */ 5155 case SIOCSIFFLAGS: 5156 case SIOCSIFMETRIC: 5157 case SIOCSIFMTU: 5158 case SIOCSIFMAP: 5159 case SIOCSIFHWADDR: 5160 case SIOCSIFSLAVE: 5161 case SIOCADDMULTI: 5162 case SIOCDELMULTI: 5163 case SIOCSIFHWBROADCAST: 5164 case SIOCSIFTXQLEN: 5165 case SIOCSMIIREG: 5166 case SIOCBONDENSLAVE: 5167 case SIOCBONDRELEASE: 5168 case SIOCBONDSETHWADDR: 5169 case SIOCBONDCHANGEACTIVE: 5170 case SIOCBRADDIF: 5171 case SIOCBRDELIF: 5172 case SIOCSHWTSTAMP: 5173 if (!capable(CAP_NET_ADMIN)) 5174 return -EPERM; 5175 /* fall through */ 5176 case SIOCBONDSLAVEINFOQUERY: 5177 case SIOCBONDINFOQUERY: 5178 dev_load(net, ifr.ifr_name); 5179 rtnl_lock(); 5180 ret = dev_ifsioc(net, &ifr, cmd); 5181 rtnl_unlock(); 5182 return ret; 5183 5184 case SIOCGIFMEM: 5185 /* Get the per device memory space. We can add this but 5186 * currently do not support it */ 5187 case SIOCSIFMEM: 5188 /* Set the per device memory buffer space. 5189 * Not applicable in our case */ 5190 case SIOCSIFLINK: 5191 return -ENOTTY; 5192 5193 /* 5194 * Unknown or private ioctl. 
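 * (A device-private command, e.g. cmd == SIOCDEVPRIVATE + 3, is
 * handled below: the device is loaded if needed, the call is routed
 * through dev_ifsioc() and finally reaches the driver's
 * ndo_do_ioctl() handler.)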
5195 */
5196 default:
5197 if (cmd == SIOCWANDEV ||
5198 (cmd >= SIOCDEVPRIVATE &&
5199 cmd <= SIOCDEVPRIVATE + 15)) {
5200 dev_load(net, ifr.ifr_name);
5201 rtnl_lock();
5202 ret = dev_ifsioc(net, &ifr, cmd);
5203 rtnl_unlock();
5204 if (!ret && copy_to_user(arg, &ifr,
5205 sizeof(struct ifreq)))
5206 ret = -EFAULT;
5207 return ret;
5208 }
5209 /* Take care of Wireless Extensions */
5210 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5211 return wext_handle_ioctl(net, &ifr, cmd, arg);
5212 return -ENOTTY;
5213 }
5214 }
5215
5216
5217 /**
5218 * dev_new_index - allocate an ifindex
5219 * @net: the applicable net namespace
5220 *
5221 * Returns a suitable unique value for a new device interface
5222 * number. The caller must hold the rtnl semaphore or the
5223 * dev_base_lock to be sure it remains unique.
5224 */
5225 static int dev_new_index(struct net *net)
5226 {
5227 static int ifindex;
5228 for (;;) {
5229 if (++ifindex <= 0)
5230 ifindex = 1;
5231 if (!__dev_get_by_index(net, ifindex))
5232 return ifindex;
5233 }
5234 }
5235
5236 /* Delayed registration/unregistration */
5237 static LIST_HEAD(net_todo_list);
5238
5239 static void net_set_todo(struct net_device *dev)
5240 {
5241 list_add_tail(&dev->todo_list, &net_todo_list);
5242 }
5243
5244 static void rollback_registered_many(struct list_head *head)
5245 {
5246 struct net_device *dev, *tmp;
5247
5248 BUG_ON(dev_boot_phase);
5249 ASSERT_RTNL();
5250
5251 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5252 /* Some devices call without registering
5253 * for initialization unwind. Remove those
5254 * devices and proceed with the remaining.
5255 */
5256 if (dev->reg_state == NETREG_UNINITIALIZED) {
5257 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5258 dev->name, dev);
5259
5260 WARN_ON(1);
5261 list_del(&dev->unreg_list);
5262 continue;
5263 }
5264 dev->dismantle = true;
5265 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5266 }
5267
5268 /* If device is running, close it first. */
5269 dev_close_many(head);
5270
5271 list_for_each_entry(dev, head, unreg_list) {
5272 /* And unlink it from device chain. */
5273 unlist_netdevice(dev);
5274
5275 dev->reg_state = NETREG_UNREGISTERING;
5276 }
5277
5278 synchronize_net();
5279
5280 list_for_each_entry(dev, head, unreg_list) {
5281 /* Shutdown queueing discipline. */
5282 dev_shutdown(dev);
5283
5284
5285 /* Notify protocols that we are about to destroy
5286 this device. They should clean all the things.
5287 */
5288 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5289
5290 if (!dev->rtnl_link_ops ||
5291 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5292 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5293
5294 /*
5295 * Flush the unicast and multicast chains
5296 */
5297 dev_uc_flush(dev);
5298 dev_mc_flush(dev);
5299
5300 if (dev->netdev_ops->ndo_uninit)
5301 dev->netdev_ops->ndo_uninit(dev);
5302
5303 /* Notifier chain MUST detach us from master device.
*/ 5304 WARN_ON(dev->master); 5305 5306 /* Remove entries from kobject tree */ 5307 netdev_unregister_kobject(dev); 5308 } 5309 5310 /* Process any work delayed until the end of the batch */ 5311 dev = list_first_entry(head, struct net_device, unreg_list); 5312 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 5313 5314 synchronize_net(); 5315 5316 list_for_each_entry(dev, head, unreg_list) 5317 dev_put(dev); 5318 } 5319 5320 static void rollback_registered(struct net_device *dev) 5321 { 5322 LIST_HEAD(single); 5323 5324 list_add(&dev->unreg_list, &single); 5325 rollback_registered_many(&single); 5326 list_del(&single); 5327 } 5328 5329 static netdev_features_t netdev_fix_features(struct net_device *dev, 5330 netdev_features_t features) 5331 { 5332 /* Fix illegal checksum combinations */ 5333 if ((features & NETIF_F_HW_CSUM) && 5334 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5335 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5336 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5337 } 5338 5339 /* Fix illegal SG+CSUM combinations. */ 5340 if ((features & NETIF_F_SG) && 5341 !(features & NETIF_F_ALL_CSUM)) { 5342 netdev_dbg(dev, 5343 "Dropping NETIF_F_SG since no checksum feature.\n"); 5344 features &= ~NETIF_F_SG; 5345 } 5346 5347 /* TSO requires that SG is present as well. */ 5348 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5349 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5350 features &= ~NETIF_F_ALL_TSO; 5351 } 5352 5353 /* TSO ECN requires that TSO is present as well. */ 5354 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5355 features &= ~NETIF_F_TSO_ECN; 5356 5357 /* Software GSO depends on SG. */ 5358 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5359 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5360 features &= ~NETIF_F_GSO; 5361 } 5362 5363 /* UFO needs SG and checksumming */ 5364 if (features & NETIF_F_UFO) { 5365 /* maybe split UFO into V4 and V6? 
*/
5366 if (!((features & NETIF_F_GEN_CSUM) ||
5367 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5368 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5369 netdev_dbg(dev,
5370 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5371 features &= ~NETIF_F_UFO;
5372 }
5373
5374 if (!(features & NETIF_F_SG)) {
5375 netdev_dbg(dev,
5376 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5377 features &= ~NETIF_F_UFO;
5378 }
5379 }
5380
5381 return features;
5382 }
5383
5384 int __netdev_update_features(struct net_device *dev)
5385 {
5386 netdev_features_t features;
5387 int err = 0;
5388
5389 ASSERT_RTNL();
5390
5391 features = netdev_get_wanted_features(dev);
5392
5393 if (dev->netdev_ops->ndo_fix_features)
5394 features = dev->netdev_ops->ndo_fix_features(dev, features);
5395
5396 /* driver might be less strict about feature dependencies */
5397 features = netdev_fix_features(dev, features);
5398
5399 if (dev->features == features)
5400 return 0;
5401
5402 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5403 &dev->features, &features);
5404
5405 if (dev->netdev_ops->ndo_set_features)
5406 err = dev->netdev_ops->ndo_set_features(dev, features);
5407
5408 if (unlikely(err < 0)) {
5409 netdev_err(dev,
5410 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5411 err, &features, &dev->features);
5412 return -1;
5413 }
5414
5415 if (!err)
5416 dev->features = features;
5417
5418 return 1;
5419 }
5420
5421 /**
5422 * netdev_update_features - recalculate device features
5423 * @dev: the device to check
5424 *
5425 * Recalculate the dev->features set and send notifications if it
5426 * has changed. Should be called after driver or hardware dependent
5427 * conditions might have changed that influence the features.
5428 */
5429 void netdev_update_features(struct net_device *dev)
5430 {
5431 if (__netdev_update_features(dev))
5432 netdev_features_change(dev);
5433 }
5434 EXPORT_SYMBOL(netdev_update_features);
5435
5436 /**
5437 * netdev_change_features - recalculate device features
5438 * @dev: the device to check
5439 *
5440 * Recalculate the dev->features set and send notifications even
5441 * if they have not changed. Should be called instead of
5442 * netdev_update_features() if dev->vlan_features might also
5443 * have changed, to allow the changes to be propagated to
5444 * stacked VLAN devices.
5445 */
5446 void netdev_change_features(struct net_device *dev)
5447 {
5448 __netdev_update_features(dev);
5449 netdev_features_change(dev);
5450 }
5451 EXPORT_SYMBOL(netdev_change_features);
5452
5453 /**
5454 * netif_stacked_transfer_operstate - transfer operstate
5455 * @rootdev: the root or lower level device to transfer state from
5456 * @dev: the device to transfer operstate to
5457 *
5458 * Transfer operational state from root to device. This is normally
5459 * called when a stacking relationship exists between the root
5460 * device and the device (a leaf device).
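 *
 * E.g. a VLAN device mirroring the state of its underlying device
 * (sketch, names hypothetical):
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);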
5461 */ 5462 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5463 struct net_device *dev) 5464 { 5465 if (rootdev->operstate == IF_OPER_DORMANT) 5466 netif_dormant_on(dev); 5467 else 5468 netif_dormant_off(dev); 5469 5470 if (netif_carrier_ok(rootdev)) { 5471 if (!netif_carrier_ok(dev)) 5472 netif_carrier_on(dev); 5473 } else { 5474 if (netif_carrier_ok(dev)) 5475 netif_carrier_off(dev); 5476 } 5477 } 5478 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5479 5480 #ifdef CONFIG_RPS 5481 static int netif_alloc_rx_queues(struct net_device *dev) 5482 { 5483 unsigned int i, count = dev->num_rx_queues; 5484 struct netdev_rx_queue *rx; 5485 5486 BUG_ON(count < 1); 5487 5488 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5489 if (!rx) { 5490 pr_err("netdev: Unable to allocate %u rx queues\n", count); 5491 return -ENOMEM; 5492 } 5493 dev->_rx = rx; 5494 5495 for (i = 0; i < count; i++) 5496 rx[i].dev = dev; 5497 return 0; 5498 } 5499 #endif 5500 5501 static void netdev_init_one_queue(struct net_device *dev, 5502 struct netdev_queue *queue, void *_unused) 5503 { 5504 /* Initialize queue lock */ 5505 spin_lock_init(&queue->_xmit_lock); 5506 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5507 queue->xmit_lock_owner = -1; 5508 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5509 queue->dev = dev; 5510 #ifdef CONFIG_BQL 5511 dql_init(&queue->dql, HZ); 5512 #endif 5513 } 5514 5515 static int netif_alloc_netdev_queues(struct net_device *dev) 5516 { 5517 unsigned int count = dev->num_tx_queues; 5518 struct netdev_queue *tx; 5519 5520 BUG_ON(count < 1); 5521 5522 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5523 if (!tx) { 5524 pr_err("netdev: Unable to allocate %u tx queues\n", count); 5525 return -ENOMEM; 5526 } 5527 dev->_tx = tx; 5528 5529 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5530 spin_lock_init(&dev->tx_global_lock); 5531 5532 return 0; 5533 } 5534 5535 /** 5536 * register_netdevice - register a network device 5537 * @dev: device to register 5538 * 5539 * Take a completed network device structure and add it to the kernel 5540 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5541 * chain. 0 is returned on success. A negative errno code is returned 5542 * on a failure to set up the device, or if the name is a duplicate. 5543 * 5544 * Callers must hold the rtnl semaphore. You may want 5545 * register_netdev() instead of this. 5546 * 5547 * BUGS: 5548 * The locking appears insufficient to guarantee two parallel registers 5549 * will not get the same name. 5550 */ 5551 5552 int register_netdevice(struct net_device *dev) 5553 { 5554 int ret; 5555 struct net *net = dev_net(dev); 5556 5557 BUG_ON(dev_boot_phase); 5558 ASSERT_RTNL(); 5559 5560 might_sleep(); 5561 5562 /* When net_device's are persistent, this will be fatal. 
*/
5563 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5564 BUG_ON(!net);
5565
5566 spin_lock_init(&dev->addr_list_lock);
5567 netdev_set_addr_lockdep_class(dev);
5568
5569 dev->iflink = -1;
5570
5571 ret = dev_get_valid_name(dev, dev->name);
5572 if (ret < 0)
5573 goto out;
5574
5575 /* Init, if this function is available */
5576 if (dev->netdev_ops->ndo_init) {
5577 ret = dev->netdev_ops->ndo_init(dev);
5578 if (ret) {
5579 if (ret > 0)
5580 ret = -EIO;
5581 goto out;
5582 }
5583 }
5584
5585 dev->ifindex = dev_new_index(net);
5586 if (dev->iflink == -1)
5587 dev->iflink = dev->ifindex;
5588
5589 /* Transfer changeable features to wanted_features and enable
5590 * software offloads (GSO and GRO).
5591 */
5592 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5593 dev->features |= NETIF_F_SOFT_FEATURES;
5594 dev->wanted_features = dev->features & dev->hw_features;
5595
5596 /* Turn on no cache copy if HW is doing checksum */
5597 if (!(dev->flags & IFF_LOOPBACK)) {
5598 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5599 if (dev->features & NETIF_F_ALL_CSUM) {
5600 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5601 dev->features |= NETIF_F_NOCACHE_COPY;
5602 }
5603 }
5604
5605 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5606 */
5607 dev->vlan_features |= NETIF_F_HIGHDMA;
5608
5609 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5610 ret = notifier_to_errno(ret);
5611 if (ret)
5612 goto err_uninit;
5613
5614 ret = netdev_register_kobject(dev);
5615 if (ret)
5616 goto err_uninit;
5617 dev->reg_state = NETREG_REGISTERED;
5618
5619 __netdev_update_features(dev);
5620
5621 /*
5622 * Default initial state at registration is that the
5623 * device is present.
5624 */
5625
5626 set_bit(__LINK_STATE_PRESENT, &dev->state);
5627
5628 dev_init_scheduler(dev);
5629 dev_hold(dev);
5630 list_netdevice(dev);
5631 add_device_randomness(dev->dev_addr, dev->addr_len);
5632
5633 /* Notify protocols that a new device appeared. */
5634 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5635 ret = notifier_to_errno(ret);
5636 if (ret) {
5637 rollback_registered(dev);
5638 dev->reg_state = NETREG_UNREGISTERED;
5639 }
5640 /*
5641 * Prevent userspace races by waiting until the network
5642 * device is fully set up before sending notifications.
5643 */
5644 if (!dev->rtnl_link_ops ||
5645 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5646 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5647
5648 out:
5649 return ret;
5650
5651 err_uninit:
5652 if (dev->netdev_ops->ndo_uninit)
5653 dev->netdev_ops->ndo_uninit(dev);
5654 goto out;
5655 }
5656 EXPORT_SYMBOL(register_netdevice);
5657
5658 /**
5659 * init_dummy_netdev - init a dummy network device for NAPI
5660 * @dev: device to init
5661 *
5662 * This takes a network device structure and initializes the minimum
5663 * number of fields so it can be used to schedule NAPI polls without
5664 * registering a full-blown interface. This is to be used by drivers
5665 * that need to tie several hardware interfaces to a single NAPI
5666 * poll scheduler due to HW limitations.
5667 */
5668 int init_dummy_netdev(struct net_device *dev)
5669 {
5670 /* Clear everything.
Note we don't initialize spinlocks,
5671 * as they aren't supposed to be taken by any of the
5672 * NAPI code and this dummy netdev is supposed to be
5673 * only ever used for NAPI polls
5674 */
5675 memset(dev, 0, sizeof(struct net_device));
5676
5677 /* make sure we BUG if trying to hit standard
5678 * register/unregister code path
5679 */
5680 dev->reg_state = NETREG_DUMMY;
5681
5682 /* NAPI wants this */
5683 INIT_LIST_HEAD(&dev->napi_list);
5684
5685 /* a dummy interface is started by default */
5686 set_bit(__LINK_STATE_PRESENT, &dev->state);
5687 set_bit(__LINK_STATE_START, &dev->state);
5688
5689 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5690 * because users of this 'device' don't need to change
5691 * its refcount.
5692 */
5693
5694 return 0;
5695 }
5696 EXPORT_SYMBOL_GPL(init_dummy_netdev);
5697
5698
5699 /**
5700 * register_netdev - register a network device
5701 * @dev: device to register
5702 *
5703 * Take a completed network device structure and add it to the kernel
5704 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5705 * chain. 0 is returned on success. A negative errno code is returned
5706 * on a failure to set up the device, or if the name is a duplicate.
5707 *
5708 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5709 * and expands the device name if you passed a format string to
5710 * alloc_netdev.
5711 */
5712 int register_netdev(struct net_device *dev)
5713 {
5714 int err;
5715
5716 rtnl_lock();
5717 err = register_netdevice(dev);
5718 rtnl_unlock();
5719 return err;
5720 }
5721 EXPORT_SYMBOL(register_netdev);
5722
5723 int netdev_refcnt_read(const struct net_device *dev)
5724 {
5725 int i, refcnt = 0;
5726
5727 for_each_possible_cpu(i)
5728 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5729 return refcnt;
5730 }
5731 EXPORT_SYMBOL(netdev_refcnt_read);
5732
5733 /**
5734 * netdev_wait_allrefs - wait until all references are gone.
5735 *
5736 * This is called when unregistering network devices.
5737 *
5738 * Any protocol or device that holds a reference should register
5739 * for netdevice notification, and cleanup and put back the
5740 * reference if they receive an UNREGISTER event.
5741 * We can get stuck here if buggy protocols don't correctly
5742 * call dev_put.
5743 */
5744 static void netdev_wait_allrefs(struct net_device *dev)
5745 {
5746 unsigned long rebroadcast_time, warning_time;
5747 int refcnt;
5748
5749 linkwatch_forget_dev(dev);
5750
5751 rebroadcast_time = warning_time = jiffies;
5752 refcnt = netdev_refcnt_read(dev);
5753
5754 while (refcnt != 0) {
5755 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5756 rtnl_lock();
5757
5758 /* Rebroadcast unregister notification */
5759 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5760 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5761 * should have already handled it the first time */
5762
5763 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5764 &dev->state)) {
5765 /* We must not have linkwatch events
5766 * pending on unregister. If this
5767 * happens, we simply run the queue
5768 * unscheduled, resulting in a noop
5769 * for this device.
5770 */
5771 linkwatch_run_queue();
5772 }
5773
5774 __rtnl_unlock();
5775
5776 rebroadcast_time = jiffies;
5777 }
5778
5779 msleep(250);
5780
5781 refcnt = netdev_refcnt_read(dev);
5782
5783 if (time_after(jiffies, warning_time + 10 * HZ)) {
5784 pr_emerg("unregister_netdevice: waiting for %s to become free.
Usage count = %d\n", 5785 dev->name, refcnt); 5786 warning_time = jiffies; 5787 } 5788 } 5789 } 5790 5791 /* The sequence is: 5792 * 5793 * rtnl_lock(); 5794 * ... 5795 * register_netdevice(x1); 5796 * register_netdevice(x2); 5797 * ... 5798 * unregister_netdevice(y1); 5799 * unregister_netdevice(y2); 5800 * ... 5801 * rtnl_unlock(); 5802 * free_netdev(y1); 5803 * free_netdev(y2); 5804 * 5805 * We are invoked by rtnl_unlock(). 5806 * This allows us to deal with problems: 5807 * 1) We can delete sysfs objects which invoke hotplug 5808 * without deadlocking with linkwatch via keventd. 5809 * 2) Since we run with the RTNL semaphore not held, we can sleep 5810 * safely in order to wait for the netdev refcnt to drop to zero. 5811 * 5812 * We must not return until all unregister events added during 5813 * the interval the lock was held have been completed. 5814 */ 5815 void netdev_run_todo(void) 5816 { 5817 struct list_head list; 5818 5819 /* Snapshot list, allow later requests */ 5820 list_replace_init(&net_todo_list, &list); 5821 5822 __rtnl_unlock(); 5823 5824 /* Wait for rcu callbacks to finish before attempting to drain 5825 * the device list. This usually avoids a 250ms wait. 5826 */ 5827 if (!list_empty(&list)) 5828 rcu_barrier(); 5829 5830 while (!list_empty(&list)) { 5831 struct net_device *dev 5832 = list_first_entry(&list, struct net_device, todo_list); 5833 list_del(&dev->todo_list); 5834 5835 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5836 pr_err("network todo '%s' but state %d\n", 5837 dev->name, dev->reg_state); 5838 dump_stack(); 5839 continue; 5840 } 5841 5842 dev->reg_state = NETREG_UNREGISTERED; 5843 5844 on_each_cpu(flush_backlog, dev, 1); 5845 5846 netdev_wait_allrefs(dev); 5847 5848 /* paranoia */ 5849 BUG_ON(netdev_refcnt_read(dev)); 5850 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 5851 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 5852 WARN_ON(dev->dn_ptr); 5853 5854 if (dev->destructor) 5855 dev->destructor(dev); 5856 5857 /* Free network device */ 5858 kobject_put(&dev->dev.kobj); 5859 } 5860 } 5861 5862 /* Convert net_device_stats to rtnl_link_stats64. They have the same 5863 * fields in the same order, with only the type differing. 5864 */ 5865 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5866 const struct net_device_stats *netdev_stats) 5867 { 5868 #if BITS_PER_LONG == 64 5869 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5870 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5871 #else 5872 size_t i, n = sizeof(*stats64) / sizeof(u64); 5873 const unsigned long *src = (const unsigned long *)netdev_stats; 5874 u64 *dst = (u64 *)stats64; 5875 5876 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != 5877 sizeof(*stats64) / sizeof(u64)); 5878 for (i = 0; i < n; i++) 5879 dst[i] = src[i]; 5880 #endif 5881 } 5882 EXPORT_SYMBOL(netdev_stats_to_stats64); 5883 5884 /** 5885 * dev_get_stats - get network device statistics 5886 * @dev: device to get statistics from 5887 * @storage: place to store stats 5888 * 5889 * Get network statistics from device. Return @storage. 5890 * The device driver may provide its own method by setting 5891 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 5892 * otherwise the internal statistics structure is used. 
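 *
 * A sketched caller, with @storage typically on the stack (the same
 * pattern dev_seq_printf_stats() uses above):
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	pr_info("%s: %llu rx bytes\n", dev->name, stats->rx_bytes);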
5893 */
5894 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5895 struct rtnl_link_stats64 *storage)
5896 {
5897 const struct net_device_ops *ops = dev->netdev_ops;
5898
5899 if (ops->ndo_get_stats64) {
5900 memset(storage, 0, sizeof(*storage));
5901 ops->ndo_get_stats64(dev, storage);
5902 } else if (ops->ndo_get_stats) {
5903 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5904 } else {
5905 netdev_stats_to_stats64(storage, &dev->stats);
5906 }
5907 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5908 return storage;
5909 }
5910 EXPORT_SYMBOL(dev_get_stats);
5911
5912 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5913 {
5914 struct netdev_queue *queue = dev_ingress_queue(dev);
5915
5916 #ifdef CONFIG_NET_CLS_ACT
5917 if (queue)
5918 return queue;
5919 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5920 if (!queue)
5921 return NULL;
5922 netdev_init_one_queue(dev, queue, NULL);
5923 queue->qdisc = &noop_qdisc;
5924 queue->qdisc_sleeping = &noop_qdisc;
5925 rcu_assign_pointer(dev->ingress_queue, queue);
5926 #endif
5927 return queue;
5928 }
5929
5930 /**
5931 * alloc_netdev_mqs - allocate network device
5932 * @sizeof_priv: size of private data to allocate space for
5933 * @name: device name format string
5934 * @setup: callback to initialize device
5935 * @txqs: the number of TX subqueues to allocate
5936 * @rxqs: the number of RX subqueues to allocate
5937 *
5938 * Allocates a struct net_device with private data area for driver use
5939 * and performs basic initialization. Also allocates subqueue structs
5940 * for each queue on the device.
5941 */
5942 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5943 void (*setup)(struct net_device *),
5944 unsigned int txqs, unsigned int rxqs)
5945 {
5946 struct net_device *dev;
5947 size_t alloc_size;
5948 struct net_device *p;
5949
5950 BUG_ON(strlen(name) >= sizeof(dev->name));
5951
5952 if (txqs < 1) {
5953 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5954 return NULL;
5955 }
5956
5957 #ifdef CONFIG_RPS
5958 if (rxqs < 1) {
5959 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5960 return NULL;
5961 }
5962 #endif
5963
5964 alloc_size = sizeof(struct net_device);
5965 if (sizeof_priv) {
5966 /* ensure 32-byte alignment of private area */
5967 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5968 alloc_size += sizeof_priv;
5969 }
5970 /* ensure 32-byte alignment of whole construct */
5971 alloc_size += NETDEV_ALIGN - 1;
5972
5973 p = kzalloc(alloc_size, GFP_KERNEL);
5974 if (!p) {
5975 pr_err("alloc_netdev: Unable to allocate device\n");
5976 return NULL;
5977 }
5978
5979 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5980 dev->padded = (char *)dev - (char *)p;
5981
5982 dev->pcpu_refcnt = alloc_percpu(int);
5983 if (!dev->pcpu_refcnt)
5984 goto free_p;
5985
5986 if (dev_addr_init(dev))
5987 goto free_pcpu;
5988
5989 dev_mc_init(dev);
5990 dev_uc_init(dev);
5991
5992 dev_net_set(dev, &init_net);
5993
5994 dev->gso_max_size = GSO_MAX_SIZE;
5995 dev->gso_max_segs = GSO_MAX_SEGS;
5996
5997 INIT_LIST_HEAD(&dev->napi_list);
5998 INIT_LIST_HEAD(&dev->unreg_list);
5999 INIT_LIST_HEAD(&dev->link_watch_list);
6000 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6001 setup(dev);
6002
6003 dev->num_tx_queues = txqs;
6004 dev->real_num_tx_queues = txqs;
6005 if (netif_alloc_netdev_queues(dev))
6006 goto free_all;
6007
6008 #ifdef CONFIG_RPS
6009 dev->num_rx_queues = rxqs;
6010 dev->real_num_rx_queues = rxqs;
6011 if (netif_alloc_rx_queues(dev))
6012
goto free_all; 6013 #endif 6014 6015 strcpy(dev->name, name); 6016 dev->group = INIT_NETDEV_GROUP; 6017 return dev; 6018 6019 free_all: 6020 free_netdev(dev); 6021 return NULL; 6022 6023 free_pcpu: 6024 free_percpu(dev->pcpu_refcnt); 6025 kfree(dev->_tx); 6026 #ifdef CONFIG_RPS 6027 kfree(dev->_rx); 6028 #endif 6029 6030 free_p: 6031 kfree(p); 6032 return NULL; 6033 } 6034 EXPORT_SYMBOL(alloc_netdev_mqs); 6035 6036 /** 6037 * free_netdev - free network device 6038 * @dev: device 6039 * 6040 * This function does the last stage of destroying an allocated device 6041 * interface. The reference to the device object is released. 6042 * If this is the last reference then it will be freed. 6043 */ 6044 void free_netdev(struct net_device *dev) 6045 { 6046 struct napi_struct *p, *n; 6047 6048 release_net(dev_net(dev)); 6049 6050 kfree(dev->_tx); 6051 #ifdef CONFIG_RPS 6052 kfree(dev->_rx); 6053 #endif 6054 6055 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 6056 6057 /* Flush device addresses */ 6058 dev_addr_flush(dev); 6059 6060 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 6061 netif_napi_del(p); 6062 6063 free_percpu(dev->pcpu_refcnt); 6064 dev->pcpu_refcnt = NULL; 6065 6066 /* Compatibility with error handling in drivers */ 6067 if (dev->reg_state == NETREG_UNINITIALIZED) { 6068 kfree((char *)dev - dev->padded); 6069 return; 6070 } 6071 6072 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 6073 dev->reg_state = NETREG_RELEASED; 6074 6075 /* will free via device release */ 6076 put_device(&dev->dev); 6077 } 6078 EXPORT_SYMBOL(free_netdev); 6079 6080 /** 6081 * synchronize_net - Synchronize with packet receive processing 6082 * 6083 * Wait for packets currently being received to be done. 6084 * Does not block later packets from starting. 6085 */ 6086 void synchronize_net(void) 6087 { 6088 might_sleep(); 6089 if (rtnl_is_locked()) 6090 synchronize_rcu_expedited(); 6091 else 6092 synchronize_rcu(); 6093 } 6094 EXPORT_SYMBOL(synchronize_net); 6095 6096 /** 6097 * unregister_netdevice_queue - remove device from the kernel 6098 * @dev: device 6099 * @head: list 6100 * 6101 * This function shuts down a device interface and removes it 6102 * from the kernel tables. 6103 * If head not NULL, device is queued to be unregistered later. 6104 * 6105 * Callers must hold the rtnl semaphore. You may want 6106 * unregister_netdev() instead of this. 6107 */ 6108 6109 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 6110 { 6111 ASSERT_RTNL(); 6112 6113 if (head) { 6114 list_move_tail(&dev->unreg_list, head); 6115 } else { 6116 rollback_registered(dev); 6117 /* Finish processing unregister after unlock */ 6118 net_set_todo(dev); 6119 } 6120 } 6121 EXPORT_SYMBOL(unregister_netdevice_queue); 6122 6123 /** 6124 * unregister_netdevice_many - unregister many devices 6125 * @head: list of devices 6126 */ 6127 void unregister_netdevice_many(struct list_head *head) 6128 { 6129 struct net_device *dev; 6130 6131 if (!list_empty(head)) { 6132 rollback_registered_many(head); 6133 list_for_each_entry(dev, head, unreg_list) 6134 net_set_todo(dev); 6135 } 6136 } 6137 EXPORT_SYMBOL(unregister_netdevice_many); 6138 6139 /** 6140 * unregister_netdev - remove device from the kernel 6141 * @dev: device 6142 * 6143 * This function shuts down a device interface and removes it 6144 * from the kernel tables. 6145 * 6146 * This is just a wrapper for unregister_netdevice that takes 6147 * the rtnl semaphore. In general you want to use this and not 6148 * unregister_netdevice. 
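 *
 * The usual module-exit pairing is (sketch):
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);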
6149 */
6150 void unregister_netdev(struct net_device *dev)
6151 {
6152 rtnl_lock();
6153 unregister_netdevice(dev);
6154 rtnl_unlock();
6155 }
6156 EXPORT_SYMBOL(unregister_netdev);
6157
6158 /**
6159 * dev_change_net_namespace - move device to a different network namespace
6160 * @dev: device
6161 * @net: network namespace
6162 * @pat: If not NULL name pattern to try if the current device name
6163 * is already taken in the destination network namespace.
6164 *
6165 * This function shuts down a device interface and moves it
6166 * to a new network namespace. On success 0 is returned, on
6167 * a failure a negative errno code is returned.
6168 *
6169 * Callers must hold the rtnl semaphore.
6170 */
6171
6172 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6173 {
6174 int err;
6175
6176 ASSERT_RTNL();
6177
6178 /* Don't allow namespace local devices to be moved. */
6179 err = -EINVAL;
6180 if (dev->features & NETIF_F_NETNS_LOCAL)
6181 goto out;
6182
6183 /* Ensure the device has been registered */
6184 err = -EINVAL;
6185 if (dev->reg_state != NETREG_REGISTERED)
6186 goto out;
6187
6188 /* Get out if there is nothing to do */
6189 err = 0;
6190 if (net_eq(dev_net(dev), net))
6191 goto out;
6192
6193 /* Pick the destination device name, and ensure
6194 * we can use it in the destination network namespace.
6195 */
6196 err = -EEXIST;
6197 if (__dev_get_by_name(net, dev->name)) {
6198 /* We get here if we can't use the current device name */
6199 if (!pat)
6200 goto out;
6201 if (dev_get_valid_name(dev, pat) < 0)
6202 goto out;
6203 }
6204
6205 /*
6206 * And now a mini version of register_netdevice() and unregister_netdevice().
6207 */
6208
6209 /* If device is running close it first. */
6210 dev_close(dev);
6211
6212 /* And unlink it from device chain */
6213 err = -ENODEV;
6214 unlist_netdevice(dev);
6215
6216 synchronize_net();
6217
6218 /* Shutdown queueing discipline. */
6219 dev_shutdown(dev);
6220
6221 /* Notify protocols that we are about to destroy
6222 this device. They should clean all the things.
6223
6224 Note that dev->reg_state stays at NETREG_REGISTERED.
6225 This is wanted because this way 8021q and macvlan know
6226 the device is just moving and can keep their slaves up.
6227 */
6228 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6229 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6230 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6231
6232 /*
6233 * Flush the unicast and multicast chains
6234 */
6235 dev_uc_flush(dev);
6236 dev_mc_flush(dev);
6237
6238 /* Actually switch the network namespace */
6239 dev_net_set(dev, net);
6240
6241 /* If there is an ifindex conflict assign a new one */
6242 if (__dev_get_by_index(net, dev->ifindex)) {
6243 int iflink = (dev->iflink == dev->ifindex);
6244 dev->ifindex = dev_new_index(net);
6245 if (iflink)
6246 dev->iflink = dev->ifindex;
6247 }
6248
6249 /* Fixup kobjects */
6250 err = device_rename(&dev->dev, dev->name);
6251 WARN_ON(err);
6252
6253 /* Add the device back in the hashes */
6254 list_netdevice(dev);
6255
6256 /* Notify protocols that a new device appeared. */
6257 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6258
6259 /*
6260 * Prevent userspace races by waiting until the network
6261 * device is fully set up before sending notifications.
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
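/*
 * Illustrative sketch, with hypothetical aggregation-driver state: a
 * master device (e.g. a bond or bridge) would typically recompute its
 * feature set by folding in each slave in turn, starting from the mask
 * of features it is willing to support. "master" and its "slave_list"
 * are assumptions, not names from this file:
 *
 *	netdev_features_t features = mask;
 *	struct slave *slave;
 *
 *	list_for_each_entry(slave, &master->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */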
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

int __netdev_printk(const char *level, const struct net_device *dev,
		    struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL(__netdev_printk);

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);
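/*
 * Illustrative sketch: drivers normally use the level wrappers generated
 * just below (netdev_err(), netdev_info(), ...) rather than calling
 * netdev_printk() directly, so that messages are tagged with the driver
 * and device name. A hypothetical error path ("mydrv_interrupt" is an
 * assumed driver handler, not a kernel symbol) might read:
 *
 *	err = request_irq(dev->irq, mydrv_interrupt, 0, dev->name, dev);
 *	if (err) {
 *		netdev_err(dev, "unable to get IRQ %d (err=%d)\n",
 *			   dev->irq, err);
 *		return err;
 *	}
 */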
#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;

	/* Push all migratable network devices back to the
	 * initial network namespace.
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
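/*
 * Illustrative sketch: netdev_net_ops and default_device_ops above follow
 * the generic pernet_operations pattern. A hypothetical subsystem "foo"
 * that keeps per-namespace state would register init/exit hooks the same
 * way (all "foo" names are assumptions, not kernel APIs):
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		... allocate per-namespace state, hang it off net ...
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		... free that per-namespace state ...
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *	};
 *
 *	err = register_pernet_subsys(&foo_net_ops);
 */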
/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single-threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices: the first device that appears and the last
	 * network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);