1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3 Protocol independent device support routines. 4 * 5 * Derived from the non IP parts of dev.c 1.0.19 6 * Authors: Ross Biro 7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 8 * Mark Evans, <evansmp@uhura.aston.ac.uk> 9 * 10 * Additional Authors: 11 * Florian la Roche <rzsfl@rz.uni-sb.de> 12 * Alan Cox <gw4pts@gw4pts.ampr.org> 13 * David Hinds <dahinds@users.sourceforge.net> 14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 15 * Adam Sulmicki <adam@cfar.umd.edu> 16 * Pekka Riikonen <priikone@poesidon.pspt.fi> 17 * 18 * Changes: 19 * D.J. Barrow : Fixed bug where dev->refcnt gets set 20 * to 2 if register_netdev gets called 21 * before net_dev_init & also removed a 22 * few lines of code in the process. 23 * Alan Cox : device private ioctl copies fields back. 24 * Alan Cox : Transmit queue code does relevant 25 * stunts to keep the queue safe. 26 * Alan Cox : Fixed double lock. 27 * Alan Cox : Fixed promisc NULL pointer trap 28 * ???????? : Support the full private ioctl range 29 * Alan Cox : Moved ioctl permission check into 30 * drivers 31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI 32 * Alan Cox : 100 backlog just doesn't cut it when 33 * you start doing multicast video 8) 34 * Alan Cox : Rewrote net_bh and list manager. 35 * Alan Cox : Fix ETH_P_ALL echoback lengths. 36 * Alan Cox : Took out transmit every packet pass 37 * Saved a few bytes in the ioctl handler 38 * Alan Cox : Network driver sets packet type before 39 * calling netif_rx. Saves a function 40 * call a packet. 41 * Alan Cox : Hashed net_bh() 42 * Richard Kooijman: Timestamp fixes. 43 * Alan Cox : Wrong field in SIOCGIFDSTADDR 44 * Alan Cox : Device lock protection. 45 * Alan Cox : Fixed nasty side effect of device close 46 * changes. 47 * Rudi Cilibrasi : Pass the right thing to 48 * set_mac_address() 49 * Dave Miller : 32bit quantity for the device lock to 50 * make it work out on a Sparc. 51 * Bjorn Ekwall : Added KERNELD hack. 52 * Alan Cox : Cleaned up the backlog initialise. 53 * Craig Metz : SIOCGIFCONF fix if space for under 54 * 1 device. 55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there 56 * is no device open function. 57 * Andi Kleen : Fix error reporting for SIOCGIFCONF 58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF 59 * Cyrus Durgin : Cleaned for KMOD 60 * Adam Sulmicki : Bug Fix : Network Device Unload 61 * A network device unload needs to purge 62 * the backlog queue. 
63 * Paul Rusty Russell : SIOCSIFNAME 64 * Pekka Riikonen : Netdev boot-time settings code 65 * Andrew Morton : Make unregister_netdevice wait 66 * indefinitely on dev->refcnt 67 * J Hadi Salim : - Backlog queue sampling 68 * - netif_rx() feedback 69 */ 70 71 #include <linux/uaccess.h> 72 #include <linux/bitops.h> 73 #include <linux/capability.h> 74 #include <linux/cpu.h> 75 #include <linux/types.h> 76 #include <linux/kernel.h> 77 #include <linux/hash.h> 78 #include <linux/slab.h> 79 #include <linux/sched.h> 80 #include <linux/sched/mm.h> 81 #include <linux/mutex.h> 82 #include <linux/string.h> 83 #include <linux/mm.h> 84 #include <linux/socket.h> 85 #include <linux/sockios.h> 86 #include <linux/errno.h> 87 #include <linux/interrupt.h> 88 #include <linux/if_ether.h> 89 #include <linux/netdevice.h> 90 #include <linux/etherdevice.h> 91 #include <linux/ethtool.h> 92 #include <linux/skbuff.h> 93 #include <linux/bpf.h> 94 #include <linux/bpf_trace.h> 95 #include <net/net_namespace.h> 96 #include <net/sock.h> 97 #include <net/busy_poll.h> 98 #include <linux/rtnetlink.h> 99 #include <linux/stat.h> 100 #include <net/dst.h> 101 #include <net/dst_metadata.h> 102 #include <net/pkt_sched.h> 103 #include <net/pkt_cls.h> 104 #include <net/checksum.h> 105 #include <net/xfrm.h> 106 #include <linux/highmem.h> 107 #include <linux/init.h> 108 #include <linux/module.h> 109 #include <linux/netpoll.h> 110 #include <linux/rcupdate.h> 111 #include <linux/delay.h> 112 #include <net/iw_handler.h> 113 #include <asm/current.h> 114 #include <linux/audit.h> 115 #include <linux/dmaengine.h> 116 #include <linux/err.h> 117 #include <linux/ctype.h> 118 #include <linux/if_arp.h> 119 #include <linux/if_vlan.h> 120 #include <linux/ip.h> 121 #include <net/ip.h> 122 #include <net/mpls.h> 123 #include <linux/ipv6.h> 124 #include <linux/in.h> 125 #include <linux/jhash.h> 126 #include <linux/random.h> 127 #include <trace/events/napi.h> 128 #include <trace/events/net.h> 129 #include <trace/events/skb.h> 130 #include <linux/inetdevice.h> 131 #include <linux/cpu_rmap.h> 132 #include <linux/static_key.h> 133 #include <linux/hashtable.h> 134 #include <linux/vmalloc.h> 135 #include <linux/if_macvlan.h> 136 #include <linux/errqueue.h> 137 #include <linux/hrtimer.h> 138 #include <linux/netfilter_ingress.h> 139 #include <linux/crash_dump.h> 140 #include <linux/sctp.h> 141 #include <net/udp_tunnel.h> 142 #include <linux/net_namespace.h> 143 #include <linux/indirect_call_wrapper.h> 144 #include <net/devlink.h> 145 146 #include "net-sysfs.h" 147 148 #define MAX_GRO_SKBS 8 149 150 /* This should be increased if a protocol with a bigger head is added. */ 151 #define GRO_MAX_HEAD (MAX_HEADER + 128) 152 153 static DEFINE_SPINLOCK(ptype_lock); 154 static DEFINE_SPINLOCK(offload_lock); 155 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 156 struct list_head ptype_all __read_mostly; /* Taps */ 157 static struct list_head offload_base __read_mostly; 158 159 static int netif_rx_internal(struct sk_buff *skb); 160 static int call_netdevice_notifiers_info(unsigned long val, 161 struct netdev_notifier_info *info); 162 static int call_netdevice_notifiers_extack(unsigned long val, 163 struct net_device *dev, 164 struct netlink_ext_ack *extack); 165 static struct napi_struct *napi_by_id(unsigned int napi_id); 166 167 /* 168 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 169 * semaphore. 
170 * 171 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 172 * 173 * Writers must hold the rtnl semaphore while they loop through the 174 * dev_base_head list, and hold dev_base_lock for writing when they do the 175 * actual updates. This allows pure readers to access the list even 176 * while a writer is preparing to update it. 177 * 178 * To put it another way, dev_base_lock is held for writing only to 179 * protect against pure readers; the rtnl semaphore provides the 180 * protection against other writers. 181 * 182 * See, for example usages, register_netdevice() and 183 * unregister_netdevice(), which must be called with the rtnl 184 * semaphore held. 185 */ 186 DEFINE_RWLOCK(dev_base_lock); 187 EXPORT_SYMBOL(dev_base_lock); 188 189 static DEFINE_MUTEX(ifalias_mutex); 190 191 /* protects napi_hash addition/deletion and napi_gen_id */ 192 static DEFINE_SPINLOCK(napi_hash_lock); 193 194 static unsigned int napi_gen_id = NR_CPUS; 195 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 196 197 static seqcount_t devnet_rename_seq; 198 199 static inline void dev_base_seq_inc(struct net *net) 200 { 201 while (++net->dev_base_seq == 0) 202 ; 203 } 204 205 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 206 { 207 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 208 209 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 210 } 211 212 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 213 { 214 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 215 } 216 217 static inline void rps_lock(struct softnet_data *sd) 218 { 219 #ifdef CONFIG_RPS 220 spin_lock(&sd->input_pkt_queue.lock); 221 #endif 222 } 223 224 static inline void rps_unlock(struct softnet_data *sd) 225 { 226 #ifdef CONFIG_RPS 227 spin_unlock(&sd->input_pkt_queue.lock); 228 #endif 229 } 230 231 /* Device list insertion */ 232 static void list_netdevice(struct net_device *dev) 233 { 234 struct net *net = dev_net(dev); 235 236 ASSERT_RTNL(); 237 238 write_lock_bh(&dev_base_lock); 239 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 240 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 241 hlist_add_head_rcu(&dev->index_hlist, 242 dev_index_hash(net, dev->ifindex)); 243 write_unlock_bh(&dev_base_lock); 244 245 dev_base_seq_inc(net); 246 } 247 248 /* Device list removal 249 * caller must respect a RCU grace period before freeing/reusing dev 250 */ 251 static void unlist_netdevice(struct net_device *dev) 252 { 253 ASSERT_RTNL(); 254 255 /* Unlink dev from the device chain */ 256 write_lock_bh(&dev_base_lock); 257 list_del_rcu(&dev->dev_list); 258 hlist_del_rcu(&dev->name_hlist); 259 hlist_del_rcu(&dev->index_hlist); 260 write_unlock_bh(&dev_base_lock); 261 262 dev_base_seq_inc(dev_net(dev)); 263 } 264 265 /* 266 * Our notifier list 267 */ 268 269 static RAW_NOTIFIER_HEAD(netdev_chain); 270 271 /* 272 * Device drivers call our routines to queue packets here. We empty the 273 * queue in the local softnet handler. 
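 *
 * For instance, a driver's receive path normally hands each frame to
 * this layer with something like the following sketch ("rx_dev" and
 * "skb" are hypothetical driver-local variables):
 *
 *	skb->protocol = eth_type_trans(skb, rx_dev);
 *	netif_rx(skb);
 *
 * netif_rx() then places the buffer on a per-CPU softnet backlog
 * queue, to be drained later from softirq context.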
274 */ 275 276 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 277 EXPORT_PER_CPU_SYMBOL(softnet_data); 278 279 #ifdef CONFIG_LOCKDEP 280 /* 281 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 282 * according to dev->type 283 */ 284 static const unsigned short netdev_lock_type[] = { 285 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 286 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 287 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 288 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 289 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 290 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 291 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 292 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 293 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 294 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 295 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 296 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 297 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 298 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 299 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 300 301 static const char *const netdev_lock_name[] = { 302 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 303 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 304 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 305 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 306 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 307 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 308 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 309 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 310 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 311 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 312 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 313 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 314 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 315 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 316 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 317 318 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 319 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 320 321 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 322 { 323 int i; 324 325 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 326 if (netdev_lock_type[i] == dev_type) 327 return i; 328 /* the last key is used by default */ 329 return ARRAY_SIZE(netdev_lock_type) - 1; 330 } 331 332 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 333 unsigned short dev_type) 334 { 335 int i; 336 337 i = netdev_lock_pos(dev_type); 338 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 339 netdev_lock_name[i]); 340 } 341 342 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 343 { 344 int i; 345 346 i = netdev_lock_pos(dev->type); 347 lockdep_set_class_and_name(&dev->addr_list_lock, 348 &netdev_addr_lock_key[i], 349 netdev_lock_name[i]); 350 } 351 #else 352 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 353 unsigned short dev_type) 354 { 355 } 356 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 357 { 358 } 359 #endif 360 361 /******************************************************************************* 362 * 363 * 
Protocol management and registration routines 364 * 365 *******************************************************************************/ 366 367 368 /* 369 * Add a protocol ID to the list. Now that the input handler is 370 * smarter we can dispense with all the messy stuff that used to be 371 * here. 372 * 373 * BEWARE!!! Protocol handlers, mangling input packets, 374 * MUST BE last in hash buckets and checking protocol handlers 375 * MUST start from promiscuous ptype_all chain in net_bh. 376 * It is true now, do not change it. 377 * Explanation follows: if protocol handler, mangling packet, will 378 * be the first on list, it is not able to sense, that packet 379 * is cloned and should be copied-on-write, so that it will 380 * change it and subsequent readers will get broken packet. 381 * --ANK (980803) 382 */ 383 384 static inline struct list_head *ptype_head(const struct packet_type *pt) 385 { 386 if (pt->type == htons(ETH_P_ALL)) 387 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 388 else 389 return pt->dev ? &pt->dev->ptype_specific : 390 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 391 } 392 393 /** 394 * dev_add_pack - add packet handler 395 * @pt: packet type declaration 396 * 397 * Add a protocol handler to the networking stack. The passed &packet_type 398 * is linked into kernel lists and may not be freed until it has been 399 * removed from the kernel lists. 400 * 401 * This call does not sleep therefore it can not 402 * guarantee all CPU's that are in middle of receiving packets 403 * will see the new packet type (until the next received packet). 404 */ 405 406 void dev_add_pack(struct packet_type *pt) 407 { 408 struct list_head *head = ptype_head(pt); 409 410 spin_lock(&ptype_lock); 411 list_add_rcu(&pt->list, head); 412 spin_unlock(&ptype_lock); 413 } 414 EXPORT_SYMBOL(dev_add_pack); 415 416 /** 417 * __dev_remove_pack - remove packet handler 418 * @pt: packet type declaration 419 * 420 * Remove a protocol handler that was previously added to the kernel 421 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 422 * from the kernel lists and can be freed or reused once this function 423 * returns. 424 * 425 * The packet type might still be in use by receivers 426 * and must not be freed until after all the CPU's have gone 427 * through a quiescent state. 428 */ 429 void __dev_remove_pack(struct packet_type *pt) 430 { 431 struct list_head *head = ptype_head(pt); 432 struct packet_type *pt1; 433 434 spin_lock(&ptype_lock); 435 436 list_for_each_entry(pt1, head, list) { 437 if (pt == pt1) { 438 list_del_rcu(&pt->list); 439 goto out; 440 } 441 } 442 443 pr_warn("dev_remove_pack: %p not found\n", pt); 444 out: 445 spin_unlock(&ptype_lock); 446 } 447 EXPORT_SYMBOL(__dev_remove_pack); 448 449 /** 450 * dev_remove_pack - remove packet handler 451 * @pt: packet type declaration 452 * 453 * Remove a protocol handler that was previously added to the kernel 454 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 455 * from the kernel lists and can be freed or reused once this function 456 * returns. 457 * 458 * This call sleeps to guarantee that no CPU is looking at the packet 459 * type after return. 460 */ 461 void dev_remove_pack(struct packet_type *pt) 462 { 463 __dev_remove_pack(pt); 464 465 synchronize_net(); 466 } 467 EXPORT_SYMBOL(dev_remove_pack); 468 469 470 /** 471 * dev_add_offload - register offload handlers 472 * @po: protocol offload declaration 473 * 474 * Add protocol offload handlers to the networking stack. 
The passed 475 * &proto_offload is linked into kernel lists and may not be freed until 476 * it has been removed from the kernel lists. 477 * 478 * This call does not sleep therefore it can not 479 * guarantee all CPU's that are in middle of receiving packets 480 * will see the new offload handlers (until the next received packet). 481 */ 482 void dev_add_offload(struct packet_offload *po) 483 { 484 struct packet_offload *elem; 485 486 spin_lock(&offload_lock); 487 list_for_each_entry(elem, &offload_base, list) { 488 if (po->priority < elem->priority) 489 break; 490 } 491 list_add_rcu(&po->list, elem->list.prev); 492 spin_unlock(&offload_lock); 493 } 494 EXPORT_SYMBOL(dev_add_offload); 495 496 /** 497 * __dev_remove_offload - remove offload handler 498 * @po: packet offload declaration 499 * 500 * Remove a protocol offload handler that was previously added to the 501 * kernel offload handlers by dev_add_offload(). The passed &offload_type 502 * is removed from the kernel lists and can be freed or reused once this 503 * function returns. 504 * 505 * The packet type might still be in use by receivers 506 * and must not be freed until after all the CPU's have gone 507 * through a quiescent state. 508 */ 509 static void __dev_remove_offload(struct packet_offload *po) 510 { 511 struct list_head *head = &offload_base; 512 struct packet_offload *po1; 513 514 spin_lock(&offload_lock); 515 516 list_for_each_entry(po1, head, list) { 517 if (po == po1) { 518 list_del_rcu(&po->list); 519 goto out; 520 } 521 } 522 523 pr_warn("dev_remove_offload: %p not found\n", po); 524 out: 525 spin_unlock(&offload_lock); 526 } 527 528 /** 529 * dev_remove_offload - remove packet offload handler 530 * @po: packet offload declaration 531 * 532 * Remove a packet offload handler that was previously added to the kernel 533 * offload handlers by dev_add_offload(). The passed &offload_type is 534 * removed from the kernel lists and can be freed or reused once this 535 * function returns. 536 * 537 * This call sleeps to guarantee that no CPU is looking at the packet 538 * type after return. 539 */ 540 void dev_remove_offload(struct packet_offload *po) 541 { 542 __dev_remove_offload(po); 543 544 synchronize_net(); 545 } 546 EXPORT_SYMBOL(dev_remove_offload); 547 548 /****************************************************************************** 549 * 550 * Device Boot-time Settings Routines 551 * 552 ******************************************************************************/ 553 554 /* Boot time configuration table */ 555 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; 556 557 /** 558 * netdev_boot_setup_add - add new setup entry 559 * @name: name of the device 560 * @map: configured settings for the device 561 * 562 * Adds new setup entry to the dev_boot_setup list. The function 563 * returns 0 on error and 1 on success. This is a generic routine to 564 * all netdevices. 565 */ 566 static int netdev_boot_setup_add(char *name, struct ifmap *map) 567 { 568 struct netdev_boot_setup *s; 569 int i; 570 571 s = dev_boot_setup; 572 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 573 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { 574 memset(s[i].name, 0, sizeof(s[i].name)); 575 strlcpy(s[i].name, name, IFNAMSIZ); 576 memcpy(&s[i].map, map, sizeof(s[i].map)); 577 break; 578 } 579 } 580 581 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; 582 } 583 584 /** 585 * netdev_boot_setup_check - check boot time settings 586 * @dev: the netdevice 587 * 588 * Check boot time settings for the device. 
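 * Such settings come from a "netdev=" entry on the kernel command
 * line, in the form <irq>,<io>,<mem_start>,<mem_end>,<name>, for
 * example "netdev=5,0x300,0,0,eth0", parsed by netdev_boot_setup()
 * below and stored via netdev_boot_setup_add().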
589 * The found settings are set for the device to be used 590 * later in the device probing. 591 * Returns 0 if no settings found, 1 if they are. 592 */ 593 int netdev_boot_setup_check(struct net_device *dev) 594 { 595 struct netdev_boot_setup *s = dev_boot_setup; 596 int i; 597 598 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 599 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && 600 !strcmp(dev->name, s[i].name)) { 601 dev->irq = s[i].map.irq; 602 dev->base_addr = s[i].map.base_addr; 603 dev->mem_start = s[i].map.mem_start; 604 dev->mem_end = s[i].map.mem_end; 605 return 1; 606 } 607 } 608 return 0; 609 } 610 EXPORT_SYMBOL(netdev_boot_setup_check); 611 612 613 /** 614 * netdev_boot_base - get address from boot time settings 615 * @prefix: prefix for network device 616 * @unit: id for network device 617 * 618 * Check boot time settings for the base address of device. 619 * The found settings are set for the device to be used 620 * later in the device probing. 621 * Returns 0 if no settings found. 622 */ 623 unsigned long netdev_boot_base(const char *prefix, int unit) 624 { 625 const struct netdev_boot_setup *s = dev_boot_setup; 626 char name[IFNAMSIZ]; 627 int i; 628 629 sprintf(name, "%s%d", prefix, unit); 630 631 /* 632 * If device already registered then return base of 1 633 * to indicate not to probe for this interface 634 */ 635 if (__dev_get_by_name(&init_net, name)) 636 return 1; 637 638 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) 639 if (!strcmp(name, s[i].name)) 640 return s[i].map.base_addr; 641 return 0; 642 } 643 644 /* 645 * Saves at boot time configured settings for any netdevice. 646 */ 647 int __init netdev_boot_setup(char *str) 648 { 649 int ints[5]; 650 struct ifmap map; 651 652 str = get_options(str, ARRAY_SIZE(ints), ints); 653 if (!str || !*str) 654 return 0; 655 656 /* Save settings */ 657 memset(&map, 0, sizeof(map)); 658 if (ints[0] > 0) 659 map.irq = ints[1]; 660 if (ints[0] > 1) 661 map.base_addr = ints[2]; 662 if (ints[0] > 2) 663 map.mem_start = ints[3]; 664 if (ints[0] > 3) 665 map.mem_end = ints[4]; 666 667 /* Add new entry to the list */ 668 return netdev_boot_setup_add(str, &map); 669 } 670 671 __setup("netdev=", netdev_boot_setup); 672 673 /******************************************************************************* 674 * 675 * Device Interface Subroutines 676 * 677 *******************************************************************************/ 678 679 /** 680 * dev_get_iflink - get 'iflink' value of a interface 681 * @dev: targeted interface 682 * 683 * Indicates the ifindex the interface is linked to. 684 * Physical interfaces have the same 'ifindex' and 'iflink' values. 685 */ 686 687 int dev_get_iflink(const struct net_device *dev) 688 { 689 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 690 return dev->netdev_ops->ndo_get_iflink(dev); 691 692 return dev->ifindex; 693 } 694 EXPORT_SYMBOL(dev_get_iflink); 695 696 /** 697 * dev_fill_metadata_dst - Retrieve tunnel egress information. 698 * @dev: targeted interface 699 * @skb: The packet. 700 * 701 * For better visibility of tunnel traffic OVS needs to retrieve 702 * egress tunnel information for a packet. Following API allows 703 * user to get this info. 
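 *
 * A caller such as an OVS action would typically use it along these
 * lines (illustrative sketch only; "vport_dev" is a hypothetical
 * egress device pointer and error handling is elided):
 *
 *	if (!dev_fill_metadata_dst(vport_dev, skb))
 *		info = skb_tunnel_info(skb);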
704 */ 705 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 706 { 707 struct ip_tunnel_info *info; 708 709 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 710 return -EINVAL; 711 712 info = skb_tunnel_info_unclone(skb); 713 if (!info) 714 return -ENOMEM; 715 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 716 return -EINVAL; 717 718 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 719 } 720 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 721 722 /** 723 * __dev_get_by_name - find a device by its name 724 * @net: the applicable net namespace 725 * @name: name to find 726 * 727 * Find an interface by name. Must be called under RTNL semaphore 728 * or @dev_base_lock. If the name is found a pointer to the device 729 * is returned. If the name is not found then %NULL is returned. The 730 * reference counters are not incremented so the caller must be 731 * careful with locks. 732 */ 733 734 struct net_device *__dev_get_by_name(struct net *net, const char *name) 735 { 736 struct net_device *dev; 737 struct hlist_head *head = dev_name_hash(net, name); 738 739 hlist_for_each_entry(dev, head, name_hlist) 740 if (!strncmp(dev->name, name, IFNAMSIZ)) 741 return dev; 742 743 return NULL; 744 } 745 EXPORT_SYMBOL(__dev_get_by_name); 746 747 /** 748 * dev_get_by_name_rcu - find a device by its name 749 * @net: the applicable net namespace 750 * @name: name to find 751 * 752 * Find an interface by name. 753 * If the name is found a pointer to the device is returned. 754 * If the name is not found then %NULL is returned. 755 * The reference counters are not incremented so the caller must be 756 * careful with locks. The caller must hold RCU lock. 757 */ 758 759 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 760 { 761 struct net_device *dev; 762 struct hlist_head *head = dev_name_hash(net, name); 763 764 hlist_for_each_entry_rcu(dev, head, name_hlist) 765 if (!strncmp(dev->name, name, IFNAMSIZ)) 766 return dev; 767 768 return NULL; 769 } 770 EXPORT_SYMBOL(dev_get_by_name_rcu); 771 772 /** 773 * dev_get_by_name - find a device by its name 774 * @net: the applicable net namespace 775 * @name: name to find 776 * 777 * Find an interface by name. This can be called from any 778 * context and does its own locking. The returned handle has 779 * the usage count incremented and the caller must use dev_put() to 780 * release it when it is no longer needed. %NULL is returned if no 781 * matching device is found. 782 */ 783 784 struct net_device *dev_get_by_name(struct net *net, const char *name) 785 { 786 struct net_device *dev; 787 788 rcu_read_lock(); 789 dev = dev_get_by_name_rcu(net, name); 790 if (dev) 791 dev_hold(dev); 792 rcu_read_unlock(); 793 return dev; 794 } 795 EXPORT_SYMBOL(dev_get_by_name); 796 797 /** 798 * __dev_get_by_index - find a device by its ifindex 799 * @net: the applicable net namespace 800 * @ifindex: index of device 801 * 802 * Search for an interface by index. Returns %NULL if the device 803 * is not found or a pointer to the device. The device has not 804 * had its reference counter increased so the caller must be careful 805 * about locking. The caller must hold either the RTNL semaphore 806 * or @dev_base_lock. 
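 *
 * A sketch of typical use under RTNL:
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_index(net, ifindex);
 *	if (dev)
 *		do_something_locked(dev);
 *	rtnl_unlock();
 *
 * where do_something_locked() stands in for whatever the caller needs
 * to do while the device is guaranteed not to go away.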
807 */ 808 809 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 810 { 811 struct net_device *dev; 812 struct hlist_head *head = dev_index_hash(net, ifindex); 813 814 hlist_for_each_entry(dev, head, index_hlist) 815 if (dev->ifindex == ifindex) 816 return dev; 817 818 return NULL; 819 } 820 EXPORT_SYMBOL(__dev_get_by_index); 821 822 /** 823 * dev_get_by_index_rcu - find a device by its ifindex 824 * @net: the applicable net namespace 825 * @ifindex: index of device 826 * 827 * Search for an interface by index. Returns %NULL if the device 828 * is not found or a pointer to the device. The device has not 829 * had its reference counter increased so the caller must be careful 830 * about locking. The caller must hold RCU lock. 831 */ 832 833 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 834 { 835 struct net_device *dev; 836 struct hlist_head *head = dev_index_hash(net, ifindex); 837 838 hlist_for_each_entry_rcu(dev, head, index_hlist) 839 if (dev->ifindex == ifindex) 840 return dev; 841 842 return NULL; 843 } 844 EXPORT_SYMBOL(dev_get_by_index_rcu); 845 846 847 /** 848 * dev_get_by_index - find a device by its ifindex 849 * @net: the applicable net namespace 850 * @ifindex: index of device 851 * 852 * Search for an interface by index. Returns NULL if the device 853 * is not found or a pointer to the device. The device returned has 854 * had a reference added and the pointer is safe until the user calls 855 * dev_put to indicate they have finished with it. 856 */ 857 858 struct net_device *dev_get_by_index(struct net *net, int ifindex) 859 { 860 struct net_device *dev; 861 862 rcu_read_lock(); 863 dev = dev_get_by_index_rcu(net, ifindex); 864 if (dev) 865 dev_hold(dev); 866 rcu_read_unlock(); 867 return dev; 868 } 869 EXPORT_SYMBOL(dev_get_by_index); 870 871 /** 872 * dev_get_by_napi_id - find a device by napi_id 873 * @napi_id: ID of the NAPI struct 874 * 875 * Search for an interface by NAPI ID. Returns %NULL if the device 876 * is not found or a pointer to the device. The device has not had 877 * its reference counter increased so the caller must be careful 878 * about locking. The caller must hold RCU lock. 879 */ 880 881 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 882 { 883 struct napi_struct *napi; 884 885 WARN_ON_ONCE(!rcu_read_lock_held()); 886 887 if (napi_id < MIN_NAPI_ID) 888 return NULL; 889 890 napi = napi_by_id(napi_id); 891 892 return napi ? napi->dev : NULL; 893 } 894 EXPORT_SYMBOL(dev_get_by_napi_id); 895 896 /** 897 * netdev_get_name - get a netdevice name, knowing its ifindex. 898 * @net: network namespace 899 * @name: a pointer to the buffer where the name will be stored. 900 * @ifindex: the ifindex of the interface to get the name from. 901 * 902 * The use of raw_seqcount_begin() and cond_resched() before 903 * retrying is required as we want to give the writers a chance 904 * to complete when CONFIG_PREEMPT is not set. 
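 *
 * Callers pass a buffer of at least IFNAMSIZ bytes, for example
 * (sketch; use_name() is a stand-in for the caller's own handling):
 *
 *	char ifname[IFNAMSIZ];
 *
 *	if (netdev_get_name(net, ifname, ifindex) == 0)
 *		use_name(ifname);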
905 */ 906 int netdev_get_name(struct net *net, char *name, int ifindex) 907 { 908 struct net_device *dev; 909 unsigned int seq; 910 911 retry: 912 seq = raw_seqcount_begin(&devnet_rename_seq); 913 rcu_read_lock(); 914 dev = dev_get_by_index_rcu(net, ifindex); 915 if (!dev) { 916 rcu_read_unlock(); 917 return -ENODEV; 918 } 919 920 strcpy(name, dev->name); 921 rcu_read_unlock(); 922 if (read_seqcount_retry(&devnet_rename_seq, seq)) { 923 cond_resched(); 924 goto retry; 925 } 926 927 return 0; 928 } 929 930 /** 931 * dev_getbyhwaddr_rcu - find a device by its hardware address 932 * @net: the applicable net namespace 933 * @type: media type of device 934 * @ha: hardware address 935 * 936 * Search for an interface by MAC address. Returns NULL if the device 937 * is not found or a pointer to the device. 938 * The caller must hold RCU or RTNL. 939 * The returned device has not had its ref count increased 940 * and the caller must therefore be careful about locking 941 * 942 */ 943 944 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 945 const char *ha) 946 { 947 struct net_device *dev; 948 949 for_each_netdev_rcu(net, dev) 950 if (dev->type == type && 951 !memcmp(dev->dev_addr, ha, dev->addr_len)) 952 return dev; 953 954 return NULL; 955 } 956 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 957 958 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 959 { 960 struct net_device *dev; 961 962 ASSERT_RTNL(); 963 for_each_netdev(net, dev) 964 if (dev->type == type) 965 return dev; 966 967 return NULL; 968 } 969 EXPORT_SYMBOL(__dev_getfirstbyhwtype); 970 971 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 972 { 973 struct net_device *dev, *ret = NULL; 974 975 rcu_read_lock(); 976 for_each_netdev_rcu(net, dev) 977 if (dev->type == type) { 978 dev_hold(dev); 979 ret = dev; 980 break; 981 } 982 rcu_read_unlock(); 983 return ret; 984 } 985 EXPORT_SYMBOL(dev_getfirstbyhwtype); 986 987 /** 988 * __dev_get_by_flags - find any device with given flags 989 * @net: the applicable net namespace 990 * @if_flags: IFF_* values 991 * @mask: bitmask of bits in if_flags to check 992 * 993 * Search for any interface with the given flags. Returns NULL if a device 994 * is not found or a pointer to the device. Must be called inside 995 * rtnl_lock(), and result refcount is unchanged. 996 */ 997 998 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 999 unsigned short mask) 1000 { 1001 struct net_device *dev, *ret; 1002 1003 ASSERT_RTNL(); 1004 1005 ret = NULL; 1006 for_each_netdev(net, dev) { 1007 if (((dev->flags ^ if_flags) & mask) == 0) { 1008 ret = dev; 1009 break; 1010 } 1011 } 1012 return ret; 1013 } 1014 EXPORT_SYMBOL(__dev_get_by_flags); 1015 1016 /** 1017 * dev_valid_name - check if name is okay for network device 1018 * @name: name string 1019 * 1020 * Network device names need to be valid file names to 1021 * to allow sysfs to work. We also disallow any kind of 1022 * whitespace. 
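 *
 * For example, "eth0" or "veth-a1" pass this check, while an empty
 * string, ".", "..", a name containing '/' or ':', a name containing
 * whitespace, or a name of IFNAMSIZ or more characters is rejected.
 * Callers typically do:
 *
 *	if (!dev_valid_name(newname))
 *		return -EINVAL;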
1023 */ 1024 bool dev_valid_name(const char *name) 1025 { 1026 if (*name == '\0') 1027 return false; 1028 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1029 return false; 1030 if (!strcmp(name, ".") || !strcmp(name, "..")) 1031 return false; 1032 1033 while (*name) { 1034 if (*name == '/' || *name == ':' || isspace(*name)) 1035 return false; 1036 name++; 1037 } 1038 return true; 1039 } 1040 EXPORT_SYMBOL(dev_valid_name); 1041 1042 /** 1043 * __dev_alloc_name - allocate a name for a device 1044 * @net: network namespace to allocate the device name in 1045 * @name: name format string 1046 * @buf: scratch buffer and result name string 1047 * 1048 * Passed a format string - eg "lt%d" it will try and find a suitable 1049 * id. It scans list of devices to build up a free map, then chooses 1050 * the first empty slot. The caller must hold the dev_base or rtnl lock 1051 * while allocating the name and adding the device in order to avoid 1052 * duplicates. 1053 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1054 * Returns the number of the unit assigned or a negative errno code. 1055 */ 1056 1057 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1058 { 1059 int i = 0; 1060 const char *p; 1061 const int max_netdevices = 8*PAGE_SIZE; 1062 unsigned long *inuse; 1063 struct net_device *d; 1064 1065 if (!dev_valid_name(name)) 1066 return -EINVAL; 1067 1068 p = strchr(name, '%'); 1069 if (p) { 1070 /* 1071 * Verify the string as this thing may have come from 1072 * the user. There must be either one "%d" and no other "%" 1073 * characters. 1074 */ 1075 if (p[1] != 'd' || strchr(p + 2, '%')) 1076 return -EINVAL; 1077 1078 /* Use one page as a bit array of possible slots */ 1079 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1080 if (!inuse) 1081 return -ENOMEM; 1082 1083 for_each_netdev(net, d) { 1084 if (!sscanf(d->name, name, &i)) 1085 continue; 1086 if (i < 0 || i >= max_netdevices) 1087 continue; 1088 1089 /* avoid cases where sscanf is not exact inverse of printf */ 1090 snprintf(buf, IFNAMSIZ, name, i); 1091 if (!strncmp(buf, d->name, IFNAMSIZ)) 1092 set_bit(i, inuse); 1093 } 1094 1095 i = find_first_zero_bit(inuse, max_netdevices); 1096 free_page((unsigned long) inuse); 1097 } 1098 1099 snprintf(buf, IFNAMSIZ, name, i); 1100 if (!__dev_get_by_name(net, buf)) 1101 return i; 1102 1103 /* It is possible to run out of possible slots 1104 * when the name is long and there isn't enough space left 1105 * for the digits, or if all bits are used. 1106 */ 1107 return -ENFILE; 1108 } 1109 1110 static int dev_alloc_name_ns(struct net *net, 1111 struct net_device *dev, 1112 const char *name) 1113 { 1114 char buf[IFNAMSIZ]; 1115 int ret; 1116 1117 BUG_ON(!net); 1118 ret = __dev_alloc_name(net, name, buf); 1119 if (ret >= 0) 1120 strlcpy(dev->name, buf, IFNAMSIZ); 1121 return ret; 1122 } 1123 1124 /** 1125 * dev_alloc_name - allocate a name for a device 1126 * @dev: device 1127 * @name: name format string 1128 * 1129 * Passed a format string - eg "lt%d" it will try and find a suitable 1130 * id. It scans list of devices to build up a free map, then chooses 1131 * the first empty slot. The caller must hold the dev_base or rtnl lock 1132 * while allocating the name and adding the device in order to avoid 1133 * duplicates. 1134 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1135 * Returns the number of the unit assigned or a negative errno code. 
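 *
 * For example, a driver that wants the conventional "eth%d" naming
 * can do (sketch; the error label is hypothetical):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto out_free;
 *
 * after which dev->name holds the first free name such as "eth0" or
 * "eth1".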
1136 */ 1137 1138 int dev_alloc_name(struct net_device *dev, const char *name) 1139 { 1140 return dev_alloc_name_ns(dev_net(dev), dev, name); 1141 } 1142 EXPORT_SYMBOL(dev_alloc_name); 1143 1144 int dev_get_valid_name(struct net *net, struct net_device *dev, 1145 const char *name) 1146 { 1147 BUG_ON(!net); 1148 1149 if (!dev_valid_name(name)) 1150 return -EINVAL; 1151 1152 if (strchr(name, '%')) 1153 return dev_alloc_name_ns(net, dev, name); 1154 else if (__dev_get_by_name(net, name)) 1155 return -EEXIST; 1156 else if (dev->name != name) 1157 strlcpy(dev->name, name, IFNAMSIZ); 1158 1159 return 0; 1160 } 1161 EXPORT_SYMBOL(dev_get_valid_name); 1162 1163 /** 1164 * dev_change_name - change name of a device 1165 * @dev: device 1166 * @newname: name (or format string) must be at least IFNAMSIZ 1167 * 1168 * Change name of a device, can pass format strings "eth%d". 1169 * for wildcarding. 1170 */ 1171 int dev_change_name(struct net_device *dev, const char *newname) 1172 { 1173 unsigned char old_assign_type; 1174 char oldname[IFNAMSIZ]; 1175 int err = 0; 1176 int ret; 1177 struct net *net; 1178 1179 ASSERT_RTNL(); 1180 BUG_ON(!dev_net(dev)); 1181 1182 net = dev_net(dev); 1183 1184 /* Some auto-enslaved devices e.g. failover slaves are 1185 * special, as userspace might rename the device after 1186 * the interface had been brought up and running since 1187 * the point kernel initiated auto-enslavement. Allow 1188 * live name change even when these slave devices are 1189 * up and running. 1190 * 1191 * Typically, users of these auto-enslaving devices 1192 * don't actually care about slave name change, as 1193 * they are supposed to operate on master interface 1194 * directly. 1195 */ 1196 if (dev->flags & IFF_UP && 1197 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) 1198 return -EBUSY; 1199 1200 write_seqcount_begin(&devnet_rename_seq); 1201 1202 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1203 write_seqcount_end(&devnet_rename_seq); 1204 return 0; 1205 } 1206 1207 memcpy(oldname, dev->name, IFNAMSIZ); 1208 1209 err = dev_get_valid_name(net, dev, newname); 1210 if (err < 0) { 1211 write_seqcount_end(&devnet_rename_seq); 1212 return err; 1213 } 1214 1215 if (oldname[0] && !strchr(oldname, '%')) 1216 netdev_info(dev, "renamed from %s\n", oldname); 1217 1218 old_assign_type = dev->name_assign_type; 1219 dev->name_assign_type = NET_NAME_RENAMED; 1220 1221 rollback: 1222 ret = device_rename(&dev->dev, dev->name); 1223 if (ret) { 1224 memcpy(dev->name, oldname, IFNAMSIZ); 1225 dev->name_assign_type = old_assign_type; 1226 write_seqcount_end(&devnet_rename_seq); 1227 return ret; 1228 } 1229 1230 write_seqcount_end(&devnet_rename_seq); 1231 1232 netdev_adjacent_rename_links(dev, oldname); 1233 1234 write_lock_bh(&dev_base_lock); 1235 hlist_del_rcu(&dev->name_hlist); 1236 write_unlock_bh(&dev_base_lock); 1237 1238 synchronize_rcu(); 1239 1240 write_lock_bh(&dev_base_lock); 1241 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 1242 write_unlock_bh(&dev_base_lock); 1243 1244 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1245 ret = notifier_to_errno(ret); 1246 1247 if (ret) { 1248 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1249 if (err >= 0) { 1250 err = ret; 1251 write_seqcount_begin(&devnet_rename_seq); 1252 memcpy(dev->name, oldname, IFNAMSIZ); 1253 memcpy(oldname, newname, IFNAMSIZ); 1254 dev->name_assign_type = old_assign_type; 1255 old_assign_type = NET_NAME_RENAMED; 1256 goto rollback; 1257 } else { 1258 pr_err("%s: name change rollback 
failed: %d\n", 1259 dev->name, ret); 1260 } 1261 } 1262 1263 return err; 1264 } 1265 1266 /** 1267 * dev_set_alias - change ifalias of a device 1268 * @dev: device 1269 * @alias: name up to IFALIASZ 1270 * @len: limit of bytes to copy from info 1271 * 1272 * Set ifalias for a device, 1273 */ 1274 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1275 { 1276 struct dev_ifalias *new_alias = NULL; 1277 1278 if (len >= IFALIASZ) 1279 return -EINVAL; 1280 1281 if (len) { 1282 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1283 if (!new_alias) 1284 return -ENOMEM; 1285 1286 memcpy(new_alias->ifalias, alias, len); 1287 new_alias->ifalias[len] = 0; 1288 } 1289 1290 mutex_lock(&ifalias_mutex); 1291 rcu_swap_protected(dev->ifalias, new_alias, 1292 mutex_is_locked(&ifalias_mutex)); 1293 mutex_unlock(&ifalias_mutex); 1294 1295 if (new_alias) 1296 kfree_rcu(new_alias, rcuhead); 1297 1298 return len; 1299 } 1300 EXPORT_SYMBOL(dev_set_alias); 1301 1302 /** 1303 * dev_get_alias - get ifalias of a device 1304 * @dev: device 1305 * @name: buffer to store name of ifalias 1306 * @len: size of buffer 1307 * 1308 * get ifalias for a device. Caller must make sure dev cannot go 1309 * away, e.g. rcu read lock or own a reference count to device. 1310 */ 1311 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1312 { 1313 const struct dev_ifalias *alias; 1314 int ret = 0; 1315 1316 rcu_read_lock(); 1317 alias = rcu_dereference(dev->ifalias); 1318 if (alias) 1319 ret = snprintf(name, len, "%s", alias->ifalias); 1320 rcu_read_unlock(); 1321 1322 return ret; 1323 } 1324 1325 /** 1326 * netdev_features_change - device changes features 1327 * @dev: device to cause notification 1328 * 1329 * Called to indicate a device has changed features. 1330 */ 1331 void netdev_features_change(struct net_device *dev) 1332 { 1333 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1334 } 1335 EXPORT_SYMBOL(netdev_features_change); 1336 1337 /** 1338 * netdev_state_change - device changes state 1339 * @dev: device to cause notification 1340 * 1341 * Called to indicate a device has changed state. This function calls 1342 * the notifier chains for netdev_chain and sends a NEWLINK message 1343 * to the routing socket. 1344 */ 1345 void netdev_state_change(struct net_device *dev) 1346 { 1347 if (dev->flags & IFF_UP) { 1348 struct netdev_notifier_change_info change_info = { 1349 .info.dev = dev, 1350 }; 1351 1352 call_netdevice_notifiers_info(NETDEV_CHANGE, 1353 &change_info.info); 1354 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1355 } 1356 } 1357 EXPORT_SYMBOL(netdev_state_change); 1358 1359 /** 1360 * netdev_notify_peers - notify network peers about existence of @dev 1361 * @dev: network device 1362 * 1363 * Generate traffic such that interested network peers are aware of 1364 * @dev, such as by generating a gratuitous ARP. This may be used when 1365 * a device wants to inform the rest of the network about some sort of 1366 * reconfiguration such as a failover event or virtual machine 1367 * migration. 
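 *
 * For example, a paravirtualised driver could call this once a live
 * migration has completed (sketch; "vif->ndev" is a hypothetical
 * driver-private pointer to its net_device):
 *
 *	netdev_notify_peers(vif->ndev);
 *
 * Note that this helper takes the RTNL lock itself, so it must not be
 * called with RTNL already held.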
1368 */ 1369 void netdev_notify_peers(struct net_device *dev) 1370 { 1371 rtnl_lock(); 1372 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1373 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1374 rtnl_unlock(); 1375 } 1376 EXPORT_SYMBOL(netdev_notify_peers); 1377 1378 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1379 { 1380 const struct net_device_ops *ops = dev->netdev_ops; 1381 int ret; 1382 1383 ASSERT_RTNL(); 1384 1385 if (!netif_device_present(dev)) 1386 return -ENODEV; 1387 1388 /* Block netpoll from trying to do any rx path servicing. 1389 * If we don't do this there is a chance ndo_poll_controller 1390 * or ndo_poll may be running while we open the device 1391 */ 1392 netpoll_poll_disable(dev); 1393 1394 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1395 ret = notifier_to_errno(ret); 1396 if (ret) 1397 return ret; 1398 1399 set_bit(__LINK_STATE_START, &dev->state); 1400 1401 if (ops->ndo_validate_addr) 1402 ret = ops->ndo_validate_addr(dev); 1403 1404 if (!ret && ops->ndo_open) 1405 ret = ops->ndo_open(dev); 1406 1407 netpoll_poll_enable(dev); 1408 1409 if (ret) 1410 clear_bit(__LINK_STATE_START, &dev->state); 1411 else { 1412 dev->flags |= IFF_UP; 1413 dev_set_rx_mode(dev); 1414 dev_activate(dev); 1415 add_device_randomness(dev->dev_addr, dev->addr_len); 1416 } 1417 1418 return ret; 1419 } 1420 1421 /** 1422 * dev_open - prepare an interface for use. 1423 * @dev: device to open 1424 * @extack: netlink extended ack 1425 * 1426 * Takes a device from down to up state. The device's private open 1427 * function is invoked and then the multicast lists are loaded. Finally 1428 * the device is moved into the up state and a %NETDEV_UP message is 1429 * sent to the netdev notifier chain. 1430 * 1431 * Calling this function on an active interface is a nop. On a failure 1432 * a negative errno code is returned. 1433 */ 1434 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1435 { 1436 int ret; 1437 1438 if (dev->flags & IFF_UP) 1439 return 0; 1440 1441 ret = __dev_open(dev, extack); 1442 if (ret < 0) 1443 return ret; 1444 1445 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1446 call_netdevice_notifiers(NETDEV_UP, dev); 1447 1448 return ret; 1449 } 1450 EXPORT_SYMBOL(dev_open); 1451 1452 static void __dev_close_many(struct list_head *head) 1453 { 1454 struct net_device *dev; 1455 1456 ASSERT_RTNL(); 1457 might_sleep(); 1458 1459 list_for_each_entry(dev, head, close_list) { 1460 /* Temporarily disable netpoll until the interface is down */ 1461 netpoll_poll_disable(dev); 1462 1463 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1464 1465 clear_bit(__LINK_STATE_START, &dev->state); 1466 1467 /* Synchronize to scheduled poll. We cannot touch poll list, it 1468 * can be even on different cpu. So just clear netif_running(). 1469 * 1470 * dev->stop() will invoke napi_disable() on all of it's 1471 * napi_struct instances on this device. 1472 */ 1473 smp_mb__after_atomic(); /* Commit netif_running(). */ 1474 } 1475 1476 dev_deactivate_many(head); 1477 1478 list_for_each_entry(dev, head, close_list) { 1479 const struct net_device_ops *ops = dev->netdev_ops; 1480 1481 /* 1482 * Call the device specific close. This cannot fail. 1483 * Only if device is UP 1484 * 1485 * We allow it to be called even after a DETACH hot-plug 1486 * event. 
1487 */ 1488 if (ops->ndo_stop) 1489 ops->ndo_stop(dev); 1490 1491 dev->flags &= ~IFF_UP; 1492 netpoll_poll_enable(dev); 1493 } 1494 } 1495 1496 static void __dev_close(struct net_device *dev) 1497 { 1498 LIST_HEAD(single); 1499 1500 list_add(&dev->close_list, &single); 1501 __dev_close_many(&single); 1502 list_del(&single); 1503 } 1504 1505 void dev_close_many(struct list_head *head, bool unlink) 1506 { 1507 struct net_device *dev, *tmp; 1508 1509 /* Remove the devices that don't need to be closed */ 1510 list_for_each_entry_safe(dev, tmp, head, close_list) 1511 if (!(dev->flags & IFF_UP)) 1512 list_del_init(&dev->close_list); 1513 1514 __dev_close_many(head); 1515 1516 list_for_each_entry_safe(dev, tmp, head, close_list) { 1517 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1518 call_netdevice_notifiers(NETDEV_DOWN, dev); 1519 if (unlink) 1520 list_del_init(&dev->close_list); 1521 } 1522 } 1523 EXPORT_SYMBOL(dev_close_many); 1524 1525 /** 1526 * dev_close - shutdown an interface. 1527 * @dev: device to shutdown 1528 * 1529 * This function moves an active device into down state. A 1530 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1531 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1532 * chain. 1533 */ 1534 void dev_close(struct net_device *dev) 1535 { 1536 if (dev->flags & IFF_UP) { 1537 LIST_HEAD(single); 1538 1539 list_add(&dev->close_list, &single); 1540 dev_close_many(&single, true); 1541 list_del(&single); 1542 } 1543 } 1544 EXPORT_SYMBOL(dev_close); 1545 1546 1547 /** 1548 * dev_disable_lro - disable Large Receive Offload on a device 1549 * @dev: device 1550 * 1551 * Disable Large Receive Offload (LRO) on a net device. Must be 1552 * called under RTNL. This is needed if received packets may be 1553 * forwarded to another interface. 1554 */ 1555 void dev_disable_lro(struct net_device *dev) 1556 { 1557 struct net_device *lower_dev; 1558 struct list_head *iter; 1559 1560 dev->wanted_features &= ~NETIF_F_LRO; 1561 netdev_update_features(dev); 1562 1563 if (unlikely(dev->features & NETIF_F_LRO)) 1564 netdev_WARN(dev, "failed to disable LRO!\n"); 1565 1566 netdev_for_each_lower_dev(dev, lower_dev, iter) 1567 dev_disable_lro(lower_dev); 1568 } 1569 EXPORT_SYMBOL(dev_disable_lro); 1570 1571 /** 1572 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1573 * @dev: device 1574 * 1575 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1576 * called under RTNL. This is needed if Generic XDP is installed on 1577 * the device. 
1578 */ 1579 static void dev_disable_gro_hw(struct net_device *dev) 1580 { 1581 dev->wanted_features &= ~NETIF_F_GRO_HW; 1582 netdev_update_features(dev); 1583 1584 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1585 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1586 } 1587 1588 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1589 { 1590 #define N(val) \ 1591 case NETDEV_##val: \ 1592 return "NETDEV_" __stringify(val); 1593 switch (cmd) { 1594 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1595 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1596 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1597 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) 1598 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) 1599 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) 1600 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1601 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1602 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1603 N(PRE_CHANGEADDR) 1604 } 1605 #undef N 1606 return "UNKNOWN_NETDEV_EVENT"; 1607 } 1608 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1609 1610 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1611 struct net_device *dev) 1612 { 1613 struct netdev_notifier_info info = { 1614 .dev = dev, 1615 }; 1616 1617 return nb->notifier_call(nb, val, &info); 1618 } 1619 1620 static int dev_boot_phase = 1; 1621 1622 /** 1623 * register_netdevice_notifier - register a network notifier block 1624 * @nb: notifier 1625 * 1626 * Register a notifier to be called when network device events occur. 1627 * The notifier passed is linked into the kernel structures and must 1628 * not be reused until it has been unregistered. A negative errno code 1629 * is returned on a failure. 1630 * 1631 * When registered all registration and up events are replayed 1632 * to the new notifier to allow device to have a race free 1633 * view of the network device list. 
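 *
 * A minimal user looks roughly like this (sketch; "foo_netdev_event"
 * and "foo_netdev_nb" are hypothetical names):
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_netdev_nb);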
1634 */ 1635 1636 int register_netdevice_notifier(struct notifier_block *nb) 1637 { 1638 struct net_device *dev; 1639 struct net_device *last; 1640 struct net *net; 1641 int err; 1642 1643 /* Close race with setup_net() and cleanup_net() */ 1644 down_write(&pernet_ops_rwsem); 1645 rtnl_lock(); 1646 err = raw_notifier_chain_register(&netdev_chain, nb); 1647 if (err) 1648 goto unlock; 1649 if (dev_boot_phase) 1650 goto unlock; 1651 for_each_net(net) { 1652 for_each_netdev(net, dev) { 1653 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1654 err = notifier_to_errno(err); 1655 if (err) 1656 goto rollback; 1657 1658 if (!(dev->flags & IFF_UP)) 1659 continue; 1660 1661 call_netdevice_notifier(nb, NETDEV_UP, dev); 1662 } 1663 } 1664 1665 unlock: 1666 rtnl_unlock(); 1667 up_write(&pernet_ops_rwsem); 1668 return err; 1669 1670 rollback: 1671 last = dev; 1672 for_each_net(net) { 1673 for_each_netdev(net, dev) { 1674 if (dev == last) 1675 goto outroll; 1676 1677 if (dev->flags & IFF_UP) { 1678 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1679 dev); 1680 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1681 } 1682 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1683 } 1684 } 1685 1686 outroll: 1687 raw_notifier_chain_unregister(&netdev_chain, nb); 1688 goto unlock; 1689 } 1690 EXPORT_SYMBOL(register_netdevice_notifier); 1691 1692 /** 1693 * unregister_netdevice_notifier - unregister a network notifier block 1694 * @nb: notifier 1695 * 1696 * Unregister a notifier previously registered by 1697 * register_netdevice_notifier(). The notifier is unlinked into the 1698 * kernel structures and may then be reused. A negative errno code 1699 * is returned on a failure. 1700 * 1701 * After unregistering unregister and down device events are synthesized 1702 * for all devices on the device list to the removed notifier to remove 1703 * the need for special case cleanup code. 1704 */ 1705 1706 int unregister_netdevice_notifier(struct notifier_block *nb) 1707 { 1708 struct net_device *dev; 1709 struct net *net; 1710 int err; 1711 1712 /* Close race with setup_net() and cleanup_net() */ 1713 down_write(&pernet_ops_rwsem); 1714 rtnl_lock(); 1715 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1716 if (err) 1717 goto unlock; 1718 1719 for_each_net(net) { 1720 for_each_netdev(net, dev) { 1721 if (dev->flags & IFF_UP) { 1722 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1723 dev); 1724 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1725 } 1726 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1727 } 1728 } 1729 unlock: 1730 rtnl_unlock(); 1731 up_write(&pernet_ops_rwsem); 1732 return err; 1733 } 1734 EXPORT_SYMBOL(unregister_netdevice_notifier); 1735 1736 /** 1737 * call_netdevice_notifiers_info - call all network notifier blocks 1738 * @val: value passed unmodified to notifier function 1739 * @info: notifier information data 1740 * 1741 * Call all network notifier blocks. Parameters and return value 1742 * are as for raw_notifier_call_chain(). 
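 *
 * Callers that need an errno typically convert the chain's return
 * value, e.g. (sketch; "info" is a filled-in netdev_notifier_info):
 *
 *	ret = call_netdevice_notifiers_info(NETDEV_PRE_UP, &info);
 *	ret = notifier_to_errno(ret);
 *	if (ret)
 *		return ret;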
1743 */ 1744 1745 static int call_netdevice_notifiers_info(unsigned long val, 1746 struct netdev_notifier_info *info) 1747 { 1748 ASSERT_RTNL(); 1749 return raw_notifier_call_chain(&netdev_chain, val, info); 1750 } 1751 1752 static int call_netdevice_notifiers_extack(unsigned long val, 1753 struct net_device *dev, 1754 struct netlink_ext_ack *extack) 1755 { 1756 struct netdev_notifier_info info = { 1757 .dev = dev, 1758 .extack = extack, 1759 }; 1760 1761 return call_netdevice_notifiers_info(val, &info); 1762 } 1763 1764 /** 1765 * call_netdevice_notifiers - call all network notifier blocks 1766 * @val: value passed unmodified to notifier function 1767 * @dev: net_device pointer passed unmodified to notifier function 1768 * 1769 * Call all network notifier blocks. Parameters and return value 1770 * are as for raw_notifier_call_chain(). 1771 */ 1772 1773 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1774 { 1775 return call_netdevice_notifiers_extack(val, dev, NULL); 1776 } 1777 EXPORT_SYMBOL(call_netdevice_notifiers); 1778 1779 /** 1780 * call_netdevice_notifiers_mtu - call all network notifier blocks 1781 * @val: value passed unmodified to notifier function 1782 * @dev: net_device pointer passed unmodified to notifier function 1783 * @arg: additional u32 argument passed to the notifier function 1784 * 1785 * Call all network notifier blocks. Parameters and return value 1786 * are as for raw_notifier_call_chain(). 1787 */ 1788 static int call_netdevice_notifiers_mtu(unsigned long val, 1789 struct net_device *dev, u32 arg) 1790 { 1791 struct netdev_notifier_info_ext info = { 1792 .info.dev = dev, 1793 .ext.mtu = arg, 1794 }; 1795 1796 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 1797 1798 return call_netdevice_notifiers_info(val, &info.info); 1799 } 1800 1801 #ifdef CONFIG_NET_INGRESS 1802 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 1803 1804 void net_inc_ingress_queue(void) 1805 { 1806 static_branch_inc(&ingress_needed_key); 1807 } 1808 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 1809 1810 void net_dec_ingress_queue(void) 1811 { 1812 static_branch_dec(&ingress_needed_key); 1813 } 1814 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 1815 #endif 1816 1817 #ifdef CONFIG_NET_EGRESS 1818 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 1819 1820 void net_inc_egress_queue(void) 1821 { 1822 static_branch_inc(&egress_needed_key); 1823 } 1824 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 1825 1826 void net_dec_egress_queue(void) 1827 { 1828 static_branch_dec(&egress_needed_key); 1829 } 1830 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 1831 #endif 1832 1833 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 1834 #ifdef CONFIG_JUMP_LABEL 1835 static atomic_t netstamp_needed_deferred; 1836 static atomic_t netstamp_wanted; 1837 static void netstamp_clear(struct work_struct *work) 1838 { 1839 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 1840 int wanted; 1841 1842 wanted = atomic_add_return(deferred, &netstamp_wanted); 1843 if (wanted > 0) 1844 static_branch_enable(&netstamp_needed_key); 1845 else 1846 static_branch_disable(&netstamp_needed_key); 1847 } 1848 static DECLARE_WORK(netstamp_work, netstamp_clear); 1849 #endif 1850 1851 void net_enable_timestamp(void) 1852 { 1853 #ifdef CONFIG_JUMP_LABEL 1854 int wanted; 1855 1856 while (1) { 1857 wanted = atomic_read(&netstamp_wanted); 1858 if (wanted <= 0) 1859 break; 1860 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 1861 return; 1862 } 1863 atomic_inc(&netstamp_needed_deferred); 
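	/* The static key is not flipped here: this path may run in
	 * atomic context while toggling a static branch can sleep, so
	 * the change is recorded and netstamp_clear() applies it from
	 * the workqueue.
	 */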
1864 schedule_work(&netstamp_work); 1865 #else 1866 static_branch_inc(&netstamp_needed_key); 1867 #endif 1868 } 1869 EXPORT_SYMBOL(net_enable_timestamp); 1870 1871 void net_disable_timestamp(void) 1872 { 1873 #ifdef CONFIG_JUMP_LABEL 1874 int wanted; 1875 1876 while (1) { 1877 wanted = atomic_read(&netstamp_wanted); 1878 if (wanted <= 1) 1879 break; 1880 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 1881 return; 1882 } 1883 atomic_dec(&netstamp_needed_deferred); 1884 schedule_work(&netstamp_work); 1885 #else 1886 static_branch_dec(&netstamp_needed_key); 1887 #endif 1888 } 1889 EXPORT_SYMBOL(net_disable_timestamp); 1890 1891 static inline void net_timestamp_set(struct sk_buff *skb) 1892 { 1893 skb->tstamp = 0; 1894 if (static_branch_unlikely(&netstamp_needed_key)) 1895 __net_timestamp(skb); 1896 } 1897 1898 #define net_timestamp_check(COND, SKB) \ 1899 if (static_branch_unlikely(&netstamp_needed_key)) { \ 1900 if ((COND) && !(SKB)->tstamp) \ 1901 __net_timestamp(SKB); \ 1902 } \ 1903 1904 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 1905 { 1906 unsigned int len; 1907 1908 if (!(dev->flags & IFF_UP)) 1909 return false; 1910 1911 len = dev->mtu + dev->hard_header_len + VLAN_HLEN; 1912 if (skb->len <= len) 1913 return true; 1914 1915 /* if TSO is enabled, we don't care about the length as the packet 1916 * could be forwarded without being segmented before 1917 */ 1918 if (skb_is_gso(skb)) 1919 return true; 1920 1921 return false; 1922 } 1923 EXPORT_SYMBOL_GPL(is_skb_forwardable); 1924 1925 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1926 { 1927 int ret = ____dev_forward_skb(dev, skb); 1928 1929 if (likely(!ret)) { 1930 skb->protocol = eth_type_trans(skb, dev); 1931 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1932 } 1933 1934 return ret; 1935 } 1936 EXPORT_SYMBOL_GPL(__dev_forward_skb); 1937 1938 /** 1939 * dev_forward_skb - loopback an skb to another netif 1940 * 1941 * @dev: destination network device 1942 * @skb: buffer to forward 1943 * 1944 * return values: 1945 * NET_RX_SUCCESS (no congestion) 1946 * NET_RX_DROP (packet was dropped, but freed) 1947 * 1948 * dev_forward_skb can be used for injecting an skb from the 1949 * start_xmit function of one device into the receive queue 1950 * of another device. 1951 * 1952 * The receiving device may be in another namespace, so 1953 * we have to clear all information in the skb that could 1954 * impact namespace isolation. 
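 *
 * For example, a virtual device pair (in the style of veth) might do
 * the following from its ndo_start_xmit, where "peer" is the other
 * end of the pair (sketch):
 *
 *	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *		dev->stats.tx_dropped++;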
1955 */ 1956 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1957 { 1958 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 1959 } 1960 EXPORT_SYMBOL_GPL(dev_forward_skb); 1961 1962 static inline int deliver_skb(struct sk_buff *skb, 1963 struct packet_type *pt_prev, 1964 struct net_device *orig_dev) 1965 { 1966 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1967 return -ENOMEM; 1968 refcount_inc(&skb->users); 1969 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1970 } 1971 1972 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 1973 struct packet_type **pt, 1974 struct net_device *orig_dev, 1975 __be16 type, 1976 struct list_head *ptype_list) 1977 { 1978 struct packet_type *ptype, *pt_prev = *pt; 1979 1980 list_for_each_entry_rcu(ptype, ptype_list, list) { 1981 if (ptype->type != type) 1982 continue; 1983 if (pt_prev) 1984 deliver_skb(skb, pt_prev, orig_dev); 1985 pt_prev = ptype; 1986 } 1987 *pt = pt_prev; 1988 } 1989 1990 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 1991 { 1992 if (!ptype->af_packet_priv || !skb->sk) 1993 return false; 1994 1995 if (ptype->id_match) 1996 return ptype->id_match(ptype, skb->sk); 1997 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 1998 return true; 1999 2000 return false; 2001 } 2002 2003 /** 2004 * dev_nit_active - return true if any network interface taps are in use 2005 * 2006 * @dev: network device to check for the presence of taps 2007 */ 2008 bool dev_nit_active(struct net_device *dev) 2009 { 2010 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2011 } 2012 EXPORT_SYMBOL_GPL(dev_nit_active); 2013 2014 /* 2015 * Support routine. Sends outgoing frames to any network 2016 * taps currently in use. 2017 */ 2018 2019 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2020 { 2021 struct packet_type *ptype; 2022 struct sk_buff *skb2 = NULL; 2023 struct packet_type *pt_prev = NULL; 2024 struct list_head *ptype_list = &ptype_all; 2025 2026 rcu_read_lock(); 2027 again: 2028 list_for_each_entry_rcu(ptype, ptype_list, list) { 2029 if (ptype->ignore_outgoing) 2030 continue; 2031 2032 /* Never send packets back to the socket 2033 * they originated from - MvS (miquels@drinkel.ow.org) 2034 */ 2035 if (skb_loop_sk(ptype, skb)) 2036 continue; 2037 2038 if (pt_prev) { 2039 deliver_skb(skb2, pt_prev, skb->dev); 2040 pt_prev = ptype; 2041 continue; 2042 } 2043 2044 /* need to clone skb, done only once */ 2045 skb2 = skb_clone(skb, GFP_ATOMIC); 2046 if (!skb2) 2047 goto out_unlock; 2048 2049 net_timestamp_set(skb2); 2050 2051 /* skb->nh should be correctly 2052 * set by sender, so that the second statement is 2053 * just protection against buggy protocols. 
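 * ("The second statement" refers to the network header bounds check and
 * skb_reset_network_header() call just below.)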
2054 */ 2055 skb_reset_mac_header(skb2); 2056 2057 if (skb_network_header(skb2) < skb2->data || 2058 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2059 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2060 ntohs(skb2->protocol), 2061 dev->name); 2062 skb_reset_network_header(skb2); 2063 } 2064 2065 skb2->transport_header = skb2->network_header; 2066 skb2->pkt_type = PACKET_OUTGOING; 2067 pt_prev = ptype; 2068 } 2069 2070 if (ptype_list == &ptype_all) { 2071 ptype_list = &dev->ptype_all; 2072 goto again; 2073 } 2074 out_unlock: 2075 if (pt_prev) { 2076 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2077 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2078 else 2079 kfree_skb(skb2); 2080 } 2081 rcu_read_unlock(); 2082 } 2083 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2084 2085 /** 2086 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2087 * @dev: Network device 2088 * @txq: number of queues available 2089 * 2090 * If real_num_tx_queues is changed the tc mappings may no longer be 2091 * valid. To resolve this, verify that the tc mapping remains valid and, if 2092 * not, zero the mapping. With no priorities mapping to this 2093 * offset/count pair it will no longer be used. In the worst case, if TC0 2094 * is invalid, nothing can be done, so priority mappings are disabled. It is 2095 * expected that drivers will fix this mapping if they can before 2096 * calling netif_set_real_num_tx_queues. 2097 */ 2098 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2099 { 2100 int i; 2101 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2102 2103 /* If TC0 is invalidated disable TC mapping */ 2104 if (tc->offset + tc->count > txq) { 2105 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2106 dev->num_tc = 0; 2107 return; 2108 } 2109 2110 /* Invalidated prio to tc mappings set to TC0 */ 2111 for (i = 1; i < TC_BITMASK + 1; i++) { 2112 int q = netdev_get_prio_tc_map(dev, i); 2113 2114 tc = &dev->tc_to_txq[q]; 2115 if (tc->offset + tc->count > txq) { 2116 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid.
Setting map to 0\n", 2117 i, q); 2118 netdev_set_prio_tc_map(dev, i, 0); 2119 } 2120 } 2121 } 2122 2123 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2124 { 2125 if (dev->num_tc) { 2126 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2127 int i; 2128 2129 /* walk through the TCs and see if it falls into any of them */ 2130 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2131 if ((txq - tc->offset) < tc->count) 2132 return i; 2133 } 2134 2135 /* didn't find it, just return -1 to indicate no match */ 2136 return -1; 2137 } 2138 2139 return 0; 2140 } 2141 EXPORT_SYMBOL(netdev_txq_to_tc); 2142 2143 #ifdef CONFIG_XPS 2144 struct static_key xps_needed __read_mostly; 2145 EXPORT_SYMBOL(xps_needed); 2146 struct static_key xps_rxqs_needed __read_mostly; 2147 EXPORT_SYMBOL(xps_rxqs_needed); 2148 static DEFINE_MUTEX(xps_map_mutex); 2149 #define xmap_dereference(P) \ 2150 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2151 2152 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2153 int tci, u16 index) 2154 { 2155 struct xps_map *map = NULL; 2156 int pos; 2157 2158 if (dev_maps) 2159 map = xmap_dereference(dev_maps->attr_map[tci]); 2160 if (!map) 2161 return false; 2162 2163 for (pos = map->len; pos--;) { 2164 if (map->queues[pos] != index) 2165 continue; 2166 2167 if (map->len > 1) { 2168 map->queues[pos] = map->queues[--map->len]; 2169 break; 2170 } 2171 2172 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2173 kfree_rcu(map, rcu); 2174 return false; 2175 } 2176 2177 return true; 2178 } 2179 2180 static bool remove_xps_queue_cpu(struct net_device *dev, 2181 struct xps_dev_maps *dev_maps, 2182 int cpu, u16 offset, u16 count) 2183 { 2184 int num_tc = dev->num_tc ? : 1; 2185 bool active = false; 2186 int tci; 2187 2188 for (tci = cpu * num_tc; num_tc--; tci++) { 2189 int i, j; 2190 2191 for (i = count, j = offset; i--; j++) { 2192 if (!remove_xps_queue(dev_maps, tci, j)) 2193 break; 2194 } 2195 2196 active |= i < 0; 2197 } 2198 2199 return active; 2200 } 2201 2202 static void reset_xps_maps(struct net_device *dev, 2203 struct xps_dev_maps *dev_maps, 2204 bool is_rxqs_map) 2205 { 2206 if (is_rxqs_map) { 2207 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2208 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2209 } else { 2210 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2211 } 2212 static_key_slow_dec_cpuslocked(&xps_needed); 2213 kfree_rcu(dev_maps, rcu); 2214 } 2215 2216 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, 2217 struct xps_dev_maps *dev_maps, unsigned int nr_ids, 2218 u16 offset, u16 count, bool is_rxqs_map) 2219 { 2220 bool active = false; 2221 int i, j; 2222 2223 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), 2224 j < nr_ids;) 2225 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, 2226 count); 2227 if (!active) 2228 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2229 2230 if (!is_rxqs_map) { 2231 for (i = offset + (count - 1); count--; i--) { 2232 netdev_queue_numa_node_write( 2233 netdev_get_tx_queue(dev, i), 2234 NUMA_NO_NODE); 2235 } 2236 } 2237 } 2238 2239 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2240 u16 count) 2241 { 2242 const unsigned long *possible_mask = NULL; 2243 struct xps_dev_maps *dev_maps; 2244 unsigned int nr_ids; 2245 2246 if (!static_key_false(&xps_needed)) 2247 return; 2248 2249 cpus_read_lock(); 2250 mutex_lock(&xps_map_mutex); 2251 2252 if (static_key_false(&xps_rxqs_needed)) { 2253 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2254 if (dev_maps) { 2255 nr_ids = 
dev->num_rx_queues; 2256 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, 2257 offset, count, true); 2258 } 2259 } 2260 2261 dev_maps = xmap_dereference(dev->xps_cpus_map); 2262 if (!dev_maps) 2263 goto out_no_maps; 2264 2265 if (num_possible_cpus() > 1) 2266 possible_mask = cpumask_bits(cpu_possible_mask); 2267 nr_ids = nr_cpu_ids; 2268 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, 2269 false); 2270 2271 out_no_maps: 2272 mutex_unlock(&xps_map_mutex); 2273 cpus_read_unlock(); 2274 } 2275 2276 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2277 { 2278 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2279 } 2280 2281 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2282 u16 index, bool is_rxqs_map) 2283 { 2284 struct xps_map *new_map; 2285 int alloc_len = XPS_MIN_MAP_ALLOC; 2286 int i, pos; 2287 2288 for (pos = 0; map && pos < map->len; pos++) { 2289 if (map->queues[pos] != index) 2290 continue; 2291 return map; 2292 } 2293 2294 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2295 if (map) { 2296 if (pos < map->alloc_len) 2297 return map; 2298 2299 alloc_len = map->alloc_len * 2; 2300 } 2301 2302 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2303 * map 2304 */ 2305 if (is_rxqs_map) 2306 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2307 else 2308 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2309 cpu_to_node(attr_index)); 2310 if (!new_map) 2311 return NULL; 2312 2313 for (i = 0; i < pos; i++) 2314 new_map->queues[i] = map->queues[i]; 2315 new_map->alloc_len = alloc_len; 2316 new_map->len = pos; 2317 2318 return new_map; 2319 } 2320 2321 /* Must be called under cpus_read_lock */ 2322 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2323 u16 index, bool is_rxqs_map) 2324 { 2325 const unsigned long *online_mask = NULL, *possible_mask = NULL; 2326 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 2327 int i, j, tci, numa_node_id = -2; 2328 int maps_sz, num_tc = 1, tc = 0; 2329 struct xps_map *map, *new_map; 2330 bool active = false; 2331 unsigned int nr_ids; 2332 2333 if (dev->num_tc) { 2334 /* Do not allow XPS on subordinate device directly */ 2335 num_tc = dev->num_tc; 2336 if (num_tc < 0) 2337 return -EINVAL; 2338 2339 /* If queue belongs to subordinate dev use its map */ 2340 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2341 2342 tc = netdev_txq_to_tc(dev, index); 2343 if (tc < 0) 2344 return -EINVAL; 2345 } 2346 2347 mutex_lock(&xps_map_mutex); 2348 if (is_rxqs_map) { 2349 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2350 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2351 nr_ids = dev->num_rx_queues; 2352 } else { 2353 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2354 if (num_possible_cpus() > 1) { 2355 online_mask = cpumask_bits(cpu_online_mask); 2356 possible_mask = cpumask_bits(cpu_possible_mask); 2357 } 2358 dev_maps = xmap_dereference(dev->xps_cpus_map); 2359 nr_ids = nr_cpu_ids; 2360 } 2361 2362 if (maps_sz < L1_CACHE_BYTES) 2363 maps_sz = L1_CACHE_BYTES; 2364 2365 /* allocate memory for queue storage */ 2366 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2367 j < nr_ids;) { 2368 if (!new_dev_maps) 2369 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2370 if (!new_dev_maps) { 2371 mutex_unlock(&xps_map_mutex); 2372 return -ENOMEM; 2373 } 2374 2375 tci = j * num_tc + tc; 2376 map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : 2377 NULL; 2378 2379 map = expand_xps_map(map, j, index, is_rxqs_map); 2380 if (!map) 2381 goto error; 2382 2383 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2384 } 2385 2386 if (!new_dev_maps) 2387 goto out_no_new_maps; 2388 2389 if (!dev_maps) { 2390 /* Increment static keys at most once per type */ 2391 static_key_slow_inc_cpuslocked(&xps_needed); 2392 if (is_rxqs_map) 2393 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2394 } 2395 2396 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2397 j < nr_ids;) { 2398 /* copy maps belonging to foreign traffic classes */ 2399 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { 2400 /* fill in the new device map from the old device map */ 2401 map = xmap_dereference(dev_maps->attr_map[tci]); 2402 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2403 } 2404 2405 /* We need to explicitly update tci as prevous loop 2406 * could break out early if dev_maps is NULL. 2407 */ 2408 tci = j * num_tc + tc; 2409 2410 if (netif_attr_test_mask(j, mask, nr_ids) && 2411 netif_attr_test_online(j, online_mask, nr_ids)) { 2412 /* add tx-queue to CPU/rx-queue maps */ 2413 int pos = 0; 2414 2415 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2416 while ((pos < map->len) && (map->queues[pos] != index)) 2417 pos++; 2418 2419 if (pos == map->len) 2420 map->queues[map->len++] = index; 2421 #ifdef CONFIG_NUMA 2422 if (!is_rxqs_map) { 2423 if (numa_node_id == -2) 2424 numa_node_id = cpu_to_node(j); 2425 else if (numa_node_id != cpu_to_node(j)) 2426 numa_node_id = -1; 2427 } 2428 #endif 2429 } else if (dev_maps) { 2430 /* fill in the new device map from the old device map */ 2431 map = xmap_dereference(dev_maps->attr_map[tci]); 2432 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2433 } 2434 2435 /* copy maps belonging to foreign traffic classes */ 2436 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { 2437 /* fill in the new device map from the old device map */ 2438 map = xmap_dereference(dev_maps->attr_map[tci]); 2439 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2440 } 2441 } 2442 2443 if (is_rxqs_map) 2444 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); 2445 else 2446 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); 2447 2448 /* Cleanup old maps */ 2449 if (!dev_maps) 2450 goto out_no_old_maps; 2451 2452 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2453 j < nr_ids;) { 2454 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2455 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2456 map = xmap_dereference(dev_maps->attr_map[tci]); 2457 if (map && map != new_map) 2458 kfree_rcu(map, rcu); 2459 } 2460 } 2461 2462 kfree_rcu(dev_maps, rcu); 2463 2464 out_no_old_maps: 2465 dev_maps = new_dev_maps; 2466 active = true; 2467 2468 out_no_new_maps: 2469 if (!is_rxqs_map) { 2470 /* update Tx queue numa node */ 2471 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2472 (numa_node_id >= 0) ? 
2473 numa_node_id : NUMA_NO_NODE); 2474 } 2475 2476 if (!dev_maps) 2477 goto out_no_maps; 2478 2479 /* removes tx-queue from unused CPUs/rx-queues */ 2480 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2481 j < nr_ids;) { 2482 for (i = tc, tci = j * num_tc; i--; tci++) 2483 active |= remove_xps_queue(dev_maps, tci, index); 2484 if (!netif_attr_test_mask(j, mask, nr_ids) || 2485 !netif_attr_test_online(j, online_mask, nr_ids)) 2486 active |= remove_xps_queue(dev_maps, tci, index); 2487 for (i = num_tc - tc, tci++; --i; tci++) 2488 active |= remove_xps_queue(dev_maps, tci, index); 2489 } 2490 2491 /* free map if not active */ 2492 if (!active) 2493 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2494 2495 out_no_maps: 2496 mutex_unlock(&xps_map_mutex); 2497 2498 return 0; 2499 error: 2500 /* remove any maps that we added */ 2501 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2502 j < nr_ids;) { 2503 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2504 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2505 map = dev_maps ? 2506 xmap_dereference(dev_maps->attr_map[tci]) : 2507 NULL; 2508 if (new_map && new_map != map) 2509 kfree(new_map); 2510 } 2511 } 2512 2513 mutex_unlock(&xps_map_mutex); 2514 2515 kfree(new_dev_maps); 2516 return -ENOMEM; 2517 } 2518 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2519 2520 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2521 u16 index) 2522 { 2523 int ret; 2524 2525 cpus_read_lock(); 2526 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); 2527 cpus_read_unlock(); 2528 2529 return ret; 2530 } 2531 EXPORT_SYMBOL(netif_set_xps_queue); 2532 2533 #endif 2534 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2535 { 2536 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2537 2538 /* Unbind any subordinate channels */ 2539 while (txq-- != &dev->_tx[0]) { 2540 if (txq->sb_dev) 2541 netdev_unbind_sb_channel(dev, txq->sb_dev); 2542 } 2543 } 2544 2545 void netdev_reset_tc(struct net_device *dev) 2546 { 2547 #ifdef CONFIG_XPS 2548 netif_reset_xps_queues_gt(dev, 0); 2549 #endif 2550 netdev_unbind_all_sb_channels(dev); 2551 2552 /* Reset TC configuration of device */ 2553 dev->num_tc = 0; 2554 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2555 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2556 } 2557 EXPORT_SYMBOL(netdev_reset_tc); 2558 2559 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2560 { 2561 if (tc >= dev->num_tc) 2562 return -EINVAL; 2563 2564 #ifdef CONFIG_XPS 2565 netif_reset_xps_queues(dev, offset, count); 2566 #endif 2567 dev->tc_to_txq[tc].count = count; 2568 dev->tc_to_txq[tc].offset = offset; 2569 return 0; 2570 } 2571 EXPORT_SYMBOL(netdev_set_tc_queue); 2572 2573 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2574 { 2575 if (num_tc > TC_MAX_QUEUE) 2576 return -EINVAL; 2577 2578 #ifdef CONFIG_XPS 2579 netif_reset_xps_queues_gt(dev, 0); 2580 #endif 2581 netdev_unbind_all_sb_channels(dev); 2582 2583 dev->num_tc = num_tc; 2584 return 0; 2585 } 2586 EXPORT_SYMBOL(netdev_set_num_tc); 2587 2588 void netdev_unbind_sb_channel(struct net_device *dev, 2589 struct net_device *sb_dev) 2590 { 2591 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2592 2593 #ifdef CONFIG_XPS 2594 netif_reset_xps_queues_gt(sb_dev, 0); 2595 #endif 2596 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2597 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2598 2599 while (txq-- != &dev->_tx[0]) { 2600 if 
(txq->sb_dev == sb_dev) 2601 txq->sb_dev = NULL; 2602 } 2603 } 2604 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2605 2606 int netdev_bind_sb_channel_queue(struct net_device *dev, 2607 struct net_device *sb_dev, 2608 u8 tc, u16 count, u16 offset) 2609 { 2610 /* Make certain the sb_dev and dev are already configured */ 2611 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2612 return -EINVAL; 2613 2614 /* We cannot hand out queues we don't have */ 2615 if ((offset + count) > dev->real_num_tx_queues) 2616 return -EINVAL; 2617 2618 /* Record the mapping */ 2619 sb_dev->tc_to_txq[tc].count = count; 2620 sb_dev->tc_to_txq[tc].offset = offset; 2621 2622 /* Provide a way for Tx queue to find the tc_to_txq map or 2623 * XPS map for itself. 2624 */ 2625 while (count--) 2626 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2627 2628 return 0; 2629 } 2630 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2631 2632 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2633 { 2634 /* Do not use a multiqueue device to represent a subordinate channel */ 2635 if (netif_is_multiqueue(dev)) 2636 return -ENODEV; 2637 2638 /* We allow channels 1 - 32767 to be used for subordinate channels. 2639 * Channel 0 is meant to be "native" mode and used only to represent 2640 * the main root device. We allow writing 0 to reset the device back 2641 * to normal mode after being used as a subordinate channel. 2642 */ 2643 if (channel > S16_MAX) 2644 return -EINVAL; 2645 2646 dev->num_tc = -channel; 2647 2648 return 0; 2649 } 2650 EXPORT_SYMBOL(netdev_set_sb_channel); 2651 2652 /* 2653 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2654 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2655 */ 2656 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2657 { 2658 bool disabling; 2659 int rc; 2660 2661 disabling = txq < dev->real_num_tx_queues; 2662 2663 if (txq < 1 || txq > dev->num_tx_queues) 2664 return -EINVAL; 2665 2666 if (dev->reg_state == NETREG_REGISTERED || 2667 dev->reg_state == NETREG_UNREGISTERING) { 2668 ASSERT_RTNL(); 2669 2670 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2671 txq); 2672 if (rc) 2673 return rc; 2674 2675 if (dev->num_tc) 2676 netif_setup_tc(dev, txq); 2677 2678 dev->real_num_tx_queues = txq; 2679 2680 if (disabling) { 2681 synchronize_net(); 2682 qdisc_reset_all_tx_gt(dev, txq); 2683 #ifdef CONFIG_XPS 2684 netif_reset_xps_queues_gt(dev, txq); 2685 #endif 2686 } 2687 } else { 2688 dev->real_num_tx_queues = txq; 2689 } 2690 2691 return 0; 2692 } 2693 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2694 2695 #ifdef CONFIG_SYSFS 2696 /** 2697 * netif_set_real_num_rx_queues - set actual number of RX queues used 2698 * @dev: Network device 2699 * @rxq: Actual number of RX queues 2700 * 2701 * This must be called either with the rtnl_lock held or before 2702 * registration of the net device. Returns 0 on success, or a 2703 * negative error code. If called before registration, it always 2704 * succeeds. 
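 *
 * Illustrative sketch (hypothetical driver; "new_count" is whatever the
 * driver decided to enable, and rtnl is assumed to already be held, as
 * it is for ethtool channel changes):
 *
 *	err = netif_set_real_num_rx_queues(netdev, new_count);
 *	if (err)
 *		return err;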
2705 */ 2706 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2707 { 2708 int rc; 2709 2710 if (rxq < 1 || rxq > dev->num_rx_queues) 2711 return -EINVAL; 2712 2713 if (dev->reg_state == NETREG_REGISTERED) { 2714 ASSERT_RTNL(); 2715 2716 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2717 rxq); 2718 if (rc) 2719 return rc; 2720 } 2721 2722 dev->real_num_rx_queues = rxq; 2723 return 0; 2724 } 2725 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2726 #endif 2727 2728 /** 2729 * netif_get_num_default_rss_queues - default number of RSS queues 2730 * 2731 * This routine should set an upper limit on the number of RSS queues 2732 * used by default by multiqueue devices. 2733 */ 2734 int netif_get_num_default_rss_queues(void) 2735 { 2736 return is_kdump_kernel() ? 2737 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2738 } 2739 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2740 2741 static void __netif_reschedule(struct Qdisc *q) 2742 { 2743 struct softnet_data *sd; 2744 unsigned long flags; 2745 2746 local_irq_save(flags); 2747 sd = this_cpu_ptr(&softnet_data); 2748 q->next_sched = NULL; 2749 *sd->output_queue_tailp = q; 2750 sd->output_queue_tailp = &q->next_sched; 2751 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2752 local_irq_restore(flags); 2753 } 2754 2755 void __netif_schedule(struct Qdisc *q) 2756 { 2757 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2758 __netif_reschedule(q); 2759 } 2760 EXPORT_SYMBOL(__netif_schedule); 2761 2762 struct dev_kfree_skb_cb { 2763 enum skb_free_reason reason; 2764 }; 2765 2766 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2767 { 2768 return (struct dev_kfree_skb_cb *)skb->cb; 2769 } 2770 2771 void netif_schedule_queue(struct netdev_queue *txq) 2772 { 2773 rcu_read_lock(); 2774 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { 2775 struct Qdisc *q = rcu_dereference(txq->qdisc); 2776 2777 __netif_schedule(q); 2778 } 2779 rcu_read_unlock(); 2780 } 2781 EXPORT_SYMBOL(netif_schedule_queue); 2782 2783 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2784 { 2785 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 2786 struct Qdisc *q; 2787 2788 rcu_read_lock(); 2789 q = rcu_dereference(dev_queue->qdisc); 2790 __netif_schedule(q); 2791 rcu_read_unlock(); 2792 } 2793 } 2794 EXPORT_SYMBOL(netif_tx_wake_queue); 2795 2796 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2797 { 2798 unsigned long flags; 2799 2800 if (unlikely(!skb)) 2801 return; 2802 2803 if (likely(refcount_read(&skb->users) == 1)) { 2804 smp_rmb(); 2805 refcount_set(&skb->users, 0); 2806 } else if (likely(!refcount_dec_and_test(&skb->users))) { 2807 return; 2808 } 2809 get_kfree_skb_cb(skb)->reason = reason; 2810 local_irq_save(flags); 2811 skb->next = __this_cpu_read(softnet_data.completion_queue); 2812 __this_cpu_write(softnet_data.completion_queue, skb); 2813 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2814 local_irq_restore(flags); 2815 } 2816 EXPORT_SYMBOL(__dev_kfree_skb_irq); 2817 2818 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 2819 { 2820 if (in_irq() || irqs_disabled()) 2821 __dev_kfree_skb_irq(skb, reason); 2822 else 2823 dev_kfree_skb(skb); 2824 } 2825 EXPORT_SYMBOL(__dev_kfree_skb_any); 2826 2827 2828 /** 2829 * netif_device_detach - mark device as removed 2830 * @dev: network device 2831 * 2832 * Mark device as removed from system and therefore no longer available. 
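 * Drivers typically call this from suspend or surprise-removal paths so
 * that the stack stops queueing packets to hardware that is gone or
 * quiescent.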
2833 */ 2834 void netif_device_detach(struct net_device *dev) 2835 { 2836 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2837 netif_running(dev)) { 2838 netif_tx_stop_all_queues(dev); 2839 } 2840 } 2841 EXPORT_SYMBOL(netif_device_detach); 2842 2843 /** 2844 * netif_device_attach - mark device as attached 2845 * @dev: network device 2846 * 2847 * Mark device as attached from system and restart if needed. 2848 */ 2849 void netif_device_attach(struct net_device *dev) 2850 { 2851 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2852 netif_running(dev)) { 2853 netif_tx_wake_all_queues(dev); 2854 __netdev_watchdog_up(dev); 2855 } 2856 } 2857 EXPORT_SYMBOL(netif_device_attach); 2858 2859 /* 2860 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 2861 * to be used as a distribution range. 2862 */ 2863 static u16 skb_tx_hash(const struct net_device *dev, 2864 const struct net_device *sb_dev, 2865 struct sk_buff *skb) 2866 { 2867 u32 hash; 2868 u16 qoffset = 0; 2869 u16 qcount = dev->real_num_tx_queues; 2870 2871 if (dev->num_tc) { 2872 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2873 2874 qoffset = sb_dev->tc_to_txq[tc].offset; 2875 qcount = sb_dev->tc_to_txq[tc].count; 2876 } 2877 2878 if (skb_rx_queue_recorded(skb)) { 2879 hash = skb_get_rx_queue(skb); 2880 while (unlikely(hash >= qcount)) 2881 hash -= qcount; 2882 return hash + qoffset; 2883 } 2884 2885 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 2886 } 2887 2888 static void skb_warn_bad_offload(const struct sk_buff *skb) 2889 { 2890 static const netdev_features_t null_features; 2891 struct net_device *dev = skb->dev; 2892 const char *name = ""; 2893 2894 if (!net_ratelimit()) 2895 return; 2896 2897 if (dev) { 2898 if (dev->dev.parent) 2899 name = dev_driver_string(dev->dev.parent); 2900 else 2901 name = netdev_name(dev); 2902 } 2903 skb_dump(KERN_WARNING, skb, false); 2904 WARN(1, "%s: caps=(%pNF, %pNF)\n", 2905 name, dev ? &dev->features : &null_features, 2906 skb->sk ? &skb->sk->sk_route_caps : &null_features); 2907 } 2908 2909 /* 2910 * Invalidate hardware checksum when packet is to be mangled, and 2911 * complete checksum manually on outgoing path. 2912 */ 2913 int skb_checksum_help(struct sk_buff *skb) 2914 { 2915 __wsum csum; 2916 int ret = 0, offset; 2917 2918 if (skb->ip_summed == CHECKSUM_COMPLETE) 2919 goto out_set_summed; 2920 2921 if (unlikely(skb_shinfo(skb)->gso_size)) { 2922 skb_warn_bad_offload(skb); 2923 return -EINVAL; 2924 } 2925 2926 /* Before computing a checksum, we should make sure no frag could 2927 * be modified by an external entity : checksum could be wrong. 
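 * skb_has_shared_frag() detects that case and __skb_linearize() below
 * copies the fragment data into private memory before it is checksummed.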
2928 */ 2929 if (skb_has_shared_frag(skb)) { 2930 ret = __skb_linearize(skb); 2931 if (ret) 2932 goto out; 2933 } 2934 2935 offset = skb_checksum_start_offset(skb); 2936 BUG_ON(offset >= skb_headlen(skb)); 2937 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2938 2939 offset += skb->csum_offset; 2940 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2941 2942 if (skb_cloned(skb) && 2943 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2944 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2945 if (ret) 2946 goto out; 2947 } 2948 2949 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 2950 out_set_summed: 2951 skb->ip_summed = CHECKSUM_NONE; 2952 out: 2953 return ret; 2954 } 2955 EXPORT_SYMBOL(skb_checksum_help); 2956 2957 int skb_crc32c_csum_help(struct sk_buff *skb) 2958 { 2959 __le32 crc32c_csum; 2960 int ret = 0, offset, start; 2961 2962 if (skb->ip_summed != CHECKSUM_PARTIAL) 2963 goto out; 2964 2965 if (unlikely(skb_is_gso(skb))) 2966 goto out; 2967 2968 /* Before computing a checksum, we should make sure no frag could 2969 * be modified by an external entity : checksum could be wrong. 2970 */ 2971 if (unlikely(skb_has_shared_frag(skb))) { 2972 ret = __skb_linearize(skb); 2973 if (ret) 2974 goto out; 2975 } 2976 start = skb_checksum_start_offset(skb); 2977 offset = start + offsetof(struct sctphdr, checksum); 2978 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 2979 ret = -EINVAL; 2980 goto out; 2981 } 2982 if (skb_cloned(skb) && 2983 !skb_clone_writable(skb, offset + sizeof(__le32))) { 2984 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2985 if (ret) 2986 goto out; 2987 } 2988 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 2989 skb->len - start, ~(__u32)0, 2990 crc32c_csum_stub)); 2991 *(__le32 *)(skb->data + offset) = crc32c_csum; 2992 skb->ip_summed = CHECKSUM_NONE; 2993 skb->csum_not_inet = 0; 2994 out: 2995 return ret; 2996 } 2997 2998 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2999 { 3000 __be16 type = skb->protocol; 3001 3002 /* Tunnel gso handlers can set protocol to ethernet. */ 3003 if (type == htons(ETH_P_TEB)) { 3004 struct ethhdr *eth; 3005 3006 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3007 return 0; 3008 3009 eth = (struct ethhdr *)skb->data; 3010 type = eth->h_proto; 3011 } 3012 3013 return __vlan_get_protocol(skb, type, depth); 3014 } 3015 3016 /** 3017 * skb_mac_gso_segment - mac layer segmentation handler. 3018 * @skb: buffer to segment 3019 * @features: features for the output path (see dev->features) 3020 */ 3021 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3022 netdev_features_t features) 3023 { 3024 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3025 struct packet_offload *ptype; 3026 int vlan_depth = skb->mac_len; 3027 __be16 type = skb_network_protocol(skb, &vlan_depth); 3028 3029 if (unlikely(!type)) 3030 return ERR_PTR(-EINVAL); 3031 3032 __skb_pull(skb, vlan_depth); 3033 3034 rcu_read_lock(); 3035 list_for_each_entry_rcu(ptype, &offload_base, list) { 3036 if (ptype->type == type && ptype->callbacks.gso_segment) { 3037 segs = ptype->callbacks.gso_segment(skb, features); 3038 break; 3039 } 3040 } 3041 rcu_read_unlock(); 3042 3043 __skb_push(skb, skb->data - skb_mac_header(skb)); 3044 3045 return segs; 3046 } 3047 EXPORT_SYMBOL(skb_mac_gso_segment); 3048 3049 3050 /* openvswitch calls this on rx path, so we need a different check. 
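 * On the tx path anything that is not already CHECKSUM_PARTIAL or
 * CHECKSUM_UNNECESSARY still needs its checksum prepared; on rx only
 * CHECKSUM_NONE does.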
3051 */ 3052 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3053 { 3054 if (tx_path) 3055 return skb->ip_summed != CHECKSUM_PARTIAL && 3056 skb->ip_summed != CHECKSUM_UNNECESSARY; 3057 3058 return skb->ip_summed == CHECKSUM_NONE; 3059 } 3060 3061 /** 3062 * __skb_gso_segment - Perform segmentation on skb. 3063 * @skb: buffer to segment 3064 * @features: features for the output path (see dev->features) 3065 * @tx_path: whether it is called in TX path 3066 * 3067 * This function segments the given skb and returns a list of segments. 3068 * 3069 * It may return NULL if the skb requires no segmentation. This is 3070 * only possible when GSO is used for verifying header integrity. 3071 * 3072 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. 3073 */ 3074 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3075 netdev_features_t features, bool tx_path) 3076 { 3077 struct sk_buff *segs; 3078 3079 if (unlikely(skb_needs_check(skb, tx_path))) { 3080 int err; 3081 3082 /* We're going to init ->check field in TCP or UDP header */ 3083 err = skb_cow_head(skb, 0); 3084 if (err < 0) 3085 return ERR_PTR(err); 3086 } 3087 3088 /* Only report GSO partial support if it will enable us to 3089 * support segmentation on this frame without needing additional 3090 * work. 3091 */ 3092 if (features & NETIF_F_GSO_PARTIAL) { 3093 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3094 struct net_device *dev = skb->dev; 3095 3096 partial_features |= dev->features & dev->gso_partial_features; 3097 if (!skb_gso_ok(skb, features | partial_features)) 3098 features &= ~NETIF_F_GSO_PARTIAL; 3099 } 3100 3101 BUILD_BUG_ON(SKB_SGO_CB_OFFSET + 3102 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3103 3104 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3105 SKB_GSO_CB(skb)->encap_level = 0; 3106 3107 skb_reset_mac_header(skb); 3108 skb_reset_mac_len(skb); 3109 3110 segs = skb_mac_gso_segment(skb, features); 3111 3112 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3113 skb_warn_bad_offload(skb); 3114 3115 return segs; 3116 } 3117 EXPORT_SYMBOL(__skb_gso_segment); 3118 3119 /* Take action when hardware reception checksum errors are detected. */ 3120 #ifdef CONFIG_BUG 3121 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3122 { 3123 if (net_ratelimit()) { 3124 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3125 skb_dump(KERN_ERR, skb, true); 3126 dump_stack(); 3127 } 3128 } 3129 EXPORT_SYMBOL(netdev_rx_csum_fault); 3130 #endif 3131 3132 /* XXX: check that highmem exists at all on the given machine. */ 3133 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3134 { 3135 #ifdef CONFIG_HIGHMEM 3136 int i; 3137 3138 if (!(dev->features & NETIF_F_HIGHDMA)) { 3139 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3140 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3141 3142 if (PageHighMem(skb_frag_page(frag))) 3143 return 1; 3144 } 3145 } 3146 #endif 3147 return 0; 3148 } 3149 3150 /* If MPLS offload request, verify we are testing hardware MPLS features 3151 * instead of standard features for the netdev. 
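 * (An MPLS ethertype simply restricts the feature mask to
 * skb->dev->mpls_features, see net_mpls_features() below.)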
3152 */ 3153 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3154 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3155 netdev_features_t features, 3156 __be16 type) 3157 { 3158 if (eth_p_mpls(type)) 3159 features &= skb->dev->mpls_features; 3160 3161 return features; 3162 } 3163 #else 3164 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3165 netdev_features_t features, 3166 __be16 type) 3167 { 3168 return features; 3169 } 3170 #endif 3171 3172 static netdev_features_t harmonize_features(struct sk_buff *skb, 3173 netdev_features_t features) 3174 { 3175 int tmp; 3176 __be16 type; 3177 3178 type = skb_network_protocol(skb, &tmp); 3179 features = net_mpls_features(skb, features, type); 3180 3181 if (skb->ip_summed != CHECKSUM_NONE && 3182 !can_checksum_protocol(features, type)) { 3183 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3184 } 3185 if (illegal_highdma(skb->dev, skb)) 3186 features &= ~NETIF_F_SG; 3187 3188 return features; 3189 } 3190 3191 netdev_features_t passthru_features_check(struct sk_buff *skb, 3192 struct net_device *dev, 3193 netdev_features_t features) 3194 { 3195 return features; 3196 } 3197 EXPORT_SYMBOL(passthru_features_check); 3198 3199 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3200 struct net_device *dev, 3201 netdev_features_t features) 3202 { 3203 return vlan_features_check(skb, features); 3204 } 3205 3206 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3207 struct net_device *dev, 3208 netdev_features_t features) 3209 { 3210 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3211 3212 if (gso_segs > dev->gso_max_segs) 3213 return features & ~NETIF_F_GSO_MASK; 3214 3215 /* Support for GSO partial features requires software 3216 * intervention before we can actually process the packets 3217 * so we need to strip support for any partial features now 3218 * and we can pull them back in after we have partially 3219 * segmented the frame. 3220 */ 3221 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3222 features &= ~dev->gso_partial_features; 3223 3224 /* Make sure to clear the IPv4 ID mangling feature if the 3225 * IPv4 header has the potential to be fragmented. 3226 */ 3227 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3228 struct iphdr *iph = skb->encapsulation ? 
3229 inner_ip_hdr(skb) : ip_hdr(skb); 3230 3231 if (!(iph->frag_off & htons(IP_DF))) 3232 features &= ~NETIF_F_TSO_MANGLEID; 3233 } 3234 3235 return features; 3236 } 3237 3238 netdev_features_t netif_skb_features(struct sk_buff *skb) 3239 { 3240 struct net_device *dev = skb->dev; 3241 netdev_features_t features = dev->features; 3242 3243 if (skb_is_gso(skb)) 3244 features = gso_features_check(skb, dev, features); 3245 3246 /* If encapsulation offload request, verify we are testing 3247 * hardware encapsulation features instead of standard 3248 * features for the netdev 3249 */ 3250 if (skb->encapsulation) 3251 features &= dev->hw_enc_features; 3252 3253 if (skb_vlan_tagged(skb)) 3254 features = netdev_intersect_features(features, 3255 dev->vlan_features | 3256 NETIF_F_HW_VLAN_CTAG_TX | 3257 NETIF_F_HW_VLAN_STAG_TX); 3258 3259 if (dev->netdev_ops->ndo_features_check) 3260 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3261 features); 3262 else 3263 features &= dflt_features_check(skb, dev, features); 3264 3265 return harmonize_features(skb, features); 3266 } 3267 EXPORT_SYMBOL(netif_skb_features); 3268 3269 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3270 struct netdev_queue *txq, bool more) 3271 { 3272 unsigned int len; 3273 int rc; 3274 3275 if (dev_nit_active(dev)) 3276 dev_queue_xmit_nit(skb, dev); 3277 3278 len = skb->len; 3279 trace_net_dev_start_xmit(skb, dev); 3280 rc = netdev_start_xmit(skb, dev, txq, more); 3281 trace_net_dev_xmit(skb, rc, dev, len); 3282 3283 return rc; 3284 } 3285 3286 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3287 struct netdev_queue *txq, int *ret) 3288 { 3289 struct sk_buff *skb = first; 3290 int rc = NETDEV_TX_OK; 3291 3292 while (skb) { 3293 struct sk_buff *next = skb->next; 3294 3295 skb_mark_not_on_list(skb); 3296 rc = xmit_one(skb, dev, txq, next != NULL); 3297 if (unlikely(!dev_xmit_complete(rc))) { 3298 skb->next = next; 3299 goto out; 3300 } 3301 3302 skb = next; 3303 if (netif_tx_queue_stopped(txq) && skb) { 3304 rc = NETDEV_TX_BUSY; 3305 break; 3306 } 3307 } 3308 3309 out: 3310 *ret = rc; 3311 return skb; 3312 } 3313 3314 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3315 netdev_features_t features) 3316 { 3317 if (skb_vlan_tag_present(skb) && 3318 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3319 skb = __vlan_hwaccel_push_inside(skb); 3320 return skb; 3321 } 3322 3323 int skb_csum_hwoffload_help(struct sk_buff *skb, 3324 const netdev_features_t features) 3325 { 3326 if (unlikely(skb->csum_not_inet)) 3327 return !!(features & NETIF_F_SCTP_CRC) ? 0 : 3328 skb_crc32c_csum_help(skb); 3329 3330 return !!(features & NETIF_F_CSUM_MASK) ? 
0 : skb_checksum_help(skb); 3331 } 3332 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3333 3334 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3335 { 3336 netdev_features_t features; 3337 3338 features = netif_skb_features(skb); 3339 skb = validate_xmit_vlan(skb, features); 3340 if (unlikely(!skb)) 3341 goto out_null; 3342 3343 skb = sk_validate_xmit_skb(skb, dev); 3344 if (unlikely(!skb)) 3345 goto out_null; 3346 3347 if (netif_needs_gso(skb, features)) { 3348 struct sk_buff *segs; 3349 3350 segs = skb_gso_segment(skb, features); 3351 if (IS_ERR(segs)) { 3352 goto out_kfree_skb; 3353 } else if (segs) { 3354 consume_skb(skb); 3355 skb = segs; 3356 } 3357 } else { 3358 if (skb_needs_linearize(skb, features) && 3359 __skb_linearize(skb)) 3360 goto out_kfree_skb; 3361 3362 /* If packet is not checksummed and device does not 3363 * support checksumming for this protocol, complete 3364 * checksumming here. 3365 */ 3366 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3367 if (skb->encapsulation) 3368 skb_set_inner_transport_header(skb, 3369 skb_checksum_start_offset(skb)); 3370 else 3371 skb_set_transport_header(skb, 3372 skb_checksum_start_offset(skb)); 3373 if (skb_csum_hwoffload_help(skb, features)) 3374 goto out_kfree_skb; 3375 } 3376 } 3377 3378 skb = validate_xmit_xfrm(skb, features, again); 3379 3380 return skb; 3381 3382 out_kfree_skb: 3383 kfree_skb(skb); 3384 out_null: 3385 atomic_long_inc(&dev->tx_dropped); 3386 return NULL; 3387 } 3388 3389 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3390 { 3391 struct sk_buff *next, *head = NULL, *tail; 3392 3393 for (; skb != NULL; skb = next) { 3394 next = skb->next; 3395 skb_mark_not_on_list(skb); 3396 3397 /* in case skb wont be segmented, point to itself */ 3398 skb->prev = skb; 3399 3400 skb = validate_xmit_skb(skb, dev, again); 3401 if (!skb) 3402 continue; 3403 3404 if (!head) 3405 head = skb; 3406 else 3407 tail->next = skb; 3408 /* If skb was segmented, skb->prev points to 3409 * the last segment. If not, it still contains skb. 
3410 */ 3411 tail = skb->prev; 3412 } 3413 return head; 3414 } 3415 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3416 3417 static void qdisc_pkt_len_init(struct sk_buff *skb) 3418 { 3419 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3420 3421 qdisc_skb_cb(skb)->pkt_len = skb->len; 3422 3423 /* To get more precise estimation of bytes sent on wire, 3424 * we add to pkt_len the headers size of all segments 3425 */ 3426 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3427 unsigned int hdr_len; 3428 u16 gso_segs = shinfo->gso_segs; 3429 3430 /* mac layer + network layer */ 3431 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3432 3433 /* + transport layer */ 3434 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3435 const struct tcphdr *th; 3436 struct tcphdr _tcphdr; 3437 3438 th = skb_header_pointer(skb, skb_transport_offset(skb), 3439 sizeof(_tcphdr), &_tcphdr); 3440 if (likely(th)) 3441 hdr_len += __tcp_hdrlen(th); 3442 } else { 3443 struct udphdr _udphdr; 3444 3445 if (skb_header_pointer(skb, skb_transport_offset(skb), 3446 sizeof(_udphdr), &_udphdr)) 3447 hdr_len += sizeof(struct udphdr); 3448 } 3449 3450 if (shinfo->gso_type & SKB_GSO_DODGY) 3451 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3452 shinfo->gso_size); 3453 3454 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3455 } 3456 } 3457 3458 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3459 struct net_device *dev, 3460 struct netdev_queue *txq) 3461 { 3462 spinlock_t *root_lock = qdisc_lock(q); 3463 struct sk_buff *to_free = NULL; 3464 bool contended; 3465 int rc; 3466 3467 qdisc_calculate_pkt_len(skb, q); 3468 3469 if (q->flags & TCQ_F_NOLOCK) { 3470 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3471 __qdisc_drop(skb, &to_free); 3472 rc = NET_XMIT_DROP; 3473 } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && 3474 qdisc_run_begin(q)) { 3475 qdisc_bstats_cpu_update(q, skb); 3476 3477 if (sch_direct_xmit(skb, q, dev, txq, NULL, true)) 3478 __qdisc_run(q); 3479 3480 qdisc_run_end(q); 3481 rc = NET_XMIT_SUCCESS; 3482 } else { 3483 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3484 qdisc_run(q); 3485 } 3486 3487 if (unlikely(to_free)) 3488 kfree_skb_list(to_free); 3489 return rc; 3490 } 3491 3492 /* 3493 * Heuristic to force contended enqueues to serialize on a 3494 * separate lock before trying to get qdisc main lock. 3495 * This permits qdisc->running owner to get the lock more 3496 * often and dequeue packets faster. 3497 */ 3498 contended = qdisc_is_running(q); 3499 if (unlikely(contended)) 3500 spin_lock(&q->busylock); 3501 3502 spin_lock(root_lock); 3503 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3504 __qdisc_drop(skb, &to_free); 3505 rc = NET_XMIT_DROP; 3506 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3507 qdisc_run_begin(q)) { 3508 /* 3509 * This is a work-conserving queue; there are no old skbs 3510 * waiting to be sent out; and the qdisc is not running - 3511 * xmit the skb directly. 
3512 */ 3513 3514 qdisc_bstats_update(q, skb); 3515 3516 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3517 if (unlikely(contended)) { 3518 spin_unlock(&q->busylock); 3519 contended = false; 3520 } 3521 __qdisc_run(q); 3522 } 3523 3524 qdisc_run_end(q); 3525 rc = NET_XMIT_SUCCESS; 3526 } else { 3527 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3528 if (qdisc_run_begin(q)) { 3529 if (unlikely(contended)) { 3530 spin_unlock(&q->busylock); 3531 contended = false; 3532 } 3533 __qdisc_run(q); 3534 qdisc_run_end(q); 3535 } 3536 } 3537 spin_unlock(root_lock); 3538 if (unlikely(to_free)) 3539 kfree_skb_list(to_free); 3540 if (unlikely(contended)) 3541 spin_unlock(&q->busylock); 3542 return rc; 3543 } 3544 3545 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3546 static void skb_update_prio(struct sk_buff *skb) 3547 { 3548 const struct netprio_map *map; 3549 const struct sock *sk; 3550 unsigned int prioidx; 3551 3552 if (skb->priority) 3553 return; 3554 map = rcu_dereference_bh(skb->dev->priomap); 3555 if (!map) 3556 return; 3557 sk = skb_to_full_sk(skb); 3558 if (!sk) 3559 return; 3560 3561 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3562 3563 if (prioidx < map->priomap_len) 3564 skb->priority = map->priomap[prioidx]; 3565 } 3566 #else 3567 #define skb_update_prio(skb) 3568 #endif 3569 3570 /** 3571 * dev_loopback_xmit - loop back @skb 3572 * @net: network namespace this loopback is happening in 3573 * @sk: sk needed to be a netfilter okfn 3574 * @skb: buffer to transmit 3575 */ 3576 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3577 { 3578 skb_reset_mac_header(skb); 3579 __skb_pull(skb, skb_network_offset(skb)); 3580 skb->pkt_type = PACKET_LOOPBACK; 3581 skb->ip_summed = CHECKSUM_UNNECESSARY; 3582 WARN_ON(!skb_dst(skb)); 3583 skb_dst_force(skb); 3584 netif_rx_ni(skb); 3585 return 0; 3586 } 3587 EXPORT_SYMBOL(dev_loopback_xmit); 3588 3589 #ifdef CONFIG_NET_EGRESS 3590 static struct sk_buff * 3591 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3592 { 3593 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3594 struct tcf_result cl_res; 3595 3596 if (!miniq) 3597 return skb; 3598 3599 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3600 mini_qdisc_bstats_cpu_update(miniq, skb); 3601 3602 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 3603 case TC_ACT_OK: 3604 case TC_ACT_RECLASSIFY: 3605 skb->tc_index = TC_H_MIN(cl_res.classid); 3606 break; 3607 case TC_ACT_SHOT: 3608 mini_qdisc_qstats_cpu_drop(miniq); 3609 *ret = NET_XMIT_DROP; 3610 kfree_skb(skb); 3611 return NULL; 3612 case TC_ACT_STOLEN: 3613 case TC_ACT_QUEUED: 3614 case TC_ACT_TRAP: 3615 *ret = NET_XMIT_SUCCESS; 3616 consume_skb(skb); 3617 return NULL; 3618 case TC_ACT_REDIRECT: 3619 /* No need to push/pop skb's mac_header here on egress! 
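 * On the xmit path skb->data still points at the MAC header, unlike
 * ingress where it has already been pulled.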
*/ 3620 skb_do_redirect(skb); 3621 *ret = NET_XMIT_SUCCESS; 3622 return NULL; 3623 default: 3624 break; 3625 } 3626 3627 return skb; 3628 } 3629 #endif /* CONFIG_NET_EGRESS */ 3630 3631 #ifdef CONFIG_XPS 3632 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3633 struct xps_dev_maps *dev_maps, unsigned int tci) 3634 { 3635 struct xps_map *map; 3636 int queue_index = -1; 3637 3638 if (dev->num_tc) { 3639 tci *= dev->num_tc; 3640 tci += netdev_get_prio_tc_map(dev, skb->priority); 3641 } 3642 3643 map = rcu_dereference(dev_maps->attr_map[tci]); 3644 if (map) { 3645 if (map->len == 1) 3646 queue_index = map->queues[0]; 3647 else 3648 queue_index = map->queues[reciprocal_scale( 3649 skb_get_hash(skb), map->len)]; 3650 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3651 queue_index = -1; 3652 } 3653 return queue_index; 3654 } 3655 #endif 3656 3657 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3658 struct sk_buff *skb) 3659 { 3660 #ifdef CONFIG_XPS 3661 struct xps_dev_maps *dev_maps; 3662 struct sock *sk = skb->sk; 3663 int queue_index = -1; 3664 3665 if (!static_key_false(&xps_needed)) 3666 return -1; 3667 3668 rcu_read_lock(); 3669 if (!static_key_false(&xps_rxqs_needed)) 3670 goto get_cpus_map; 3671 3672 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); 3673 if (dev_maps) { 3674 int tci = sk_rx_queue_get(sk); 3675 3676 if (tci >= 0 && tci < dev->num_rx_queues) 3677 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3678 tci); 3679 } 3680 3681 get_cpus_map: 3682 if (queue_index < 0) { 3683 dev_maps = rcu_dereference(sb_dev->xps_cpus_map); 3684 if (dev_maps) { 3685 unsigned int tci = skb->sender_cpu - 1; 3686 3687 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3688 tci); 3689 } 3690 } 3691 rcu_read_unlock(); 3692 3693 return queue_index; 3694 #else 3695 return -1; 3696 #endif 3697 } 3698 3699 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3700 struct net_device *sb_dev) 3701 { 3702 return 0; 3703 } 3704 EXPORT_SYMBOL(dev_pick_tx_zero); 3705 3706 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3707 struct net_device *sb_dev) 3708 { 3709 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 3710 } 3711 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 3712 3713 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 3714 struct net_device *sb_dev) 3715 { 3716 struct sock *sk = skb->sk; 3717 int queue_index = sk_tx_queue_get(sk); 3718 3719 sb_dev = sb_dev ? 
: dev; 3720 3721 if (queue_index < 0 || skb->ooo_okay || 3722 queue_index >= dev->real_num_tx_queues) { 3723 int new_index = get_xps_queue(dev, sb_dev, skb); 3724 3725 if (new_index < 0) 3726 new_index = skb_tx_hash(dev, sb_dev, skb); 3727 3728 if (queue_index != new_index && sk && 3729 sk_fullsock(sk) && 3730 rcu_access_pointer(sk->sk_dst_cache)) 3731 sk_tx_queue_set(sk, new_index); 3732 3733 queue_index = new_index; 3734 } 3735 3736 return queue_index; 3737 } 3738 EXPORT_SYMBOL(netdev_pick_tx); 3739 3740 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 3741 struct sk_buff *skb, 3742 struct net_device *sb_dev) 3743 { 3744 int queue_index = 0; 3745 3746 #ifdef CONFIG_XPS 3747 u32 sender_cpu = skb->sender_cpu - 1; 3748 3749 if (sender_cpu >= (u32)NR_CPUS) 3750 skb->sender_cpu = raw_smp_processor_id() + 1; 3751 #endif 3752 3753 if (dev->real_num_tx_queues != 1) { 3754 const struct net_device_ops *ops = dev->netdev_ops; 3755 3756 if (ops->ndo_select_queue) 3757 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 3758 else 3759 queue_index = netdev_pick_tx(dev, skb, sb_dev); 3760 3761 queue_index = netdev_cap_txqueue(dev, queue_index); 3762 } 3763 3764 skb_set_queue_mapping(skb, queue_index); 3765 return netdev_get_tx_queue(dev, queue_index); 3766 } 3767 3768 /** 3769 * __dev_queue_xmit - transmit a buffer 3770 * @skb: buffer to transmit 3771 * @sb_dev: suboordinate device used for L2 forwarding offload 3772 * 3773 * Queue a buffer for transmission to a network device. The caller must 3774 * have set the device and priority and built the buffer before calling 3775 * this function. The function can be called from an interrupt. 3776 * 3777 * A negative errno code is returned on a failure. A success does not 3778 * guarantee the frame will be transmitted as it may be dropped due 3779 * to congestion or traffic shaping. 3780 * 3781 * ----------------------------------------------------------------------------------- 3782 * I notice this method can also return errors from the queue disciplines, 3783 * including NET_XMIT_DROP, which is a positive value. So, errors can also 3784 * be positive. 3785 * 3786 * Regardless of the return value, the skb is consumed, so it is currently 3787 * difficult to retry a send to this method. (You can bump the ref count 3788 * before sending to hold a reference for retry if you are careful.) 3789 * 3790 * When calling this method, interrupts MUST be enabled. This is because 3791 * the BH enable code must have IRQs enabled so that it will not deadlock. 3792 * --BLG 3793 */ 3794 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 3795 { 3796 struct net_device *dev = skb->dev; 3797 struct netdev_queue *txq; 3798 struct Qdisc *q; 3799 int rc = -ENOMEM; 3800 bool again = false; 3801 3802 skb_reset_mac_header(skb); 3803 3804 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 3805 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); 3806 3807 /* Disable soft irqs for various locks below. Also 3808 * stops preemption for RCU. 3809 */ 3810 rcu_read_lock_bh(); 3811 3812 skb_update_prio(skb); 3813 3814 qdisc_pkt_len_init(skb); 3815 #ifdef CONFIG_NET_CLS_ACT 3816 skb->tc_at_ingress = 0; 3817 # ifdef CONFIG_NET_EGRESS 3818 if (static_branch_unlikely(&egress_needed_key)) { 3819 skb = sch_handle_egress(skb, &rc, dev); 3820 if (!skb) 3821 goto out; 3822 } 3823 # endif 3824 #endif 3825 /* If device/qdisc don't need skb->dst, release it right now while 3826 * its hot in this cpu cache. 
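 * (IFF_XMIT_DST_RELEASE, tested below, is how a device advertises that
 * it does not need the dst entry once the skb has been handed over.)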
3827 */ 3828 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 3829 skb_dst_drop(skb); 3830 else 3831 skb_dst_force(skb); 3832 3833 txq = netdev_core_pick_tx(dev, skb, sb_dev); 3834 q = rcu_dereference_bh(txq->qdisc); 3835 3836 trace_net_dev_queue(skb); 3837 if (q->enqueue) { 3838 rc = __dev_xmit_skb(skb, q, dev, txq); 3839 goto out; 3840 } 3841 3842 /* The device has no queue. Common case for software devices: 3843 * loopback, all the sorts of tunnels... 3844 3845 * Really, it is unlikely that netif_tx_lock protection is necessary 3846 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 3847 * counters.) 3848 * However, it is possible, that they rely on protection 3849 * made by us here. 3850 3851 * Check this and shot the lock. It is not prone from deadlocks. 3852 *Either shot noqueue qdisc, it is even simpler 8) 3853 */ 3854 if (dev->flags & IFF_UP) { 3855 int cpu = smp_processor_id(); /* ok because BHs are off */ 3856 3857 if (txq->xmit_lock_owner != cpu) { 3858 if (dev_xmit_recursion()) 3859 goto recursion_alert; 3860 3861 skb = validate_xmit_skb(skb, dev, &again); 3862 if (!skb) 3863 goto out; 3864 3865 HARD_TX_LOCK(dev, txq, cpu); 3866 3867 if (!netif_xmit_stopped(txq)) { 3868 dev_xmit_recursion_inc(); 3869 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 3870 dev_xmit_recursion_dec(); 3871 if (dev_xmit_complete(rc)) { 3872 HARD_TX_UNLOCK(dev, txq); 3873 goto out; 3874 } 3875 } 3876 HARD_TX_UNLOCK(dev, txq); 3877 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 3878 dev->name); 3879 } else { 3880 /* Recursion is detected! It is possible, 3881 * unfortunately 3882 */ 3883 recursion_alert: 3884 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 3885 dev->name); 3886 } 3887 } 3888 3889 rc = -ENETDOWN; 3890 rcu_read_unlock_bh(); 3891 3892 atomic_long_inc(&dev->tx_dropped); 3893 kfree_skb_list(skb); 3894 return rc; 3895 out: 3896 rcu_read_unlock_bh(); 3897 return rc; 3898 } 3899 3900 int dev_queue_xmit(struct sk_buff *skb) 3901 { 3902 return __dev_queue_xmit(skb, NULL); 3903 } 3904 EXPORT_SYMBOL(dev_queue_xmit); 3905 3906 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 3907 { 3908 return __dev_queue_xmit(skb, sb_dev); 3909 } 3910 EXPORT_SYMBOL(dev_queue_xmit_accel); 3911 3912 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3913 { 3914 struct net_device *dev = skb->dev; 3915 struct sk_buff *orig_skb = skb; 3916 struct netdev_queue *txq; 3917 int ret = NETDEV_TX_BUSY; 3918 bool again = false; 3919 3920 if (unlikely(!netif_running(dev) || 3921 !netif_carrier_ok(dev))) 3922 goto drop; 3923 3924 skb = validate_xmit_skb_list(skb, dev, &again); 3925 if (skb != orig_skb) 3926 goto drop; 3927 3928 skb_set_queue_mapping(skb, queue_id); 3929 txq = skb_get_tx_queue(dev, skb); 3930 3931 local_bh_disable(); 3932 3933 HARD_TX_LOCK(dev, txq, smp_processor_id()); 3934 if (!netif_xmit_frozen_or_drv_stopped(txq)) 3935 ret = netdev_start_xmit(skb, dev, txq, false); 3936 HARD_TX_UNLOCK(dev, txq); 3937 3938 local_bh_enable(); 3939 3940 if (!dev_xmit_complete(ret)) 3941 kfree_skb(skb); 3942 3943 return ret; 3944 drop: 3945 atomic_long_inc(&dev->tx_dropped); 3946 kfree_skb_list(skb); 3947 return NET_XMIT_DROP; 3948 } 3949 EXPORT_SYMBOL(dev_direct_xmit); 3950 3951 /************************************************************************* 3952 * Receiver routines 3953 *************************************************************************/ 3954 3955 int netdev_max_backlog __read_mostly = 1000; 3956 
EXPORT_SYMBOL(netdev_max_backlog); 3957 3958 int netdev_tstamp_prequeue __read_mostly = 1; 3959 int netdev_budget __read_mostly = 300; 3960 unsigned int __read_mostly netdev_budget_usecs = 2000; 3961 int weight_p __read_mostly = 64; /* old backlog weight */ 3962 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 3963 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 3964 int dev_rx_weight __read_mostly = 64; 3965 int dev_tx_weight __read_mostly = 64; 3966 3967 /* Called with irq disabled */ 3968 static inline void ____napi_schedule(struct softnet_data *sd, 3969 struct napi_struct *napi) 3970 { 3971 list_add_tail(&napi->poll_list, &sd->poll_list); 3972 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3973 } 3974 3975 #ifdef CONFIG_RPS 3976 3977 /* One global table that all flow-based protocols share. */ 3978 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 3979 EXPORT_SYMBOL(rps_sock_flow_table); 3980 u32 rps_cpu_mask __read_mostly; 3981 EXPORT_SYMBOL(rps_cpu_mask); 3982 3983 struct static_key_false rps_needed __read_mostly; 3984 EXPORT_SYMBOL(rps_needed); 3985 struct static_key_false rfs_needed __read_mostly; 3986 EXPORT_SYMBOL(rfs_needed); 3987 3988 static struct rps_dev_flow * 3989 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3990 struct rps_dev_flow *rflow, u16 next_cpu) 3991 { 3992 if (next_cpu < nr_cpu_ids) { 3993 #ifdef CONFIG_RFS_ACCEL 3994 struct netdev_rx_queue *rxqueue; 3995 struct rps_dev_flow_table *flow_table; 3996 struct rps_dev_flow *old_rflow; 3997 u32 flow_id; 3998 u16 rxq_index; 3999 int rc; 4000 4001 /* Should we steer this flow to a different hardware queue? */ 4002 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4003 !(dev->features & NETIF_F_NTUPLE)) 4004 goto out; 4005 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4006 if (rxq_index == skb_get_rx_queue(skb)) 4007 goto out; 4008 4009 rxqueue = dev->_rx + rxq_index; 4010 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4011 if (!flow_table) 4012 goto out; 4013 flow_id = skb_get_hash(skb) & flow_table->mask; 4014 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4015 rxq_index, flow_id); 4016 if (rc < 0) 4017 goto out; 4018 old_rflow = rflow; 4019 rflow = &flow_table->flows[flow_id]; 4020 rflow->filter = rc; 4021 if (old_rflow->filter == rflow->filter) 4022 old_rflow->filter = RPS_NO_FILTER; 4023 out: 4024 #endif 4025 rflow->last_qtail = 4026 per_cpu(softnet_data, next_cpu).input_queue_head; 4027 } 4028 4029 rflow->cpu = next_cpu; 4030 return rflow; 4031 } 4032 4033 /* 4034 * get_rps_cpu is called from netif_receive_skb and returns the target 4035 * CPU from the RPS map of the receiving queue for a given skb. 4036 * rcu_read_lock must be held on entry. 
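 *
 * Each rps_sock_flow_table entry packs the flow hash into its upper
 * bits and the desired CPU into the low rps_cpu_mask bits, which is why
 * the code below checks (ident ^ hash) & ~rps_cpu_mask before trusting
 * the entry.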
4037 */ 4038 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4039 struct rps_dev_flow **rflowp) 4040 { 4041 const struct rps_sock_flow_table *sock_flow_table; 4042 struct netdev_rx_queue *rxqueue = dev->_rx; 4043 struct rps_dev_flow_table *flow_table; 4044 struct rps_map *map; 4045 int cpu = -1; 4046 u32 tcpu; 4047 u32 hash; 4048 4049 if (skb_rx_queue_recorded(skb)) { 4050 u16 index = skb_get_rx_queue(skb); 4051 4052 if (unlikely(index >= dev->real_num_rx_queues)) { 4053 WARN_ONCE(dev->real_num_rx_queues > 1, 4054 "%s received packet on queue %u, but number " 4055 "of RX queues is %u\n", 4056 dev->name, index, dev->real_num_rx_queues); 4057 goto done; 4058 } 4059 rxqueue += index; 4060 } 4061 4062 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4063 4064 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4065 map = rcu_dereference(rxqueue->rps_map); 4066 if (!flow_table && !map) 4067 goto done; 4068 4069 skb_reset_network_header(skb); 4070 hash = skb_get_hash(skb); 4071 if (!hash) 4072 goto done; 4073 4074 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4075 if (flow_table && sock_flow_table) { 4076 struct rps_dev_flow *rflow; 4077 u32 next_cpu; 4078 u32 ident; 4079 4080 /* First check into global flow table if there is a match */ 4081 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4082 if ((ident ^ hash) & ~rps_cpu_mask) 4083 goto try_rps; 4084 4085 next_cpu = ident & rps_cpu_mask; 4086 4087 /* OK, now we know there is a match, 4088 * we can look at the local (per receive queue) flow table 4089 */ 4090 rflow = &flow_table->flows[hash & flow_table->mask]; 4091 tcpu = rflow->cpu; 4092 4093 /* 4094 * If the desired CPU (where last recvmsg was done) is 4095 * different from current CPU (one in the rx-queue flow 4096 * table entry), switch if one of the following holds: 4097 * - Current CPU is unset (>= nr_cpu_ids). 4098 * - Current CPU is offline. 4099 * - The current CPU's queue tail has advanced beyond the 4100 * last packet that was enqueued using this table entry. 4101 * This guarantees that all previous packets for the flow 4102 * have been dequeued, thus preserving in order delivery. 4103 */ 4104 if (unlikely(tcpu != next_cpu) && 4105 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4106 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4107 rflow->last_qtail)) >= 0)) { 4108 tcpu = next_cpu; 4109 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4110 } 4111 4112 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4113 *rflowp = rflow; 4114 cpu = tcpu; 4115 goto done; 4116 } 4117 } 4118 4119 try_rps: 4120 4121 if (map) { 4122 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4123 if (cpu_online(tcpu)) { 4124 cpu = tcpu; 4125 goto done; 4126 } 4127 } 4128 4129 done: 4130 return cpu; 4131 } 4132 4133 #ifdef CONFIG_RFS_ACCEL 4134 4135 /** 4136 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4137 * @dev: Device on which the filter was set 4138 * @rxq_index: RX queue index 4139 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4140 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4141 * 4142 * Drivers that implement ndo_rx_flow_steer() should periodically call 4143 * this function for each installed filter and remove the filters for 4144 * which it returns %true. 
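 *
 * Illustrative sketch of such a scan (the driver-private structures and
 * helpers here are hypothetical, not from this file):
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_flow_filter *f = &priv->filters[i];
 *
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_hw_remove_filter(priv, f);
 *	}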
4145 */ 4146 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4147 u32 flow_id, u16 filter_id) 4148 { 4149 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4150 struct rps_dev_flow_table *flow_table; 4151 struct rps_dev_flow *rflow; 4152 bool expire = true; 4153 unsigned int cpu; 4154 4155 rcu_read_lock(); 4156 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4157 if (flow_table && flow_id <= flow_table->mask) { 4158 rflow = &flow_table->flows[flow_id]; 4159 cpu = READ_ONCE(rflow->cpu); 4160 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4161 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4162 rflow->last_qtail) < 4163 (int)(10 * flow_table->mask))) 4164 expire = false; 4165 } 4166 rcu_read_unlock(); 4167 return expire; 4168 } 4169 EXPORT_SYMBOL(rps_may_expire_flow); 4170 4171 #endif /* CONFIG_RFS_ACCEL */ 4172 4173 /* Called from hardirq (IPI) context */ 4174 static void rps_trigger_softirq(void *data) 4175 { 4176 struct softnet_data *sd = data; 4177 4178 ____napi_schedule(sd, &sd->backlog); 4179 sd->received_rps++; 4180 } 4181 4182 #endif /* CONFIG_RPS */ 4183 4184 /* 4185 * Check if this softnet_data structure is another cpu one 4186 * If yes, queue it to our IPI list and return 1 4187 * If no, return 0 4188 */ 4189 static int rps_ipi_queued(struct softnet_data *sd) 4190 { 4191 #ifdef CONFIG_RPS 4192 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4193 4194 if (sd != mysd) { 4195 sd->rps_ipi_next = mysd->rps_ipi_list; 4196 mysd->rps_ipi_list = sd; 4197 4198 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4199 return 1; 4200 } 4201 #endif /* CONFIG_RPS */ 4202 return 0; 4203 } 4204 4205 #ifdef CONFIG_NET_FLOW_LIMIT 4206 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4207 #endif 4208 4209 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4210 { 4211 #ifdef CONFIG_NET_FLOW_LIMIT 4212 struct sd_flow_limit *fl; 4213 struct softnet_data *sd; 4214 unsigned int old_flow, new_flow; 4215 4216 if (qlen < (netdev_max_backlog >> 1)) 4217 return false; 4218 4219 sd = this_cpu_ptr(&softnet_data); 4220 4221 rcu_read_lock(); 4222 fl = rcu_dereference(sd->flow_limit); 4223 if (fl) { 4224 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4225 old_flow = fl->history[fl->history_head]; 4226 fl->history[fl->history_head] = new_flow; 4227 4228 fl->history_head++; 4229 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4230 4231 if (likely(fl->buckets[old_flow])) 4232 fl->buckets[old_flow]--; 4233 4234 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4235 fl->count++; 4236 rcu_read_unlock(); 4237 return true; 4238 } 4239 } 4240 rcu_read_unlock(); 4241 #endif 4242 return false; 4243 } 4244 4245 /* 4246 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4247 * queue (may be a remote CPU queue). 
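 *
 * The skb is dropped (NET_RX_DROP, dev->rx_dropped incremented) when the
 * device is no longer running, when the backlog already holds more than
 * netdev_max_backlog packets, or when skb_flow_limit() decides the flow is
 * hogging the queue; otherwise NET_RX_SUCCESS is returned.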
4248 */ 4249 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4250 unsigned int *qtail) 4251 { 4252 struct softnet_data *sd; 4253 unsigned long flags; 4254 unsigned int qlen; 4255 4256 sd = &per_cpu(softnet_data, cpu); 4257 4258 local_irq_save(flags); 4259 4260 rps_lock(sd); 4261 if (!netif_running(skb->dev)) 4262 goto drop; 4263 qlen = skb_queue_len(&sd->input_pkt_queue); 4264 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4265 if (qlen) { 4266 enqueue: 4267 __skb_queue_tail(&sd->input_pkt_queue, skb); 4268 input_queue_tail_incr_save(sd, qtail); 4269 rps_unlock(sd); 4270 local_irq_restore(flags); 4271 return NET_RX_SUCCESS; 4272 } 4273 4274 /* Schedule NAPI for backlog device 4275 * We can use non atomic operation since we own the queue lock 4276 */ 4277 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4278 if (!rps_ipi_queued(sd)) 4279 ____napi_schedule(sd, &sd->backlog); 4280 } 4281 goto enqueue; 4282 } 4283 4284 drop: 4285 sd->dropped++; 4286 rps_unlock(sd); 4287 4288 local_irq_restore(flags); 4289 4290 atomic_long_inc(&skb->dev->rx_dropped); 4291 kfree_skb(skb); 4292 return NET_RX_DROP; 4293 } 4294 4295 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4296 { 4297 struct net_device *dev = skb->dev; 4298 struct netdev_rx_queue *rxqueue; 4299 4300 rxqueue = dev->_rx; 4301 4302 if (skb_rx_queue_recorded(skb)) { 4303 u16 index = skb_get_rx_queue(skb); 4304 4305 if (unlikely(index >= dev->real_num_rx_queues)) { 4306 WARN_ONCE(dev->real_num_rx_queues > 1, 4307 "%s received packet on queue %u, but number " 4308 "of RX queues is %u\n", 4309 dev->name, index, dev->real_num_rx_queues); 4310 4311 return rxqueue; /* Return first rxqueue */ 4312 } 4313 rxqueue += index; 4314 } 4315 return rxqueue; 4316 } 4317 4318 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4319 struct xdp_buff *xdp, 4320 struct bpf_prog *xdp_prog) 4321 { 4322 struct netdev_rx_queue *rxqueue; 4323 void *orig_data, *orig_data_end; 4324 u32 metalen, act = XDP_DROP; 4325 __be16 orig_eth_type; 4326 struct ethhdr *eth; 4327 bool orig_bcast; 4328 int hlen, off; 4329 u32 mac_len; 4330 4331 /* Reinjected packets coming from act_mirred or similar should 4332 * not get XDP generic processing. 4333 */ 4334 if (skb_cloned(skb) || skb_is_tc_redirected(skb)) 4335 return XDP_PASS; 4336 4337 /* XDP packets must be linear and must have sufficient headroom 4338 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4339 * native XDP provides, thus we need to do it here as well. 4340 */ 4341 if (skb_is_nonlinear(skb) || 4342 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4343 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4344 int troom = skb->tail + skb->data_len - skb->end; 4345 4346 /* In case we have to go down the path and also linearize, 4347 * then lets do the pskb_expand_head() work just once here. 4348 */ 4349 if (pskb_expand_head(skb, 4350 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4351 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4352 goto do_drop; 4353 if (skb_linearize(skb)) 4354 goto do_drop; 4355 } 4356 4357 /* The XDP program wants to see the packet starting at the MAC 4358 * header. 
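 *
 * The xdp_buff set up below follows the usual invariant
 * data_hard_start <= data_meta <= data < data_end, with xdp->data pointing
 * at the Ethernet header and at least XDP_PACKET_HEADROOM bytes of headroom
 * in front of it.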
4359 */ 4360 mac_len = skb->data - skb_mac_header(skb); 4361 hlen = skb_headlen(skb) + mac_len; 4362 xdp->data = skb->data - mac_len; 4363 xdp->data_meta = xdp->data; 4364 xdp->data_end = xdp->data + hlen; 4365 xdp->data_hard_start = skb->data - skb_headroom(skb); 4366 orig_data_end = xdp->data_end; 4367 orig_data = xdp->data; 4368 eth = (struct ethhdr *)xdp->data; 4369 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4370 orig_eth_type = eth->h_proto; 4371 4372 rxqueue = netif_get_rxqueue(skb); 4373 xdp->rxq = &rxqueue->xdp_rxq; 4374 4375 act = bpf_prog_run_xdp(xdp_prog, xdp); 4376 4377 off = xdp->data - orig_data; 4378 if (off > 0) 4379 __skb_pull(skb, off); 4380 else if (off < 0) 4381 __skb_push(skb, -off); 4382 skb->mac_header += off; 4383 4384 /* check if bpf_xdp_adjust_tail was used. it can only "shrink" 4385 * pckt. 4386 */ 4387 off = orig_data_end - xdp->data_end; 4388 if (off != 0) { 4389 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4390 skb->len -= off; 4391 4392 } 4393 4394 /* check if XDP changed eth hdr such SKB needs update */ 4395 eth = (struct ethhdr *)xdp->data; 4396 if ((orig_eth_type != eth->h_proto) || 4397 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4398 __skb_push(skb, ETH_HLEN); 4399 skb->protocol = eth_type_trans(skb, skb->dev); 4400 } 4401 4402 switch (act) { 4403 case XDP_REDIRECT: 4404 case XDP_TX: 4405 __skb_push(skb, mac_len); 4406 break; 4407 case XDP_PASS: 4408 metalen = xdp->data - xdp->data_meta; 4409 if (metalen) 4410 skb_metadata_set(skb, metalen); 4411 break; 4412 default: 4413 bpf_warn_invalid_xdp_action(act); 4414 /* fall through */ 4415 case XDP_ABORTED: 4416 trace_xdp_exception(skb->dev, xdp_prog, act); 4417 /* fall through */ 4418 case XDP_DROP: 4419 do_drop: 4420 kfree_skb(skb); 4421 break; 4422 } 4423 4424 return act; 4425 } 4426 4427 /* When doing generic XDP we have to bypass the qdisc layer and the 4428 * network taps in order to match in-driver-XDP behavior. 
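 *
 * To that end, generic_xdp_tx() picks a TX queue via netdev_core_pick_tx()
 * and hands the skb straight to netdev_start_xmit(); a frame that cannot be
 * sent is freed and traced as an XDP_TX exception rather than being
 * requeued.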
4429 */ 4430 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4431 { 4432 struct net_device *dev = skb->dev; 4433 struct netdev_queue *txq; 4434 bool free_skb = true; 4435 int cpu, rc; 4436 4437 txq = netdev_core_pick_tx(dev, skb, NULL); 4438 cpu = smp_processor_id(); 4439 HARD_TX_LOCK(dev, txq, cpu); 4440 if (!netif_xmit_stopped(txq)) { 4441 rc = netdev_start_xmit(skb, dev, txq, 0); 4442 if (dev_xmit_complete(rc)) 4443 free_skb = false; 4444 } 4445 HARD_TX_UNLOCK(dev, txq); 4446 if (free_skb) { 4447 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4448 kfree_skb(skb); 4449 } 4450 } 4451 EXPORT_SYMBOL_GPL(generic_xdp_tx); 4452 4453 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4454 4455 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4456 { 4457 if (xdp_prog) { 4458 struct xdp_buff xdp; 4459 u32 act; 4460 int err; 4461 4462 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4463 if (act != XDP_PASS) { 4464 switch (act) { 4465 case XDP_REDIRECT: 4466 err = xdp_do_generic_redirect(skb->dev, skb, 4467 &xdp, xdp_prog); 4468 if (err) 4469 goto out_redir; 4470 break; 4471 case XDP_TX: 4472 generic_xdp_tx(skb, xdp_prog); 4473 break; 4474 } 4475 return XDP_DROP; 4476 } 4477 } 4478 return XDP_PASS; 4479 out_redir: 4480 kfree_skb(skb); 4481 return XDP_DROP; 4482 } 4483 EXPORT_SYMBOL_GPL(do_xdp_generic); 4484 4485 static int netif_rx_internal(struct sk_buff *skb) 4486 { 4487 int ret; 4488 4489 net_timestamp_check(netdev_tstamp_prequeue, skb); 4490 4491 trace_netif_rx(skb); 4492 4493 #ifdef CONFIG_RPS 4494 if (static_branch_unlikely(&rps_needed)) { 4495 struct rps_dev_flow voidflow, *rflow = &voidflow; 4496 int cpu; 4497 4498 preempt_disable(); 4499 rcu_read_lock(); 4500 4501 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4502 if (cpu < 0) 4503 cpu = smp_processor_id(); 4504 4505 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4506 4507 rcu_read_unlock(); 4508 preempt_enable(); 4509 } else 4510 #endif 4511 { 4512 unsigned int qtail; 4513 4514 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4515 put_cpu(); 4516 } 4517 return ret; 4518 } 4519 4520 /** 4521 * netif_rx - post buffer to the network code 4522 * @skb: buffer to post 4523 * 4524 * This function receives a packet from a device driver and queues it for 4525 * the upper (protocol) levels to process. It always succeeds. The buffer 4526 * may be dropped during processing for congestion control or by the 4527 * protocol layers. 
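 *
 * netif_rx() may be called from any context, including hard interrupt
 * context; callers running in process context should prefer netif_rx_ni()
 * below so that any softirq raised by the queued packet gets a chance to
 * run.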
4528 * 4529 * return values: 4530 * NET_RX_SUCCESS (no congestion) 4531 * NET_RX_DROP (packet was dropped) 4532 * 4533 */ 4534 4535 int netif_rx(struct sk_buff *skb) 4536 { 4537 int ret; 4538 4539 trace_netif_rx_entry(skb); 4540 4541 ret = netif_rx_internal(skb); 4542 trace_netif_rx_exit(ret); 4543 4544 return ret; 4545 } 4546 EXPORT_SYMBOL(netif_rx); 4547 4548 int netif_rx_ni(struct sk_buff *skb) 4549 { 4550 int err; 4551 4552 trace_netif_rx_ni_entry(skb); 4553 4554 preempt_disable(); 4555 err = netif_rx_internal(skb); 4556 if (local_softirq_pending()) 4557 do_softirq(); 4558 preempt_enable(); 4559 trace_netif_rx_ni_exit(err); 4560 4561 return err; 4562 } 4563 EXPORT_SYMBOL(netif_rx_ni); 4564 4565 static __latent_entropy void net_tx_action(struct softirq_action *h) 4566 { 4567 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4568 4569 if (sd->completion_queue) { 4570 struct sk_buff *clist; 4571 4572 local_irq_disable(); 4573 clist = sd->completion_queue; 4574 sd->completion_queue = NULL; 4575 local_irq_enable(); 4576 4577 while (clist) { 4578 struct sk_buff *skb = clist; 4579 4580 clist = clist->next; 4581 4582 WARN_ON(refcount_read(&skb->users)); 4583 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4584 trace_consume_skb(skb); 4585 else 4586 trace_kfree_skb(skb, net_tx_action); 4587 4588 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4589 __kfree_skb(skb); 4590 else 4591 __kfree_skb_defer(skb); 4592 } 4593 4594 __kfree_skb_flush(); 4595 } 4596 4597 if (sd->output_queue) { 4598 struct Qdisc *head; 4599 4600 local_irq_disable(); 4601 head = sd->output_queue; 4602 sd->output_queue = NULL; 4603 sd->output_queue_tailp = &sd->output_queue; 4604 local_irq_enable(); 4605 4606 while (head) { 4607 struct Qdisc *q = head; 4608 spinlock_t *root_lock = NULL; 4609 4610 head = head->next_sched; 4611 4612 if (!(q->flags & TCQ_F_NOLOCK)) { 4613 root_lock = qdisc_lock(q); 4614 spin_lock(root_lock); 4615 } 4616 /* We need to make sure head->next_sched is read 4617 * before clearing __QDISC_STATE_SCHED 4618 */ 4619 smp_mb__before_atomic(); 4620 clear_bit(__QDISC_STATE_SCHED, &q->state); 4621 qdisc_run(q); 4622 if (root_lock) 4623 spin_unlock(root_lock); 4624 } 4625 } 4626 4627 xfrm_dev_backlog(sd); 4628 } 4629 4630 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4631 /* This hook is defined here for ATM LANE */ 4632 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4633 unsigned char *addr) __read_mostly; 4634 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4635 #endif 4636 4637 static inline struct sk_buff * 4638 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4639 struct net_device *orig_dev) 4640 { 4641 #ifdef CONFIG_NET_CLS_ACT 4642 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4643 struct tcf_result cl_res; 4644 4645 /* If there's at least one ingress present somewhere (so 4646 * we get here via enabled static key), remaining devices 4647 * that are not configured with an ingress qdisc will bail 4648 * out here. 
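 *
 * The static key is flipped when an ingress or clsact qdisc is attached to
 * some device (e.g. "tc qdisc add dev eth0 clsact"); the miniq test below
 * is the cheap per-device filter.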
4649 */ 4650 if (!miniq) 4651 return skb; 4652 4653 if (*pt_prev) { 4654 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4655 *pt_prev = NULL; 4656 } 4657 4658 qdisc_skb_cb(skb)->pkt_len = skb->len; 4659 skb->tc_at_ingress = 1; 4660 mini_qdisc_bstats_cpu_update(miniq, skb); 4661 4662 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 4663 case TC_ACT_OK: 4664 case TC_ACT_RECLASSIFY: 4665 skb->tc_index = TC_H_MIN(cl_res.classid); 4666 break; 4667 case TC_ACT_SHOT: 4668 mini_qdisc_qstats_cpu_drop(miniq); 4669 kfree_skb(skb); 4670 return NULL; 4671 case TC_ACT_STOLEN: 4672 case TC_ACT_QUEUED: 4673 case TC_ACT_TRAP: 4674 consume_skb(skb); 4675 return NULL; 4676 case TC_ACT_REDIRECT: 4677 /* skb_mac_header check was done by cls/act_bpf, so 4678 * we can safely push the L2 header back before 4679 * redirecting to another netdev 4680 */ 4681 __skb_push(skb, skb->mac_len); 4682 skb_do_redirect(skb); 4683 return NULL; 4684 case TC_ACT_CONSUMED: 4685 return NULL; 4686 default: 4687 break; 4688 } 4689 #endif /* CONFIG_NET_CLS_ACT */ 4690 return skb; 4691 } 4692 4693 /** 4694 * netdev_is_rx_handler_busy - check if receive handler is registered 4695 * @dev: device to check 4696 * 4697 * Check if a receive handler is already registered for a given device. 4698 * Return true if there one. 4699 * 4700 * The caller must hold the rtnl_mutex. 4701 */ 4702 bool netdev_is_rx_handler_busy(struct net_device *dev) 4703 { 4704 ASSERT_RTNL(); 4705 return dev && rtnl_dereference(dev->rx_handler); 4706 } 4707 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 4708 4709 /** 4710 * netdev_rx_handler_register - register receive handler 4711 * @dev: device to register a handler for 4712 * @rx_handler: receive handler to register 4713 * @rx_handler_data: data pointer that is used by rx handler 4714 * 4715 * Register a receive handler for a device. This handler will then be 4716 * called from __netif_receive_skb. A negative errno code is returned 4717 * on a failure. 4718 * 4719 * The caller must hold the rtnl_mutex. 4720 * 4721 * For a general description of rx_handler, see enum rx_handler_result. 4722 */ 4723 int netdev_rx_handler_register(struct net_device *dev, 4724 rx_handler_func_t *rx_handler, 4725 void *rx_handler_data) 4726 { 4727 if (netdev_is_rx_handler_busy(dev)) 4728 return -EBUSY; 4729 4730 if (dev->priv_flags & IFF_NO_RX_HANDLER) 4731 return -EINVAL; 4732 4733 /* Note: rx_handler_data must be set before rx_handler */ 4734 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 4735 rcu_assign_pointer(dev->rx_handler, rx_handler); 4736 4737 return 0; 4738 } 4739 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 4740 4741 /** 4742 * netdev_rx_handler_unregister - unregister receive handler 4743 * @dev: device to unregister a handler from 4744 * 4745 * Unregister a receive handler from a device. 4746 * 4747 * The caller must hold the rtnl_mutex. 4748 */ 4749 void netdev_rx_handler_unregister(struct net_device *dev) 4750 { 4751 4752 ASSERT_RTNL(); 4753 RCU_INIT_POINTER(dev->rx_handler, NULL); 4754 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 4755 * section has a guarantee to see a non NULL rx_handler_data 4756 * as well. 4757 */ 4758 synchronize_net(); 4759 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 4760 } 4761 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 4762 4763 /* 4764 * Limit the use of PFMEMALLOC reserves to those protocols that implement 4765 * the special handling of PFMEMALLOC skbs. 
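 *
 * PFMEMALLOC skbs come from the emergency memory reserves (used by
 * SOCK_MEMALLOC sockets, e.g. swap over network storage).  Packets of any
 * other protocol are dropped in __netif_receive_skb_core() instead of being
 * allowed to consume those reserves.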
4766 */ 4767 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 4768 { 4769 switch (skb->protocol) { 4770 case htons(ETH_P_ARP): 4771 case htons(ETH_P_IP): 4772 case htons(ETH_P_IPV6): 4773 case htons(ETH_P_8021Q): 4774 case htons(ETH_P_8021AD): 4775 return true; 4776 default: 4777 return false; 4778 } 4779 } 4780 4781 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 4782 int *ret, struct net_device *orig_dev) 4783 { 4784 #ifdef CONFIG_NETFILTER_INGRESS 4785 if (nf_hook_ingress_active(skb)) { 4786 int ingress_retval; 4787 4788 if (*pt_prev) { 4789 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4790 *pt_prev = NULL; 4791 } 4792 4793 rcu_read_lock(); 4794 ingress_retval = nf_hook_ingress(skb); 4795 rcu_read_unlock(); 4796 return ingress_retval; 4797 } 4798 #endif /* CONFIG_NETFILTER_INGRESS */ 4799 return 0; 4800 } 4801 4802 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4803 struct packet_type **ppt_prev) 4804 { 4805 struct packet_type *ptype, *pt_prev; 4806 rx_handler_func_t *rx_handler; 4807 struct net_device *orig_dev; 4808 bool deliver_exact = false; 4809 int ret = NET_RX_DROP; 4810 __be16 type; 4811 4812 net_timestamp_check(!netdev_tstamp_prequeue, skb); 4813 4814 trace_netif_receive_skb(skb); 4815 4816 orig_dev = skb->dev; 4817 4818 skb_reset_network_header(skb); 4819 if (!skb_transport_header_was_set(skb)) 4820 skb_reset_transport_header(skb); 4821 skb_reset_mac_len(skb); 4822 4823 pt_prev = NULL; 4824 4825 another_round: 4826 skb->skb_iif = skb->dev->ifindex; 4827 4828 __this_cpu_inc(softnet_data.processed); 4829 4830 if (static_branch_unlikely(&generic_xdp_needed_key)) { 4831 int ret2; 4832 4833 preempt_disable(); 4834 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 4835 preempt_enable(); 4836 4837 if (ret2 != XDP_PASS) 4838 return NET_RX_DROP; 4839 skb_reset_mac_len(skb); 4840 } 4841 4842 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 4843 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 4844 skb = skb_vlan_untag(skb); 4845 if (unlikely(!skb)) 4846 goto out; 4847 } 4848 4849 if (skb_skip_tc_classify(skb)) 4850 goto skip_classify; 4851 4852 if (pfmemalloc) 4853 goto skip_taps; 4854 4855 list_for_each_entry_rcu(ptype, &ptype_all, list) { 4856 if (pt_prev) 4857 ret = deliver_skb(skb, pt_prev, orig_dev); 4858 pt_prev = ptype; 4859 } 4860 4861 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 4862 if (pt_prev) 4863 ret = deliver_skb(skb, pt_prev, orig_dev); 4864 pt_prev = ptype; 4865 } 4866 4867 skip_taps: 4868 #ifdef CONFIG_NET_INGRESS 4869 if (static_branch_unlikely(&ingress_needed_key)) { 4870 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); 4871 if (!skb) 4872 goto out; 4873 4874 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 4875 goto out; 4876 } 4877 #endif 4878 skb_reset_tc(skb); 4879 skip_classify: 4880 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 4881 goto drop; 4882 4883 if (skb_vlan_tag_present(skb)) { 4884 if (pt_prev) { 4885 ret = deliver_skb(skb, pt_prev, orig_dev); 4886 pt_prev = NULL; 4887 } 4888 if (vlan_do_receive(&skb)) 4889 goto another_round; 4890 else if (unlikely(!skb)) 4891 goto out; 4892 } 4893 4894 rx_handler = rcu_dereference(skb->dev->rx_handler); 4895 if (rx_handler) { 4896 if (pt_prev) { 4897 ret = deliver_skb(skb, pt_prev, orig_dev); 4898 pt_prev = NULL; 4899 } 4900 switch (rx_handler(&skb)) { 4901 case RX_HANDLER_CONSUMED: 4902 ret = NET_RX_SUCCESS; 4903 goto out; 4904 case RX_HANDLER_ANOTHER: 4905 goto another_round; 4906 case RX_HANDLER_EXACT: 4907 
deliver_exact = true; 4908 case RX_HANDLER_PASS: 4909 break; 4910 default: 4911 BUG(); 4912 } 4913 } 4914 4915 if (unlikely(skb_vlan_tag_present(skb))) { 4916 check_vlan_id: 4917 if (skb_vlan_tag_get_id(skb)) { 4918 /* Vlan id is non 0 and vlan_do_receive() above couldn't 4919 * find vlan device. 4920 */ 4921 skb->pkt_type = PACKET_OTHERHOST; 4922 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 4923 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 4924 /* Outer header is 802.1P with vlan 0, inner header is 4925 * 802.1Q or 802.1AD and vlan_do_receive() above could 4926 * not find vlan dev for vlan id 0. 4927 */ 4928 __vlan_hwaccel_clear_tag(skb); 4929 skb = skb_vlan_untag(skb); 4930 if (unlikely(!skb)) 4931 goto out; 4932 if (vlan_do_receive(&skb)) 4933 /* After stripping off 802.1P header with vlan 0 4934 * vlan dev is found for inner header. 4935 */ 4936 goto another_round; 4937 else if (unlikely(!skb)) 4938 goto out; 4939 else 4940 /* We have stripped outer 802.1P vlan 0 header. 4941 * But could not find vlan dev. 4942 * check again for vlan id to set OTHERHOST. 4943 */ 4944 goto check_vlan_id; 4945 } 4946 /* Note: we might in the future use prio bits 4947 * and set skb->priority like in vlan_do_receive() 4948 * For the time being, just ignore Priority Code Point 4949 */ 4950 __vlan_hwaccel_clear_tag(skb); 4951 } 4952 4953 type = skb->protocol; 4954 4955 /* deliver only exact match when indicated */ 4956 if (likely(!deliver_exact)) { 4957 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4958 &ptype_base[ntohs(type) & 4959 PTYPE_HASH_MASK]); 4960 } 4961 4962 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4963 &orig_dev->ptype_specific); 4964 4965 if (unlikely(skb->dev != orig_dev)) { 4966 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4967 &skb->dev->ptype_specific); 4968 } 4969 4970 if (pt_prev) { 4971 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 4972 goto drop; 4973 *ppt_prev = pt_prev; 4974 } else { 4975 drop: 4976 if (!deliver_exact) 4977 atomic_long_inc(&skb->dev->rx_dropped); 4978 else 4979 atomic_long_inc(&skb->dev->rx_nohandler); 4980 kfree_skb(skb); 4981 /* Jamal, now you will not able to escape explaining 4982 * me how you were going to use this. :-) 4983 */ 4984 ret = NET_RX_DROP; 4985 } 4986 4987 out: 4988 return ret; 4989 } 4990 4991 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 4992 { 4993 struct net_device *orig_dev = skb->dev; 4994 struct packet_type *pt_prev = NULL; 4995 int ret; 4996 4997 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 4998 if (pt_prev) 4999 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5000 skb->dev, pt_prev, orig_dev); 5001 return ret; 5002 } 5003 5004 /** 5005 * netif_receive_skb_core - special purpose version of netif_receive_skb 5006 * @skb: buffer to process 5007 * 5008 * More direct receive version of netif_receive_skb(). It should 5009 * only be used by callers that have a need to skip RPS and Generic XDP. 5010 * Caller must also take care of handling if (page_is_)pfmemalloc. 5011 * 5012 * This function may only be called from softirq context and interrupts 5013 * should be enabled. 
5014 * 5015 * Return values (usually ignored): 5016 * NET_RX_SUCCESS: no congestion 5017 * NET_RX_DROP: packet was dropped 5018 */ 5019 int netif_receive_skb_core(struct sk_buff *skb) 5020 { 5021 int ret; 5022 5023 rcu_read_lock(); 5024 ret = __netif_receive_skb_one_core(skb, false); 5025 rcu_read_unlock(); 5026 5027 return ret; 5028 } 5029 EXPORT_SYMBOL(netif_receive_skb_core); 5030 5031 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5032 struct packet_type *pt_prev, 5033 struct net_device *orig_dev) 5034 { 5035 struct sk_buff *skb, *next; 5036 5037 if (!pt_prev) 5038 return; 5039 if (list_empty(head)) 5040 return; 5041 if (pt_prev->list_func != NULL) 5042 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5043 ip_list_rcv, head, pt_prev, orig_dev); 5044 else 5045 list_for_each_entry_safe(skb, next, head, list) { 5046 skb_list_del_init(skb); 5047 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5048 } 5049 } 5050 5051 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5052 { 5053 /* Fast-path assumptions: 5054 * - There is no RX handler. 5055 * - Only one packet_type matches. 5056 * If either of these fails, we will end up doing some per-packet 5057 * processing in-line, then handling the 'last ptype' for the whole 5058 * sublist. This can't cause out-of-order delivery to any single ptype, 5059 * because the 'last ptype' must be constant across the sublist, and all 5060 * other ptypes are handled per-packet. 5061 */ 5062 /* Current (common) ptype of sublist */ 5063 struct packet_type *pt_curr = NULL; 5064 /* Current (common) orig_dev of sublist */ 5065 struct net_device *od_curr = NULL; 5066 struct list_head sublist; 5067 struct sk_buff *skb, *next; 5068 5069 INIT_LIST_HEAD(&sublist); 5070 list_for_each_entry_safe(skb, next, head, list) { 5071 struct net_device *orig_dev = skb->dev; 5072 struct packet_type *pt_prev = NULL; 5073 5074 skb_list_del_init(skb); 5075 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5076 if (!pt_prev) 5077 continue; 5078 if (pt_curr != pt_prev || od_curr != orig_dev) { 5079 /* dispatch old sublist */ 5080 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5081 /* start new sublist */ 5082 INIT_LIST_HEAD(&sublist); 5083 pt_curr = pt_prev; 5084 od_curr = orig_dev; 5085 } 5086 list_add_tail(&skb->list, &sublist); 5087 } 5088 5089 /* dispatch final sublist */ 5090 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5091 } 5092 5093 static int __netif_receive_skb(struct sk_buff *skb) 5094 { 5095 int ret; 5096 5097 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5098 unsigned int noreclaim_flag; 5099 5100 /* 5101 * PFMEMALLOC skbs are special, they should 5102 * - be delivered to SOCK_MEMALLOC sockets only 5103 * - stay away from userspace 5104 * - have bounded memory usage 5105 * 5106 * Use PF_MEMALLOC as this saves us from propagating the allocation 5107 * context down to all allocation sites. 5108 */ 5109 noreclaim_flag = memalloc_noreclaim_save(); 5110 ret = __netif_receive_skb_one_core(skb, true); 5111 memalloc_noreclaim_restore(noreclaim_flag); 5112 } else 5113 ret = __netif_receive_skb_one_core(skb, false); 5114 5115 return ret; 5116 } 5117 5118 static void __netif_receive_skb_list(struct list_head *head) 5119 { 5120 unsigned long noreclaim_flag = 0; 5121 struct sk_buff *skb, *next; 5122 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5123 5124 list_for_each_entry_safe(skb, next, head, list) { 5125 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5126 struct list_head sublist; 5127 5128 /* Handle the previous sublist */ 5129 list_cut_before(&sublist, head, &skb->list); 5130 if (!list_empty(&sublist)) 5131 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5132 pfmemalloc = !pfmemalloc; 5133 /* See comments in __netif_receive_skb */ 5134 if (pfmemalloc) 5135 noreclaim_flag = memalloc_noreclaim_save(); 5136 else 5137 memalloc_noreclaim_restore(noreclaim_flag); 5138 } 5139 } 5140 /* Handle the remaining sublist */ 5141 if (!list_empty(head)) 5142 __netif_receive_skb_list_core(head, pfmemalloc); 5143 /* Restore pflags */ 5144 if (pfmemalloc) 5145 memalloc_noreclaim_restore(noreclaim_flag); 5146 } 5147 5148 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5149 { 5150 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5151 struct bpf_prog *new = xdp->prog; 5152 int ret = 0; 5153 5154 switch (xdp->command) { 5155 case XDP_SETUP_PROG: 5156 rcu_assign_pointer(dev->xdp_prog, new); 5157 if (old) 5158 bpf_prog_put(old); 5159 5160 if (old && !new) { 5161 static_branch_dec(&generic_xdp_needed_key); 5162 } else if (new && !old) { 5163 static_branch_inc(&generic_xdp_needed_key); 5164 dev_disable_lro(dev); 5165 dev_disable_gro_hw(dev); 5166 } 5167 break; 5168 5169 case XDP_QUERY_PROG: 5170 xdp->prog_id = old ? old->aux->id : 0; 5171 break; 5172 5173 default: 5174 ret = -EINVAL; 5175 break; 5176 } 5177 5178 return ret; 5179 } 5180 5181 static int netif_receive_skb_internal(struct sk_buff *skb) 5182 { 5183 int ret; 5184 5185 net_timestamp_check(netdev_tstamp_prequeue, skb); 5186 5187 if (skb_defer_rx_timestamp(skb)) 5188 return NET_RX_SUCCESS; 5189 5190 rcu_read_lock(); 5191 #ifdef CONFIG_RPS 5192 if (static_branch_unlikely(&rps_needed)) { 5193 struct rps_dev_flow voidflow, *rflow = &voidflow; 5194 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5195 5196 if (cpu >= 0) { 5197 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5198 rcu_read_unlock(); 5199 return ret; 5200 } 5201 } 5202 #endif 5203 ret = __netif_receive_skb(skb); 5204 rcu_read_unlock(); 5205 return ret; 5206 } 5207 5208 static void netif_receive_skb_list_internal(struct list_head *head) 5209 { 5210 struct sk_buff *skb, *next; 5211 struct list_head sublist; 5212 5213 INIT_LIST_HEAD(&sublist); 5214 list_for_each_entry_safe(skb, next, head, list) { 5215 net_timestamp_check(netdev_tstamp_prequeue, skb); 5216 skb_list_del_init(skb); 5217 if (!skb_defer_rx_timestamp(skb)) 5218 list_add_tail(&skb->list, &sublist); 5219 } 5220 list_splice_init(&sublist, head); 5221 5222 rcu_read_lock(); 5223 #ifdef CONFIG_RPS 5224 if (static_branch_unlikely(&rps_needed)) { 5225 list_for_each_entry_safe(skb, next, head, list) { 5226 struct rps_dev_flow voidflow, *rflow = &voidflow; 5227 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5228 5229 if (cpu >= 0) { 5230 /* Will be handled, remove from list */ 5231 skb_list_del_init(skb); 5232 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5233 } 5234 } 5235 } 5236 #endif 5237 __netif_receive_skb_list(head); 5238 rcu_read_unlock(); 5239 } 5240 5241 /** 5242 * netif_receive_skb - process receive buffer from network 5243 * @skb: buffer to process 5244 * 5245 * netif_receive_skb() is the main receive data processing function. 5246 * It always succeeds. The buffer may be dropped during processing 5247 * for congestion control or by the protocol layers. 
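 *
 * A minimal NAPI poll() built around it might look like the sketch below
 * (illustrative only: my_rx_next_frame() and the driver context are
 * hypothetical, and a GRO-capable driver would call napi_gro_receive()
 * instead):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = my_rx_next_frame(napi))) {
 *			netif_receive_skb(skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}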
5248 * 5249 * This function may only be called from softirq context and interrupts 5250 * should be enabled. 5251 * 5252 * Return values (usually ignored): 5253 * NET_RX_SUCCESS: no congestion 5254 * NET_RX_DROP: packet was dropped 5255 */ 5256 int netif_receive_skb(struct sk_buff *skb) 5257 { 5258 int ret; 5259 5260 trace_netif_receive_skb_entry(skb); 5261 5262 ret = netif_receive_skb_internal(skb); 5263 trace_netif_receive_skb_exit(ret); 5264 5265 return ret; 5266 } 5267 EXPORT_SYMBOL(netif_receive_skb); 5268 5269 /** 5270 * netif_receive_skb_list - process many receive buffers from network 5271 * @head: list of skbs to process. 5272 * 5273 * Since return value of netif_receive_skb() is normally ignored, and 5274 * wouldn't be meaningful for a list, this function returns void. 5275 * 5276 * This function may only be called from softirq context and interrupts 5277 * should be enabled. 5278 */ 5279 void netif_receive_skb_list(struct list_head *head) 5280 { 5281 struct sk_buff *skb; 5282 5283 if (list_empty(head)) 5284 return; 5285 if (trace_netif_receive_skb_list_entry_enabled()) { 5286 list_for_each_entry(skb, head, list) 5287 trace_netif_receive_skb_list_entry(skb); 5288 } 5289 netif_receive_skb_list_internal(head); 5290 trace_netif_receive_skb_list_exit(0); 5291 } 5292 EXPORT_SYMBOL(netif_receive_skb_list); 5293 5294 DEFINE_PER_CPU(struct work_struct, flush_works); 5295 5296 /* Network device is going away, flush any packets still pending */ 5297 static void flush_backlog(struct work_struct *work) 5298 { 5299 struct sk_buff *skb, *tmp; 5300 struct softnet_data *sd; 5301 5302 local_bh_disable(); 5303 sd = this_cpu_ptr(&softnet_data); 5304 5305 local_irq_disable(); 5306 rps_lock(sd); 5307 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5308 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5309 __skb_unlink(skb, &sd->input_pkt_queue); 5310 kfree_skb(skb); 5311 input_queue_head_incr(sd); 5312 } 5313 } 5314 rps_unlock(sd); 5315 local_irq_enable(); 5316 5317 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5318 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5319 __skb_unlink(skb, &sd->process_queue); 5320 kfree_skb(skb); 5321 input_queue_head_incr(sd); 5322 } 5323 } 5324 local_bh_enable(); 5325 } 5326 5327 static void flush_all_backlogs(void) 5328 { 5329 unsigned int cpu; 5330 5331 get_online_cpus(); 5332 5333 for_each_online_cpu(cpu) 5334 queue_work_on(cpu, system_highpri_wq, 5335 per_cpu_ptr(&flush_works, cpu)); 5336 5337 for_each_online_cpu(cpu) 5338 flush_work(per_cpu_ptr(&flush_works, cpu)); 5339 5340 put_online_cpus(); 5341 } 5342 5343 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); 5344 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); 5345 static int napi_gro_complete(struct sk_buff *skb) 5346 { 5347 struct packet_offload *ptype; 5348 __be16 type = skb->protocol; 5349 struct list_head *head = &offload_base; 5350 int err = -ENOENT; 5351 5352 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5353 5354 if (NAPI_GRO_CB(skb)->count == 1) { 5355 skb_shinfo(skb)->gso_size = 0; 5356 goto out; 5357 } 5358 5359 rcu_read_lock(); 5360 list_for_each_entry_rcu(ptype, head, list) { 5361 if (ptype->type != type || !ptype->callbacks.gro_complete) 5362 continue; 5363 5364 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 5365 ipv6_gro_complete, inet_gro_complete, 5366 skb, 0); 5367 break; 5368 } 5369 rcu_read_unlock(); 5370 5371 if (err) { 5372 WARN_ON(&ptype->list == head); 5373 kfree_skb(skb); 5374 return NET_RX_SUCCESS; 
5375 } 5376 5377 out: 5378 return netif_receive_skb_internal(skb); 5379 } 5380 5381 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5382 bool flush_old) 5383 { 5384 struct list_head *head = &napi->gro_hash[index].list; 5385 struct sk_buff *skb, *p; 5386 5387 list_for_each_entry_safe_reverse(skb, p, head, list) { 5388 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5389 return; 5390 skb_list_del_init(skb); 5391 napi_gro_complete(skb); 5392 napi->gro_hash[index].count--; 5393 } 5394 5395 if (!napi->gro_hash[index].count) 5396 __clear_bit(index, &napi->gro_bitmask); 5397 } 5398 5399 /* napi->gro_hash[].list contains packets ordered by age. 5400 * youngest packets at the head of it. 5401 * Complete skbs in reverse order to reduce latencies. 5402 */ 5403 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5404 { 5405 unsigned long bitmask = napi->gro_bitmask; 5406 unsigned int i, base = ~0U; 5407 5408 while ((i = ffs(bitmask)) != 0) { 5409 bitmask >>= i; 5410 base += i; 5411 __napi_gro_flush_chain(napi, base, flush_old); 5412 } 5413 } 5414 EXPORT_SYMBOL(napi_gro_flush); 5415 5416 static struct list_head *gro_list_prepare(struct napi_struct *napi, 5417 struct sk_buff *skb) 5418 { 5419 unsigned int maclen = skb->dev->hard_header_len; 5420 u32 hash = skb_get_hash_raw(skb); 5421 struct list_head *head; 5422 struct sk_buff *p; 5423 5424 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; 5425 list_for_each_entry(p, head, list) { 5426 unsigned long diffs; 5427 5428 NAPI_GRO_CB(p)->flush = 0; 5429 5430 if (hash != skb_get_hash_raw(p)) { 5431 NAPI_GRO_CB(p)->same_flow = 0; 5432 continue; 5433 } 5434 5435 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5436 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); 5437 if (skb_vlan_tag_present(p)) 5438 diffs |= p->vlan_tci ^ skb->vlan_tci; 5439 diffs |= skb_metadata_dst_cmp(p, skb); 5440 diffs |= skb_metadata_differs(p, skb); 5441 if (maclen == ETH_HLEN) 5442 diffs |= compare_ether_header(skb_mac_header(p), 5443 skb_mac_header(skb)); 5444 else if (!diffs) 5445 diffs = memcmp(skb_mac_header(p), 5446 skb_mac_header(skb), 5447 maclen); 5448 NAPI_GRO_CB(p)->same_flow = !diffs; 5449 } 5450 5451 return head; 5452 } 5453 5454 static void skb_gro_reset_offset(struct sk_buff *skb) 5455 { 5456 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5457 const skb_frag_t *frag0 = &pinfo->frags[0]; 5458 5459 NAPI_GRO_CB(skb)->data_offset = 0; 5460 NAPI_GRO_CB(skb)->frag0 = NULL; 5461 NAPI_GRO_CB(skb)->frag0_len = 0; 5462 5463 if (skb_mac_header(skb) == skb_tail_pointer(skb) && 5464 pinfo->nr_frags && 5465 !PageHighMem(skb_frag_page(frag0))) { 5466 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5467 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5468 skb_frag_size(frag0), 5469 skb->end - skb->tail); 5470 } 5471 } 5472 5473 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5474 { 5475 struct skb_shared_info *pinfo = skb_shinfo(skb); 5476 5477 BUG_ON(skb->end - skb->tail < grow); 5478 5479 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5480 5481 skb->data_len -= grow; 5482 skb->tail += grow; 5483 5484 pinfo->frags[0].page_offset += grow; 5485 skb_frag_size_sub(&pinfo->frags[0], grow); 5486 5487 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 5488 skb_frag_unref(skb, 0); 5489 memmove(pinfo->frags, pinfo->frags + 1, 5490 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 5491 } 5492 } 5493 5494 static void gro_flush_oldest(struct list_head *head) 5495 { 5496 struct sk_buff *oldest; 
5497 5498 oldest = list_last_entry(head, struct sk_buff, list); 5499 5500 /* We are called with head length >= MAX_GRO_SKBS, so this is 5501 * impossible. 5502 */ 5503 if (WARN_ON_ONCE(!oldest)) 5504 return; 5505 5506 /* Do not adjust napi->gro_hash[].count, caller is adding a new 5507 * SKB to the chain. 5508 */ 5509 skb_list_del_init(oldest); 5510 napi_gro_complete(oldest); 5511 } 5512 5513 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, 5514 struct sk_buff *)); 5515 INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, 5516 struct sk_buff *)); 5517 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5518 { 5519 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 5520 struct list_head *head = &offload_base; 5521 struct packet_offload *ptype; 5522 __be16 type = skb->protocol; 5523 struct list_head *gro_head; 5524 struct sk_buff *pp = NULL; 5525 enum gro_result ret; 5526 int same_flow; 5527 int grow; 5528 5529 if (netif_elide_gro(skb->dev)) 5530 goto normal; 5531 5532 gro_head = gro_list_prepare(napi, skb); 5533 5534 rcu_read_lock(); 5535 list_for_each_entry_rcu(ptype, head, list) { 5536 if (ptype->type != type || !ptype->callbacks.gro_receive) 5537 continue; 5538 5539 skb_set_network_header(skb, skb_gro_offset(skb)); 5540 skb_reset_mac_len(skb); 5541 NAPI_GRO_CB(skb)->same_flow = 0; 5542 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 5543 NAPI_GRO_CB(skb)->free = 0; 5544 NAPI_GRO_CB(skb)->encap_mark = 0; 5545 NAPI_GRO_CB(skb)->recursion_counter = 0; 5546 NAPI_GRO_CB(skb)->is_fou = 0; 5547 NAPI_GRO_CB(skb)->is_atomic = 1; 5548 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 5549 5550 /* Setup for GRO checksum validation */ 5551 switch (skb->ip_summed) { 5552 case CHECKSUM_COMPLETE: 5553 NAPI_GRO_CB(skb)->csum = skb->csum; 5554 NAPI_GRO_CB(skb)->csum_valid = 1; 5555 NAPI_GRO_CB(skb)->csum_cnt = 0; 5556 break; 5557 case CHECKSUM_UNNECESSARY: 5558 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 5559 NAPI_GRO_CB(skb)->csum_valid = 0; 5560 break; 5561 default: 5562 NAPI_GRO_CB(skb)->csum_cnt = 0; 5563 NAPI_GRO_CB(skb)->csum_valid = 0; 5564 } 5565 5566 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, 5567 ipv6_gro_receive, inet_gro_receive, 5568 gro_head, skb); 5569 break; 5570 } 5571 rcu_read_unlock(); 5572 5573 if (&ptype->list == head) 5574 goto normal; 5575 5576 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) { 5577 ret = GRO_CONSUMED; 5578 goto ok; 5579 } 5580 5581 same_flow = NAPI_GRO_CB(skb)->same_flow; 5582 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 5583 5584 if (pp) { 5585 skb_list_del_init(pp); 5586 napi_gro_complete(pp); 5587 napi->gro_hash[hash].count--; 5588 } 5589 5590 if (same_flow) 5591 goto ok; 5592 5593 if (NAPI_GRO_CB(skb)->flush) 5594 goto normal; 5595 5596 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { 5597 gro_flush_oldest(gro_head); 5598 } else { 5599 napi->gro_hash[hash].count++; 5600 } 5601 NAPI_GRO_CB(skb)->count = 1; 5602 NAPI_GRO_CB(skb)->age = jiffies; 5603 NAPI_GRO_CB(skb)->last = skb; 5604 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 5605 list_add(&skb->list, gro_head); 5606 ret = GRO_HELD; 5607 5608 pull: 5609 grow = skb_gro_offset(skb) - skb_headlen(skb); 5610 if (grow > 0) 5611 gro_pull_from_frag0(skb, grow); 5612 ok: 5613 if (napi->gro_hash[hash].count) { 5614 if (!test_bit(hash, &napi->gro_bitmask)) 5615 __set_bit(hash, &napi->gro_bitmask); 5616 } else if (test_bit(hash, &napi->gro_bitmask)) { 5617 __clear_bit(hash, &napi->gro_bitmask); 5618 } 5619 5620 return ret; 5621 5622 normal: 5623 ret = GRO_NORMAL; 5624 goto pull; 5625 } 5626 5627 struct packet_offload *gro_find_receive_by_type(__be16 type) 5628 { 5629 struct list_head *offload_head = &offload_base; 5630 struct packet_offload *ptype; 5631 5632 list_for_each_entry_rcu(ptype, offload_head, list) { 5633 if (ptype->type != type || !ptype->callbacks.gro_receive) 5634 continue; 5635 return ptype; 5636 } 5637 return NULL; 5638 } 5639 EXPORT_SYMBOL(gro_find_receive_by_type); 5640 5641 struct packet_offload *gro_find_complete_by_type(__be16 type) 5642 { 5643 struct list_head *offload_head = &offload_base; 5644 struct packet_offload *ptype; 5645 5646 list_for_each_entry_rcu(ptype, offload_head, list) { 5647 if (ptype->type != type || !ptype->callbacks.gro_complete) 5648 continue; 5649 return ptype; 5650 } 5651 return NULL; 5652 } 5653 EXPORT_SYMBOL(gro_find_complete_by_type); 5654 5655 static void napi_skb_free_stolen_head(struct sk_buff *skb) 5656 { 5657 skb_dst_drop(skb); 5658 secpath_reset(skb); 5659 kmem_cache_free(skbuff_head_cache, skb); 5660 } 5661 5662 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 5663 { 5664 switch (ret) { 5665 case GRO_NORMAL: 5666 if (netif_receive_skb_internal(skb)) 5667 ret = GRO_DROP; 5668 break; 5669 5670 case GRO_DROP: 5671 kfree_skb(skb); 5672 break; 5673 5674 case GRO_MERGED_FREE: 5675 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5676 napi_skb_free_stolen_head(skb); 5677 else 5678 __kfree_skb(skb); 5679 break; 5680 5681 case GRO_HELD: 5682 case GRO_MERGED: 5683 case GRO_CONSUMED: 5684 break; 5685 } 5686 5687 return ret; 5688 } 5689 5690 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5691 { 5692 gro_result_t ret; 5693 5694 skb_mark_napi_id(skb, napi); 5695 trace_napi_gro_receive_entry(skb); 5696 5697 skb_gro_reset_offset(skb); 5698 5699 ret = napi_skb_finish(dev_gro_receive(napi, skb), skb); 5700 trace_napi_gro_receive_exit(ret); 5701 5702 return ret; 5703 } 5704 EXPORT_SYMBOL(napi_gro_receive); 5705 5706 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 5707 { 5708 if (unlikely(skb->pfmemalloc)) { 5709 consume_skb(skb); 5710 return; 5711 } 5712 __skb_pull(skb, skb_headlen(skb)); 5713 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 5714 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 5715 __vlan_hwaccel_clear_tag(skb); 5716 skb->dev = napi->dev; 5717 skb->skb_iif = 0; 5718 5719 /* eth_type_trans() assumes pkt_type is PACKET_HOST */ 5720 skb->pkt_type = 
PACKET_HOST; 5721 5722 skb->encapsulation = 0; 5723 skb_shinfo(skb)->gso_type = 0; 5724 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5725 secpath_reset(skb); 5726 5727 napi->skb = skb; 5728 } 5729 5730 struct sk_buff *napi_get_frags(struct napi_struct *napi) 5731 { 5732 struct sk_buff *skb = napi->skb; 5733 5734 if (!skb) { 5735 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 5736 if (skb) { 5737 napi->skb = skb; 5738 skb_mark_napi_id(skb, napi); 5739 } 5740 } 5741 return skb; 5742 } 5743 EXPORT_SYMBOL(napi_get_frags); 5744 5745 static gro_result_t napi_frags_finish(struct napi_struct *napi, 5746 struct sk_buff *skb, 5747 gro_result_t ret) 5748 { 5749 switch (ret) { 5750 case GRO_NORMAL: 5751 case GRO_HELD: 5752 __skb_push(skb, ETH_HLEN); 5753 skb->protocol = eth_type_trans(skb, skb->dev); 5754 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) 5755 ret = GRO_DROP; 5756 break; 5757 5758 case GRO_DROP: 5759 napi_reuse_skb(napi, skb); 5760 break; 5761 5762 case GRO_MERGED_FREE: 5763 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5764 napi_skb_free_stolen_head(skb); 5765 else 5766 napi_reuse_skb(napi, skb); 5767 break; 5768 5769 case GRO_MERGED: 5770 case GRO_CONSUMED: 5771 break; 5772 } 5773 5774 return ret; 5775 } 5776 5777 /* Upper GRO stack assumes network header starts at gro_offset=0 5778 * Drivers could call both napi_gro_frags() and napi_gro_receive() 5779 * We copy ethernet header into skb->data to have a common layout. 5780 */ 5781 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 5782 { 5783 struct sk_buff *skb = napi->skb; 5784 const struct ethhdr *eth; 5785 unsigned int hlen = sizeof(*eth); 5786 5787 napi->skb = NULL; 5788 5789 skb_reset_mac_header(skb); 5790 skb_gro_reset_offset(skb); 5791 5792 if (unlikely(skb_gro_header_hard(skb, hlen))) { 5793 eth = skb_gro_header_slow(skb, hlen, 0); 5794 if (unlikely(!eth)) { 5795 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 5796 __func__, napi->dev->name); 5797 napi_reuse_skb(napi, skb); 5798 return NULL; 5799 } 5800 } else { 5801 eth = (const struct ethhdr *)skb->data; 5802 gro_pull_from_frag0(skb, hlen); 5803 NAPI_GRO_CB(skb)->frag0 += hlen; 5804 NAPI_GRO_CB(skb)->frag0_len -= hlen; 5805 } 5806 __skb_pull(skb, hlen); 5807 5808 /* 5809 * This works because the only protocols we care about don't require 5810 * special handling. 5811 * We'll fix it up properly in napi_frags_finish() 5812 */ 5813 skb->protocol = eth->h_proto; 5814 5815 return skb; 5816 } 5817 5818 gro_result_t napi_gro_frags(struct napi_struct *napi) 5819 { 5820 gro_result_t ret; 5821 struct sk_buff *skb = napi_frags_skb(napi); 5822 5823 if (!skb) 5824 return GRO_DROP; 5825 5826 trace_napi_gro_frags_entry(skb); 5827 5828 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 5829 trace_napi_gro_frags_exit(ret); 5830 5831 return ret; 5832 } 5833 EXPORT_SYMBOL(napi_gro_frags); 5834 5835 /* Compute the checksum from gro_offset and return the folded value 5836 * after adding in any pseudo checksum. 5837 */ 5838 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 5839 { 5840 __wsum wsum; 5841 __sum16 sum; 5842 5843 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 5844 5845 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 5846 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 5847 /* See comments in __skb_checksum_complete(). 
*/ 5848 if (likely(!sum)) { 5849 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 5850 !skb->csum_complete_sw) 5851 netdev_rx_csum_fault(skb->dev, skb); 5852 } 5853 5854 NAPI_GRO_CB(skb)->csum = wsum; 5855 NAPI_GRO_CB(skb)->csum_valid = 1; 5856 5857 return sum; 5858 } 5859 EXPORT_SYMBOL(__skb_gro_checksum_complete); 5860 5861 static void net_rps_send_ipi(struct softnet_data *remsd) 5862 { 5863 #ifdef CONFIG_RPS 5864 while (remsd) { 5865 struct softnet_data *next = remsd->rps_ipi_next; 5866 5867 if (cpu_online(remsd->cpu)) 5868 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5869 remsd = next; 5870 } 5871 #endif 5872 } 5873 5874 /* 5875 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5876 * Note: called with local irq disabled, but exits with local irq enabled. 5877 */ 5878 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5879 { 5880 #ifdef CONFIG_RPS 5881 struct softnet_data *remsd = sd->rps_ipi_list; 5882 5883 if (remsd) { 5884 sd->rps_ipi_list = NULL; 5885 5886 local_irq_enable(); 5887 5888 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5889 net_rps_send_ipi(remsd); 5890 } else 5891 #endif 5892 local_irq_enable(); 5893 } 5894 5895 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5896 { 5897 #ifdef CONFIG_RPS 5898 return sd->rps_ipi_list != NULL; 5899 #else 5900 return false; 5901 #endif 5902 } 5903 5904 static int process_backlog(struct napi_struct *napi, int quota) 5905 { 5906 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5907 bool again = true; 5908 int work = 0; 5909 5910 /* Check if we have pending ipi, its better to send them now, 5911 * not waiting net_rx_action() end. 5912 */ 5913 if (sd_has_rps_ipi_waiting(sd)) { 5914 local_irq_disable(); 5915 net_rps_action_and_irq_enable(sd); 5916 } 5917 5918 napi->weight = dev_rx_weight; 5919 while (again) { 5920 struct sk_buff *skb; 5921 5922 while ((skb = __skb_dequeue(&sd->process_queue))) { 5923 rcu_read_lock(); 5924 __netif_receive_skb(skb); 5925 rcu_read_unlock(); 5926 input_queue_head_incr(sd); 5927 if (++work >= quota) 5928 return work; 5929 5930 } 5931 5932 local_irq_disable(); 5933 rps_lock(sd); 5934 if (skb_queue_empty(&sd->input_pkt_queue)) { 5935 /* 5936 * Inline a custom version of __napi_complete(). 5937 * only current cpu owns and manipulates this napi, 5938 * and NAPI_STATE_SCHED is the only possible flag set 5939 * on backlog. 5940 * We can use a plain write instead of clear_bit(), 5941 * and we dont need an smp_mb() memory barrier. 5942 */ 5943 napi->state = 0; 5944 again = false; 5945 } else { 5946 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5947 &sd->process_queue); 5948 } 5949 rps_unlock(sd); 5950 local_irq_enable(); 5951 } 5952 5953 return work; 5954 } 5955 5956 /** 5957 * __napi_schedule - schedule for receive 5958 * @n: entry to schedule 5959 * 5960 * The entry's receive function will be scheduled to run. 5961 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 5962 */ 5963 void __napi_schedule(struct napi_struct *n) 5964 { 5965 unsigned long flags; 5966 5967 local_irq_save(flags); 5968 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5969 local_irq_restore(flags); 5970 } 5971 EXPORT_SYMBOL(__napi_schedule); 5972 5973 /** 5974 * napi_schedule_prep - check if napi can be scheduled 5975 * @n: napi context 5976 * 5977 * Test if NAPI routine is already running, and if not mark 5978 * it as running. This is used as a condition variable 5979 * insure only one NAPI poll instance runs. 
We also make 5980 * sure there is no pending NAPI disable. 5981 */ 5982 bool napi_schedule_prep(struct napi_struct *n) 5983 { 5984 unsigned long val, new; 5985 5986 do { 5987 val = READ_ONCE(n->state); 5988 if (unlikely(val & NAPIF_STATE_DISABLE)) 5989 return false; 5990 new = val | NAPIF_STATE_SCHED; 5991 5992 /* Sets STATE_MISSED bit if STATE_SCHED was already set 5993 * This was suggested by Alexander Duyck, as compiler 5994 * emits better code than : 5995 * if (val & NAPIF_STATE_SCHED) 5996 * new |= NAPIF_STATE_MISSED; 5997 */ 5998 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 5999 NAPIF_STATE_MISSED; 6000 } while (cmpxchg(&n->state, val, new) != val); 6001 6002 return !(val & NAPIF_STATE_SCHED); 6003 } 6004 EXPORT_SYMBOL(napi_schedule_prep); 6005 6006 /** 6007 * __napi_schedule_irqoff - schedule for receive 6008 * @n: entry to schedule 6009 * 6010 * Variant of __napi_schedule() assuming hard irqs are masked 6011 */ 6012 void __napi_schedule_irqoff(struct napi_struct *n) 6013 { 6014 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6015 } 6016 EXPORT_SYMBOL(__napi_schedule_irqoff); 6017 6018 bool napi_complete_done(struct napi_struct *n, int work_done) 6019 { 6020 unsigned long flags, val, new; 6021 6022 /* 6023 * 1) Don't let napi dequeue from the cpu poll list 6024 * just in case its running on a different cpu. 6025 * 2) If we are busy polling, do nothing here, we have 6026 * the guarantee we will be called later. 6027 */ 6028 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6029 NAPIF_STATE_IN_BUSY_POLL))) 6030 return false; 6031 6032 if (n->gro_bitmask) { 6033 unsigned long timeout = 0; 6034 6035 if (work_done) 6036 timeout = n->dev->gro_flush_timeout; 6037 6038 /* When the NAPI instance uses a timeout and keeps postponing 6039 * it, we need to bound somehow the time packets are kept in 6040 * the GRO layer 6041 */ 6042 napi_gro_flush(n, !!timeout); 6043 if (timeout) 6044 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6045 HRTIMER_MODE_REL_PINNED); 6046 } 6047 if (unlikely(!list_empty(&n->poll_list))) { 6048 /* If n->poll_list is not empty, we need to mask irqs */ 6049 local_irq_save(flags); 6050 list_del_init(&n->poll_list); 6051 local_irq_restore(flags); 6052 } 6053 6054 do { 6055 val = READ_ONCE(n->state); 6056 6057 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6058 6059 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); 6060 6061 /* If STATE_MISSED was set, leave STATE_SCHED set, 6062 * because we will call napi->poll() one more time. 6063 * This C code was suggested by Alexander Duyck to help gcc. 
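 *
 * (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED evaluates to 0 or 1, so
 * the multiplication re-adds NAPIF_STATE_SCHED exactly when MISSED was
 * set, without a conditional branch.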
6064 */ 6065 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6066 NAPIF_STATE_SCHED; 6067 } while (cmpxchg(&n->state, val, new) != val); 6068 6069 if (unlikely(val & NAPIF_STATE_MISSED)) { 6070 __napi_schedule(n); 6071 return false; 6072 } 6073 6074 return true; 6075 } 6076 EXPORT_SYMBOL(napi_complete_done); 6077 6078 /* must be called under rcu_read_lock(), as we dont take a reference */ 6079 static struct napi_struct *napi_by_id(unsigned int napi_id) 6080 { 6081 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6082 struct napi_struct *napi; 6083 6084 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6085 if (napi->napi_id == napi_id) 6086 return napi; 6087 6088 return NULL; 6089 } 6090 6091 #if defined(CONFIG_NET_RX_BUSY_POLL) 6092 6093 #define BUSY_POLL_BUDGET 8 6094 6095 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) 6096 { 6097 int rc; 6098 6099 /* Busy polling means there is a high chance device driver hard irq 6100 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6101 * set in napi_schedule_prep(). 6102 * Since we are about to call napi->poll() once more, we can safely 6103 * clear NAPI_STATE_MISSED. 6104 * 6105 * Note: x86 could use a single "lock and ..." instruction 6106 * to perform these two clear_bit() 6107 */ 6108 clear_bit(NAPI_STATE_MISSED, &napi->state); 6109 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6110 6111 local_bh_disable(); 6112 6113 /* All we really want here is to re-enable device interrupts. 6114 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6115 */ 6116 rc = napi->poll(napi, BUSY_POLL_BUDGET); 6117 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); 6118 netpoll_poll_unlock(have_poll_lock); 6119 if (rc == BUSY_POLL_BUDGET) 6120 __napi_schedule(napi); 6121 local_bh_enable(); 6122 } 6123 6124 void napi_busy_loop(unsigned int napi_id, 6125 bool (*loop_end)(void *, unsigned long), 6126 void *loop_end_arg) 6127 { 6128 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6129 int (*napi_poll)(struct napi_struct *napi, int budget); 6130 void *have_poll_lock = NULL; 6131 struct napi_struct *napi; 6132 6133 restart: 6134 napi_poll = NULL; 6135 6136 rcu_read_lock(); 6137 6138 napi = napi_by_id(napi_id); 6139 if (!napi) 6140 goto out; 6141 6142 preempt_disable(); 6143 for (;;) { 6144 int work = 0; 6145 6146 local_bh_disable(); 6147 if (!napi_poll) { 6148 unsigned long val = READ_ONCE(napi->state); 6149 6150 /* If multiple threads are competing for this napi, 6151 * we avoid dirtying napi->state as much as we can. 
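 *
 * The cmpxchg below only claims SCHED | IN_BUSY_POLL when the NAPI is
 * currently idle; if it is being disabled, is already scheduled or is
 * already busy-polled by someone else, we fall through to the accounting
 * at the "count" label instead.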
6152 */ 6153 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6154 NAPIF_STATE_IN_BUSY_POLL)) 6155 goto count; 6156 if (cmpxchg(&napi->state, val, 6157 val | NAPIF_STATE_IN_BUSY_POLL | 6158 NAPIF_STATE_SCHED) != val) 6159 goto count; 6160 have_poll_lock = netpoll_poll_lock(napi); 6161 napi_poll = napi->poll; 6162 } 6163 work = napi_poll(napi, BUSY_POLL_BUDGET); 6164 trace_napi_poll(napi, work, BUSY_POLL_BUDGET); 6165 count: 6166 if (work > 0) 6167 __NET_ADD_STATS(dev_net(napi->dev), 6168 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6169 local_bh_enable(); 6170 6171 if (!loop_end || loop_end(loop_end_arg, start_time)) 6172 break; 6173 6174 if (unlikely(need_resched())) { 6175 if (napi_poll) 6176 busy_poll_stop(napi, have_poll_lock); 6177 preempt_enable(); 6178 rcu_read_unlock(); 6179 cond_resched(); 6180 if (loop_end(loop_end_arg, start_time)) 6181 return; 6182 goto restart; 6183 } 6184 cpu_relax(); 6185 } 6186 if (napi_poll) 6187 busy_poll_stop(napi, have_poll_lock); 6188 preempt_enable(); 6189 out: 6190 rcu_read_unlock(); 6191 } 6192 EXPORT_SYMBOL(napi_busy_loop); 6193 6194 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6195 6196 static void napi_hash_add(struct napi_struct *napi) 6197 { 6198 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) || 6199 test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) 6200 return; 6201 6202 spin_lock(&napi_hash_lock); 6203 6204 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6205 do { 6206 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6207 napi_gen_id = MIN_NAPI_ID; 6208 } while (napi_by_id(napi_gen_id)); 6209 napi->napi_id = napi_gen_id; 6210 6211 hlist_add_head_rcu(&napi->napi_hash_node, 6212 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6213 6214 spin_unlock(&napi_hash_lock); 6215 } 6216 6217 /* Warning : caller is responsible to make sure rcu grace period 6218 * is respected before freeing memory containing @napi 6219 */ 6220 bool napi_hash_del(struct napi_struct *napi) 6221 { 6222 bool rcu_sync_needed = false; 6223 6224 spin_lock(&napi_hash_lock); 6225 6226 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) { 6227 rcu_sync_needed = true; 6228 hlist_del_rcu(&napi->napi_hash_node); 6229 } 6230 spin_unlock(&napi_hash_lock); 6231 return rcu_sync_needed; 6232 } 6233 EXPORT_SYMBOL_GPL(napi_hash_del); 6234 6235 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6236 { 6237 struct napi_struct *napi; 6238 6239 napi = container_of(timer, struct napi_struct, timer); 6240 6241 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6242 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
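	 * Losing the test_and_set_bit() race below is harmless: it means the
	 * NAPI instance is already scheduled, and its poll routine will take
	 * care of flushing the held GRO packets anyway.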
6243 */ 6244 if (napi->gro_bitmask && !napi_disable_pending(napi) && 6245 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) 6246 __napi_schedule_irqoff(napi); 6247 6248 return HRTIMER_NORESTART; 6249 } 6250 6251 static void init_gro_hash(struct napi_struct *napi) 6252 { 6253 int i; 6254 6255 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6256 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6257 napi->gro_hash[i].count = 0; 6258 } 6259 napi->gro_bitmask = 0; 6260 } 6261 6262 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6263 int (*poll)(struct napi_struct *, int), int weight) 6264 { 6265 INIT_LIST_HEAD(&napi->poll_list); 6266 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6267 napi->timer.function = napi_watchdog; 6268 init_gro_hash(napi); 6269 napi->skb = NULL; 6270 napi->poll = poll; 6271 if (weight > NAPI_POLL_WEIGHT) 6272 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6273 weight); 6274 napi->weight = weight; 6275 list_add(&napi->dev_list, &dev->napi_list); 6276 napi->dev = dev; 6277 #ifdef CONFIG_NETPOLL 6278 napi->poll_owner = -1; 6279 #endif 6280 set_bit(NAPI_STATE_SCHED, &napi->state); 6281 napi_hash_add(napi); 6282 } 6283 EXPORT_SYMBOL(netif_napi_add); 6284 6285 void napi_disable(struct napi_struct *n) 6286 { 6287 might_sleep(); 6288 set_bit(NAPI_STATE_DISABLE, &n->state); 6289 6290 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6291 msleep(1); 6292 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6293 msleep(1); 6294 6295 hrtimer_cancel(&n->timer); 6296 6297 clear_bit(NAPI_STATE_DISABLE, &n->state); 6298 } 6299 EXPORT_SYMBOL(napi_disable); 6300 6301 static void flush_gro_hash(struct napi_struct *napi) 6302 { 6303 int i; 6304 6305 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6306 struct sk_buff *skb, *n; 6307 6308 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6309 kfree_skb(skb); 6310 napi->gro_hash[i].count = 0; 6311 } 6312 } 6313 6314 /* Must be called in process context */ 6315 void netif_napi_del(struct napi_struct *napi) 6316 { 6317 might_sleep(); 6318 if (napi_hash_del(napi)) 6319 synchronize_net(); 6320 list_del_init(&napi->dev_list); 6321 napi_free_frags(napi); 6322 6323 flush_gro_hash(napi); 6324 napi->gro_bitmask = 0; 6325 } 6326 EXPORT_SYMBOL(netif_napi_del); 6327 6328 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6329 { 6330 void *have; 6331 int work, weight; 6332 6333 list_del_init(&n->poll_list); 6334 6335 have = netpoll_poll_lock(n); 6336 6337 weight = n->weight; 6338 6339 /* This NAPI_STATE_SCHED test is for avoiding a race 6340 * with netpoll's poll_napi(). Only the entity which 6341 * obtains the lock and sees NAPI_STATE_SCHED set will 6342 * actually make the ->poll() call. Therefore we avoid 6343 * accidentally calling ->poll() when NAPI is not scheduled. 6344 */ 6345 work = 0; 6346 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6347 work = n->poll(n, weight); 6348 trace_napi_poll(n, work, weight); 6349 } 6350 6351 WARN_ON_ONCE(work > weight); 6352 6353 if (likely(work < weight)) 6354 goto out_unlock; 6355 6356 /* Drivers must not modify the NAPI state if they 6357 * consume the entire weight. In such cases this code 6358 * still "owns" the NAPI instance and therefore can 6359 * move the instance around on the list at-will. 6360 */ 6361 if (unlikely(napi_disable_pending(n))) { 6362 napi_complete(n); 6363 goto out_unlock; 6364 } 6365 6366 if (n->gro_bitmask) { 6367 /* flush too old packets 6368 * If HZ < 1000, flush all packets. 
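		 * (When HZ >= 1000 a jiffy is no longer than a millisecond,
		 * so telling napi_gro_flush() to keep only the packets that
		 * arrived in the current jiffy bounds the added GRO latency
		 * to roughly 1 ms; with a coarser HZ that bound would be too
		 * weak, hence the full flush.)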
6369 */ 6370 napi_gro_flush(n, HZ >= 1000); 6371 } 6372 6373 /* Some drivers may have called napi_schedule 6374 * prior to exhausting their budget. 6375 */ 6376 if (unlikely(!list_empty(&n->poll_list))) { 6377 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6378 n->dev ? n->dev->name : "backlog"); 6379 goto out_unlock; 6380 } 6381 6382 list_add_tail(&n->poll_list, repoll); 6383 6384 out_unlock: 6385 netpoll_poll_unlock(have); 6386 6387 return work; 6388 } 6389 6390 static __latent_entropy void net_rx_action(struct softirq_action *h) 6391 { 6392 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6393 unsigned long time_limit = jiffies + 6394 usecs_to_jiffies(netdev_budget_usecs); 6395 int budget = netdev_budget; 6396 LIST_HEAD(list); 6397 LIST_HEAD(repoll); 6398 6399 local_irq_disable(); 6400 list_splice_init(&sd->poll_list, &list); 6401 local_irq_enable(); 6402 6403 for (;;) { 6404 struct napi_struct *n; 6405 6406 if (list_empty(&list)) { 6407 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6408 goto out; 6409 break; 6410 } 6411 6412 n = list_first_entry(&list, struct napi_struct, poll_list); 6413 budget -= napi_poll(n, &repoll); 6414 6415 /* If softirq window is exhausted then punt. 6416 * Allow this to run for 2 jiffies since which will allow 6417 * an average latency of 1.5/HZ. 6418 */ 6419 if (unlikely(budget <= 0 || 6420 time_after_eq(jiffies, time_limit))) { 6421 sd->time_squeeze++; 6422 break; 6423 } 6424 } 6425 6426 local_irq_disable(); 6427 6428 list_splice_tail_init(&sd->poll_list, &list); 6429 list_splice_tail(&repoll, &list); 6430 list_splice(&list, &sd->poll_list); 6431 if (!list_empty(&sd->poll_list)) 6432 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6433 6434 net_rps_action_and_irq_enable(sd); 6435 out: 6436 __kfree_skb_flush(); 6437 } 6438 6439 struct netdev_adjacent { 6440 struct net_device *dev; 6441 6442 /* upper master flag, there can only be one master device per list */ 6443 bool master; 6444 6445 /* counter for the number of times this device was added to us */ 6446 u16 ref_nr; 6447 6448 /* private field for the users */ 6449 void *private; 6450 6451 struct list_head list; 6452 struct rcu_head rcu; 6453 }; 6454 6455 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6456 struct list_head *adj_list) 6457 { 6458 struct netdev_adjacent *adj; 6459 6460 list_for_each_entry(adj, adj_list, list) { 6461 if (adj->dev == adj_dev) 6462 return adj; 6463 } 6464 return NULL; 6465 } 6466 6467 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data) 6468 { 6469 struct net_device *dev = data; 6470 6471 return upper_dev == dev; 6472 } 6473 6474 /** 6475 * netdev_has_upper_dev - Check if device is linked to an upper device 6476 * @dev: device 6477 * @upper_dev: upper device to check 6478 * 6479 * Find out if a device is linked to specified upper device and return true 6480 * in case it is. Note that this checks only immediate upper device, 6481 * not through a complete stack of devices. The caller must hold the RTNL lock. 
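 * (Note: the implementation below walks the entire upper-device graph via
 *  netdev_walk_all_upper_dev_rcu(), so indirectly stacked upper devices are
 *  found as well.)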
6482 */ 6483 bool netdev_has_upper_dev(struct net_device *dev, 6484 struct net_device *upper_dev) 6485 { 6486 ASSERT_RTNL(); 6487 6488 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, 6489 upper_dev); 6490 } 6491 EXPORT_SYMBOL(netdev_has_upper_dev); 6492 6493 /** 6494 * netdev_has_upper_dev_all - Check if device is linked to an upper device 6495 * @dev: device 6496 * @upper_dev: upper device to check 6497 * 6498 * Find out if a device is linked to specified upper device and return true 6499 * in case it is. Note that this checks the entire upper device chain. 6500 * The caller must hold rcu lock. 6501 */ 6502 6503 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6504 struct net_device *upper_dev) 6505 { 6506 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, 6507 upper_dev); 6508 } 6509 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6510 6511 /** 6512 * netdev_has_any_upper_dev - Check if device is linked to some device 6513 * @dev: device 6514 * 6515 * Find out if a device is linked to an upper device and return true in case 6516 * it is. The caller must hold the RTNL lock. 6517 */ 6518 bool netdev_has_any_upper_dev(struct net_device *dev) 6519 { 6520 ASSERT_RTNL(); 6521 6522 return !list_empty(&dev->adj_list.upper); 6523 } 6524 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6525 6526 /** 6527 * netdev_master_upper_dev_get - Get master upper device 6528 * @dev: device 6529 * 6530 * Find a master upper device and return pointer to it or NULL in case 6531 * it's not there. The caller must hold the RTNL lock. 6532 */ 6533 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6534 { 6535 struct netdev_adjacent *upper; 6536 6537 ASSERT_RTNL(); 6538 6539 if (list_empty(&dev->adj_list.upper)) 6540 return NULL; 6541 6542 upper = list_first_entry(&dev->adj_list.upper, 6543 struct netdev_adjacent, list); 6544 if (likely(upper->master)) 6545 return upper->dev; 6546 return NULL; 6547 } 6548 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6549 6550 /** 6551 * netdev_has_any_lower_dev - Check if device is linked to some device 6552 * @dev: device 6553 * 6554 * Find out if a device is linked to a lower device and return true in case 6555 * it is. The caller must hold the RTNL lock. 6556 */ 6557 static bool netdev_has_any_lower_dev(struct net_device *dev) 6558 { 6559 ASSERT_RTNL(); 6560 6561 return !list_empty(&dev->adj_list.lower); 6562 } 6563 6564 void *netdev_adjacent_get_private(struct list_head *adj_list) 6565 { 6566 struct netdev_adjacent *adj; 6567 6568 adj = list_entry(adj_list, struct netdev_adjacent, list); 6569 6570 return adj->private; 6571 } 6572 EXPORT_SYMBOL(netdev_adjacent_get_private); 6573 6574 /** 6575 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6576 * @dev: device 6577 * @iter: list_head ** of the current position 6578 * 6579 * Gets the next device from the dev's upper list, starting from iter 6580 * position. The caller must hold RCU read lock. 
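 *
 * Callers usually reach this through the netdev_for_each_upper_dev_rcu()
 * helper in netdevice.h instead of calling it directly; an illustrative
 * sketch (handle_upper() is only a placeholder, not a real function):
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		handle_upper(upper);
 *	rcu_read_unlock();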
6581 */ 6582 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6583 struct list_head **iter) 6584 { 6585 struct netdev_adjacent *upper; 6586 6587 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6588 6589 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6590 6591 if (&upper->list == &dev->adj_list.upper) 6592 return NULL; 6593 6594 *iter = &upper->list; 6595 6596 return upper->dev; 6597 } 6598 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6599 6600 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6601 struct list_head **iter) 6602 { 6603 struct netdev_adjacent *upper; 6604 6605 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6606 6607 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6608 6609 if (&upper->list == &dev->adj_list.upper) 6610 return NULL; 6611 6612 *iter = &upper->list; 6613 6614 return upper->dev; 6615 } 6616 6617 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6618 int (*fn)(struct net_device *dev, 6619 void *data), 6620 void *data) 6621 { 6622 struct net_device *udev; 6623 struct list_head *iter; 6624 int ret; 6625 6626 for (iter = &dev->adj_list.upper, 6627 udev = netdev_next_upper_dev_rcu(dev, &iter); 6628 udev; 6629 udev = netdev_next_upper_dev_rcu(dev, &iter)) { 6630 /* first is the upper device itself */ 6631 ret = fn(udev, data); 6632 if (ret) 6633 return ret; 6634 6635 /* then look at all of its upper devices */ 6636 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data); 6637 if (ret) 6638 return ret; 6639 } 6640 6641 return 0; 6642 } 6643 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 6644 6645 /** 6646 * netdev_lower_get_next_private - Get the next ->private from the 6647 * lower neighbour list 6648 * @dev: device 6649 * @iter: list_head ** of the current position 6650 * 6651 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6652 * list, starting from iter position. The caller must hold either hold the 6653 * RTNL lock or its own locking that guarantees that the neighbour lower 6654 * list will remain unchanged. 6655 */ 6656 void *netdev_lower_get_next_private(struct net_device *dev, 6657 struct list_head **iter) 6658 { 6659 struct netdev_adjacent *lower; 6660 6661 lower = list_entry(*iter, struct netdev_adjacent, list); 6662 6663 if (&lower->list == &dev->adj_list.lower) 6664 return NULL; 6665 6666 *iter = lower->list.next; 6667 6668 return lower->private; 6669 } 6670 EXPORT_SYMBOL(netdev_lower_get_next_private); 6671 6672 /** 6673 * netdev_lower_get_next_private_rcu - Get the next ->private from the 6674 * lower neighbour list, RCU 6675 * variant 6676 * @dev: device 6677 * @iter: list_head ** of the current position 6678 * 6679 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6680 * list, starting from iter position. The caller must hold RCU read lock. 
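 *
 * The netdev_for_each_lower_private_rcu() helper in netdevice.h wraps this
 * iterator, analogous to netdev_for_each_upper_dev_rcu() for the upper list.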
6681 */ 6682 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 6683 struct list_head **iter) 6684 { 6685 struct netdev_adjacent *lower; 6686 6687 WARN_ON_ONCE(!rcu_read_lock_held()); 6688 6689 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6690 6691 if (&lower->list == &dev->adj_list.lower) 6692 return NULL; 6693 6694 *iter = &lower->list; 6695 6696 return lower->private; 6697 } 6698 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 6699 6700 /** 6701 * netdev_lower_get_next - Get the next device from the lower neighbour 6702 * list 6703 * @dev: device 6704 * @iter: list_head ** of the current position 6705 * 6706 * Gets the next netdev_adjacent from the dev's lower neighbour 6707 * list, starting from iter position. The caller must hold RTNL lock or 6708 * its own locking that guarantees that the neighbour lower 6709 * list will remain unchanged. 6710 */ 6711 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 6712 { 6713 struct netdev_adjacent *lower; 6714 6715 lower = list_entry(*iter, struct netdev_adjacent, list); 6716 6717 if (&lower->list == &dev->adj_list.lower) 6718 return NULL; 6719 6720 *iter = lower->list.next; 6721 6722 return lower->dev; 6723 } 6724 EXPORT_SYMBOL(netdev_lower_get_next); 6725 6726 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 6727 struct list_head **iter) 6728 { 6729 struct netdev_adjacent *lower; 6730 6731 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 6732 6733 if (&lower->list == &dev->adj_list.lower) 6734 return NULL; 6735 6736 *iter = &lower->list; 6737 6738 return lower->dev; 6739 } 6740 6741 int netdev_walk_all_lower_dev(struct net_device *dev, 6742 int (*fn)(struct net_device *dev, 6743 void *data), 6744 void *data) 6745 { 6746 struct net_device *ldev; 6747 struct list_head *iter; 6748 int ret; 6749 6750 for (iter = &dev->adj_list.lower, 6751 ldev = netdev_next_lower_dev(dev, &iter); 6752 ldev; 6753 ldev = netdev_next_lower_dev(dev, &iter)) { 6754 /* first is the lower device itself */ 6755 ret = fn(ldev, data); 6756 if (ret) 6757 return ret; 6758 6759 /* then look at all of its lower devices */ 6760 ret = netdev_walk_all_lower_dev(ldev, fn, data); 6761 if (ret) 6762 return ret; 6763 } 6764 6765 return 0; 6766 } 6767 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 6768 6769 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 6770 struct list_head **iter) 6771 { 6772 struct netdev_adjacent *lower; 6773 6774 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6775 if (&lower->list == &dev->adj_list.lower) 6776 return NULL; 6777 6778 *iter = &lower->list; 6779 6780 return lower->dev; 6781 } 6782 6783 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 6784 int (*fn)(struct net_device *dev, 6785 void *data), 6786 void *data) 6787 { 6788 struct net_device *ldev; 6789 struct list_head *iter; 6790 int ret; 6791 6792 for (iter = &dev->adj_list.lower, 6793 ldev = netdev_next_lower_dev_rcu(dev, &iter); 6794 ldev; 6795 ldev = netdev_next_lower_dev_rcu(dev, &iter)) { 6796 /* first is the lower device itself */ 6797 ret = fn(ldev, data); 6798 if (ret) 6799 return ret; 6800 6801 /* then look at all of its lower devices */ 6802 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data); 6803 if (ret) 6804 return ret; 6805 } 6806 6807 return 0; 6808 } 6809 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 6810 6811 /** 6812 * netdev_lower_get_first_private_rcu - Get the first ->private from the 6813 * lower neighbour list, RCU 
6814 * variant 6815 * @dev: device 6816 * 6817 * Gets the first netdev_adjacent->private from the dev's lower neighbour 6818 * list. The caller must hold RCU read lock. 6819 */ 6820 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 6821 { 6822 struct netdev_adjacent *lower; 6823 6824 lower = list_first_or_null_rcu(&dev->adj_list.lower, 6825 struct netdev_adjacent, list); 6826 if (lower) 6827 return lower->private; 6828 return NULL; 6829 } 6830 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 6831 6832 /** 6833 * netdev_master_upper_dev_get_rcu - Get master upper device 6834 * @dev: device 6835 * 6836 * Find a master upper device and return pointer to it or NULL in case 6837 * it's not there. The caller must hold the RCU read lock. 6838 */ 6839 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 6840 { 6841 struct netdev_adjacent *upper; 6842 6843 upper = list_first_or_null_rcu(&dev->adj_list.upper, 6844 struct netdev_adjacent, list); 6845 if (upper && likely(upper->master)) 6846 return upper->dev; 6847 return NULL; 6848 } 6849 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 6850 6851 static int netdev_adjacent_sysfs_add(struct net_device *dev, 6852 struct net_device *adj_dev, 6853 struct list_head *dev_list) 6854 { 6855 char linkname[IFNAMSIZ+7]; 6856 6857 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6858 "upper_%s" : "lower_%s", adj_dev->name); 6859 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 6860 linkname); 6861 } 6862 static void netdev_adjacent_sysfs_del(struct net_device *dev, 6863 char *name, 6864 struct list_head *dev_list) 6865 { 6866 char linkname[IFNAMSIZ+7]; 6867 6868 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6869 "upper_%s" : "lower_%s", name); 6870 sysfs_remove_link(&(dev->dev.kobj), linkname); 6871 } 6872 6873 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 6874 struct net_device *adj_dev, 6875 struct list_head *dev_list) 6876 { 6877 return (dev_list == &dev->adj_list.upper || 6878 dev_list == &dev->adj_list.lower) && 6879 net_eq(dev_net(dev), dev_net(adj_dev)); 6880 } 6881 6882 static int __netdev_adjacent_dev_insert(struct net_device *dev, 6883 struct net_device *adj_dev, 6884 struct list_head *dev_list, 6885 void *private, bool master) 6886 { 6887 struct netdev_adjacent *adj; 6888 int ret; 6889 6890 adj = __netdev_find_adj(adj_dev, dev_list); 6891 6892 if (adj) { 6893 adj->ref_nr += 1; 6894 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 6895 dev->name, adj_dev->name, adj->ref_nr); 6896 6897 return 0; 6898 } 6899 6900 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 6901 if (!adj) 6902 return -ENOMEM; 6903 6904 adj->dev = adj_dev; 6905 adj->master = master; 6906 adj->ref_nr = 1; 6907 adj->private = private; 6908 dev_hold(adj_dev); 6909 6910 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 6911 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 6912 6913 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 6914 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 6915 if (ret) 6916 goto free_adj; 6917 } 6918 6919 /* Ensure that master link is always the first item in list. 
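	 * netdev_master_upper_dev_get() and netdev_master_upper_dev_get_rcu()
	 * rely on this ordering: they only look at the first entry of
	 * adj_list.upper and test its ->master flag.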
*/ 6920 if (master) { 6921 ret = sysfs_create_link(&(dev->dev.kobj), 6922 &(adj_dev->dev.kobj), "master"); 6923 if (ret) 6924 goto remove_symlinks; 6925 6926 list_add_rcu(&adj->list, dev_list); 6927 } else { 6928 list_add_tail_rcu(&adj->list, dev_list); 6929 } 6930 6931 return 0; 6932 6933 remove_symlinks: 6934 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6935 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6936 free_adj: 6937 kfree(adj); 6938 dev_put(adj_dev); 6939 6940 return ret; 6941 } 6942 6943 static void __netdev_adjacent_dev_remove(struct net_device *dev, 6944 struct net_device *adj_dev, 6945 u16 ref_nr, 6946 struct list_head *dev_list) 6947 { 6948 struct netdev_adjacent *adj; 6949 6950 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 6951 dev->name, adj_dev->name, ref_nr); 6952 6953 adj = __netdev_find_adj(adj_dev, dev_list); 6954 6955 if (!adj) { 6956 pr_err("Adjacency does not exist for device %s from %s\n", 6957 dev->name, adj_dev->name); 6958 WARN_ON(1); 6959 return; 6960 } 6961 6962 if (adj->ref_nr > ref_nr) { 6963 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 6964 dev->name, adj_dev->name, ref_nr, 6965 adj->ref_nr - ref_nr); 6966 adj->ref_nr -= ref_nr; 6967 return; 6968 } 6969 6970 if (adj->master) 6971 sysfs_remove_link(&(dev->dev.kobj), "master"); 6972 6973 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6974 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6975 6976 list_del_rcu(&adj->list); 6977 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 6978 adj_dev->name, dev->name, adj_dev->name); 6979 dev_put(adj_dev); 6980 kfree_rcu(adj, rcu); 6981 } 6982 6983 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 6984 struct net_device *upper_dev, 6985 struct list_head *up_list, 6986 struct list_head *down_list, 6987 void *private, bool master) 6988 { 6989 int ret; 6990 6991 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 6992 private, master); 6993 if (ret) 6994 return ret; 6995 6996 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 6997 private, false); 6998 if (ret) { 6999 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7000 return ret; 7001 } 7002 7003 return 0; 7004 } 7005 7006 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7007 struct net_device *upper_dev, 7008 u16 ref_nr, 7009 struct list_head *up_list, 7010 struct list_head *down_list) 7011 { 7012 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7013 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7014 } 7015 7016 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7017 struct net_device *upper_dev, 7018 void *private, bool master) 7019 { 7020 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7021 &dev->adj_list.upper, 7022 &upper_dev->adj_list.lower, 7023 private, master); 7024 } 7025 7026 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7027 struct net_device *upper_dev) 7028 { 7029 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7030 &dev->adj_list.upper, 7031 &upper_dev->adj_list.lower); 7032 } 7033 7034 static int __netdev_upper_dev_link(struct net_device *dev, 7035 struct net_device *upper_dev, bool master, 7036 void *upper_priv, void *upper_info, 7037 struct netlink_ext_ack *extack) 7038 { 7039 struct netdev_notifier_changeupper_info changeupper_info = { 7040 .info = { 7041 .dev = dev, 7042 .extack = extack, 7043 }, 7044 .upper_dev = upper_dev, 7045 .master = master, 
7046 .linking = true, 7047 .upper_info = upper_info, 7048 }; 7049 struct net_device *master_dev; 7050 int ret = 0; 7051 7052 ASSERT_RTNL(); 7053 7054 if (dev == upper_dev) 7055 return -EBUSY; 7056 7057 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7058 if (netdev_has_upper_dev(upper_dev, dev)) 7059 return -EBUSY; 7060 7061 if (!master) { 7062 if (netdev_has_upper_dev(dev, upper_dev)) 7063 return -EEXIST; 7064 } else { 7065 master_dev = netdev_master_upper_dev_get(dev); 7066 if (master_dev) 7067 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7068 } 7069 7070 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7071 &changeupper_info.info); 7072 ret = notifier_to_errno(ret); 7073 if (ret) 7074 return ret; 7075 7076 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7077 master); 7078 if (ret) 7079 return ret; 7080 7081 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7082 &changeupper_info.info); 7083 ret = notifier_to_errno(ret); 7084 if (ret) 7085 goto rollback; 7086 7087 return 0; 7088 7089 rollback: 7090 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7091 7092 return ret; 7093 } 7094 7095 /** 7096 * netdev_upper_dev_link - Add a link to the upper device 7097 * @dev: device 7098 * @upper_dev: new upper device 7099 * @extack: netlink extended ack 7100 * 7101 * Adds a link to device which is upper to this one. The caller must hold 7102 * the RTNL lock. On a failure a negative errno code is returned. 7103 * On success the reference counts are adjusted and the function 7104 * returns zero. 7105 */ 7106 int netdev_upper_dev_link(struct net_device *dev, 7107 struct net_device *upper_dev, 7108 struct netlink_ext_ack *extack) 7109 { 7110 return __netdev_upper_dev_link(dev, upper_dev, false, 7111 NULL, NULL, extack); 7112 } 7113 EXPORT_SYMBOL(netdev_upper_dev_link); 7114 7115 /** 7116 * netdev_master_upper_dev_link - Add a master link to the upper device 7117 * @dev: device 7118 * @upper_dev: new upper device 7119 * @upper_priv: upper device private 7120 * @upper_info: upper info to be passed down via notifier 7121 * @extack: netlink extended ack 7122 * 7123 * Adds a link to device which is upper to this one. In this case, only 7124 * one master upper device can be linked, although other non-master devices 7125 * might be linked as well. The caller must hold the RTNL lock. 7126 * On a failure a negative errno code is returned. On success the reference 7127 * counts are adjusted and the function returns zero. 7128 */ 7129 int netdev_master_upper_dev_link(struct net_device *dev, 7130 struct net_device *upper_dev, 7131 void *upper_priv, void *upper_info, 7132 struct netlink_ext_ack *extack) 7133 { 7134 return __netdev_upper_dev_link(dev, upper_dev, true, 7135 upper_priv, upper_info, extack); 7136 } 7137 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7138 7139 /** 7140 * netdev_upper_dev_unlink - Removes a link to upper device 7141 * @dev: device 7142 * @upper_dev: new upper device 7143 * 7144 * Removes a link to device which is upper to this one. The caller must hold 7145 * the RTNL lock. 
7146 */ 7147 void netdev_upper_dev_unlink(struct net_device *dev, 7148 struct net_device *upper_dev) 7149 { 7150 struct netdev_notifier_changeupper_info changeupper_info = { 7151 .info = { 7152 .dev = dev, 7153 }, 7154 .upper_dev = upper_dev, 7155 .linking = false, 7156 }; 7157 7158 ASSERT_RTNL(); 7159 7160 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7161 7162 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7163 &changeupper_info.info); 7164 7165 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7166 7167 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7168 &changeupper_info.info); 7169 } 7170 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7171 7172 /** 7173 * netdev_bonding_info_change - Dispatch event about slave change 7174 * @dev: device 7175 * @bonding_info: info to dispatch 7176 * 7177 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7178 * The caller must hold the RTNL lock. 7179 */ 7180 void netdev_bonding_info_change(struct net_device *dev, 7181 struct netdev_bonding_info *bonding_info) 7182 { 7183 struct netdev_notifier_bonding_info info = { 7184 .info.dev = dev, 7185 }; 7186 7187 memcpy(&info.bonding_info, bonding_info, 7188 sizeof(struct netdev_bonding_info)); 7189 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7190 &info.info); 7191 } 7192 EXPORT_SYMBOL(netdev_bonding_info_change); 7193 7194 static void netdev_adjacent_add_links(struct net_device *dev) 7195 { 7196 struct netdev_adjacent *iter; 7197 7198 struct net *net = dev_net(dev); 7199 7200 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7201 if (!net_eq(net, dev_net(iter->dev))) 7202 continue; 7203 netdev_adjacent_sysfs_add(iter->dev, dev, 7204 &iter->dev->adj_list.lower); 7205 netdev_adjacent_sysfs_add(dev, iter->dev, 7206 &dev->adj_list.upper); 7207 } 7208 7209 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7210 if (!net_eq(net, dev_net(iter->dev))) 7211 continue; 7212 netdev_adjacent_sysfs_add(iter->dev, dev, 7213 &iter->dev->adj_list.upper); 7214 netdev_adjacent_sysfs_add(dev, iter->dev, 7215 &dev->adj_list.lower); 7216 } 7217 } 7218 7219 static void netdev_adjacent_del_links(struct net_device *dev) 7220 { 7221 struct netdev_adjacent *iter; 7222 7223 struct net *net = dev_net(dev); 7224 7225 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7226 if (!net_eq(net, dev_net(iter->dev))) 7227 continue; 7228 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7229 &iter->dev->adj_list.lower); 7230 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7231 &dev->adj_list.upper); 7232 } 7233 7234 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7235 if (!net_eq(net, dev_net(iter->dev))) 7236 continue; 7237 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7238 &iter->dev->adj_list.upper); 7239 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7240 &dev->adj_list.lower); 7241 } 7242 } 7243 7244 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 7245 { 7246 struct netdev_adjacent *iter; 7247 7248 struct net *net = dev_net(dev); 7249 7250 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7251 if (!net_eq(net, dev_net(iter->dev))) 7252 continue; 7253 netdev_adjacent_sysfs_del(iter->dev, oldname, 7254 &iter->dev->adj_list.lower); 7255 netdev_adjacent_sysfs_add(iter->dev, dev, 7256 &iter->dev->adj_list.lower); 7257 } 7258 7259 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7260 if (!net_eq(net, dev_net(iter->dev))) 7261 continue; 7262 netdev_adjacent_sysfs_del(iter->dev, oldname, 7263 &iter->dev->adj_list.upper); 7264 
netdev_adjacent_sysfs_add(iter->dev, dev, 7265 &iter->dev->adj_list.upper); 7266 } 7267 } 7268 7269 void *netdev_lower_dev_get_private(struct net_device *dev, 7270 struct net_device *lower_dev) 7271 { 7272 struct netdev_adjacent *lower; 7273 7274 if (!lower_dev) 7275 return NULL; 7276 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 7277 if (!lower) 7278 return NULL; 7279 7280 return lower->private; 7281 } 7282 EXPORT_SYMBOL(netdev_lower_dev_get_private); 7283 7284 7285 int dev_get_nest_level(struct net_device *dev) 7286 { 7287 struct net_device *lower = NULL; 7288 struct list_head *iter; 7289 int max_nest = -1; 7290 int nest; 7291 7292 ASSERT_RTNL(); 7293 7294 netdev_for_each_lower_dev(dev, lower, iter) { 7295 nest = dev_get_nest_level(lower); 7296 if (max_nest < nest) 7297 max_nest = nest; 7298 } 7299 7300 return max_nest + 1; 7301 } 7302 EXPORT_SYMBOL(dev_get_nest_level); 7303 7304 /** 7305 * netdev_lower_change - Dispatch event about lower device state change 7306 * @lower_dev: device 7307 * @lower_state_info: state to dispatch 7308 * 7309 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 7310 * The caller must hold the RTNL lock. 7311 */ 7312 void netdev_lower_state_changed(struct net_device *lower_dev, 7313 void *lower_state_info) 7314 { 7315 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 7316 .info.dev = lower_dev, 7317 }; 7318 7319 ASSERT_RTNL(); 7320 changelowerstate_info.lower_state_info = lower_state_info; 7321 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 7322 &changelowerstate_info.info); 7323 } 7324 EXPORT_SYMBOL(netdev_lower_state_changed); 7325 7326 static void dev_change_rx_flags(struct net_device *dev, int flags) 7327 { 7328 const struct net_device_ops *ops = dev->netdev_ops; 7329 7330 if (ops->ndo_change_rx_flags) 7331 ops->ndo_change_rx_flags(dev, flags); 7332 } 7333 7334 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 7335 { 7336 unsigned int old_flags = dev->flags; 7337 kuid_t uid; 7338 kgid_t gid; 7339 7340 ASSERT_RTNL(); 7341 7342 dev->flags |= IFF_PROMISC; 7343 dev->promiscuity += inc; 7344 if (dev->promiscuity == 0) { 7345 /* 7346 * Avoid overflow. 7347 * If inc causes overflow, untouch promisc and return error. 7348 */ 7349 if (inc < 0) 7350 dev->flags &= ~IFF_PROMISC; 7351 else { 7352 dev->promiscuity -= inc; 7353 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 7354 dev->name); 7355 return -EOVERFLOW; 7356 } 7357 } 7358 if (dev->flags != old_flags) { 7359 pr_info("device %s %s promiscuous mode\n", 7360 dev->name, 7361 dev->flags & IFF_PROMISC ? "entered" : "left"); 7362 if (audit_enabled) { 7363 current_uid_gid(&uid, &gid); 7364 audit_log(audit_context(), GFP_ATOMIC, 7365 AUDIT_ANOM_PROMISCUOUS, 7366 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 7367 dev->name, (dev->flags & IFF_PROMISC), 7368 (old_flags & IFF_PROMISC), 7369 from_kuid(&init_user_ns, audit_get_loginuid(current)), 7370 from_kuid(&init_user_ns, uid), 7371 from_kgid(&init_user_ns, gid), 7372 audit_get_sessionid(current)); 7373 } 7374 7375 dev_change_rx_flags(dev, IFF_PROMISC); 7376 } 7377 if (notify) 7378 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 7379 return 0; 7380 } 7381 7382 /** 7383 * dev_set_promiscuity - update promiscuity count on a device 7384 * @dev: device 7385 * @inc: modifier 7386 * 7387 * Add or remove promiscuity from a device. 
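 * @inc is simply added to the device's promiscuity count; callers normally
 * pass 1 to take a reference on promiscuous mode and -1 to drop it again.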
While the count in the device 7388 * remains above zero the interface remains promiscuous. Once it hits zero 7389 * the device reverts back to normal filtering operation. A negative inc 7390 * value is used to drop promiscuity on the device. 7391 * Return 0 if successful or a negative errno code on error. 7392 */ 7393 int dev_set_promiscuity(struct net_device *dev, int inc) 7394 { 7395 unsigned int old_flags = dev->flags; 7396 int err; 7397 7398 err = __dev_set_promiscuity(dev, inc, true); 7399 if (err < 0) 7400 return err; 7401 if (dev->flags != old_flags) 7402 dev_set_rx_mode(dev); 7403 return err; 7404 } 7405 EXPORT_SYMBOL(dev_set_promiscuity); 7406 7407 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 7408 { 7409 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 7410 7411 ASSERT_RTNL(); 7412 7413 dev->flags |= IFF_ALLMULTI; 7414 dev->allmulti += inc; 7415 if (dev->allmulti == 0) { 7416 /* 7417 * Avoid overflow. 7418 * If inc causes overflow, untouch allmulti and return error. 7419 */ 7420 if (inc < 0) 7421 dev->flags &= ~IFF_ALLMULTI; 7422 else { 7423 dev->allmulti -= inc; 7424 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 7425 dev->name); 7426 return -EOVERFLOW; 7427 } 7428 } 7429 if (dev->flags ^ old_flags) { 7430 dev_change_rx_flags(dev, IFF_ALLMULTI); 7431 dev_set_rx_mode(dev); 7432 if (notify) 7433 __dev_notify_flags(dev, old_flags, 7434 dev->gflags ^ old_gflags); 7435 } 7436 return 0; 7437 } 7438 7439 /** 7440 * dev_set_allmulti - update allmulti count on a device 7441 * @dev: device 7442 * @inc: modifier 7443 * 7444 * Add or remove reception of all multicast frames to a device. While the 7445 * count in the device remains above zero the interface remains listening 7446 * to all interfaces. Once it hits zero the device reverts back to normal 7447 * filtering operation. A negative @inc value is used to drop the counter 7448 * when releasing a resource needing all multicasts. 7449 * Return 0 if successful or a negative errno code on error. 7450 */ 7451 7452 int dev_set_allmulti(struct net_device *dev, int inc) 7453 { 7454 return __dev_set_allmulti(dev, inc, true); 7455 } 7456 EXPORT_SYMBOL(dev_set_allmulti); 7457 7458 /* 7459 * Upload unicast and multicast address lists to device and 7460 * configure RX filtering. When the device doesn't support unicast 7461 * filtering it is put in promiscuous mode while unicast addresses 7462 * are present. 7463 */ 7464 void __dev_set_rx_mode(struct net_device *dev) 7465 { 7466 const struct net_device_ops *ops = dev->netdev_ops; 7467 7468 /* dev_open will call this function so the list will stay sane. */ 7469 if (!(dev->flags&IFF_UP)) 7470 return; 7471 7472 if (!netif_device_present(dev)) 7473 return; 7474 7475 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 7476 /* Unicast addresses changes may only happen under the rtnl, 7477 * therefore calling __dev_set_promiscuity here is safe. 
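		 * A device without IFF_UNICAST_FLT cannot filter secondary
		 * unicast addresses itself, so we hold a promiscuity
		 * reference for as long as such addresses are configured and
		 * release it again once the unicast list becomes empty.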
7478 */ 7479 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 7480 __dev_set_promiscuity(dev, 1, false); 7481 dev->uc_promisc = true; 7482 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 7483 __dev_set_promiscuity(dev, -1, false); 7484 dev->uc_promisc = false; 7485 } 7486 } 7487 7488 if (ops->ndo_set_rx_mode) 7489 ops->ndo_set_rx_mode(dev); 7490 } 7491 7492 void dev_set_rx_mode(struct net_device *dev) 7493 { 7494 netif_addr_lock_bh(dev); 7495 __dev_set_rx_mode(dev); 7496 netif_addr_unlock_bh(dev); 7497 } 7498 7499 /** 7500 * dev_get_flags - get flags reported to userspace 7501 * @dev: device 7502 * 7503 * Get the combination of flag bits exported through APIs to userspace. 7504 */ 7505 unsigned int dev_get_flags(const struct net_device *dev) 7506 { 7507 unsigned int flags; 7508 7509 flags = (dev->flags & ~(IFF_PROMISC | 7510 IFF_ALLMULTI | 7511 IFF_RUNNING | 7512 IFF_LOWER_UP | 7513 IFF_DORMANT)) | 7514 (dev->gflags & (IFF_PROMISC | 7515 IFF_ALLMULTI)); 7516 7517 if (netif_running(dev)) { 7518 if (netif_oper_up(dev)) 7519 flags |= IFF_RUNNING; 7520 if (netif_carrier_ok(dev)) 7521 flags |= IFF_LOWER_UP; 7522 if (netif_dormant(dev)) 7523 flags |= IFF_DORMANT; 7524 } 7525 7526 return flags; 7527 } 7528 EXPORT_SYMBOL(dev_get_flags); 7529 7530 int __dev_change_flags(struct net_device *dev, unsigned int flags, 7531 struct netlink_ext_ack *extack) 7532 { 7533 unsigned int old_flags = dev->flags; 7534 int ret; 7535 7536 ASSERT_RTNL(); 7537 7538 /* 7539 * Set the flags on our device. 7540 */ 7541 7542 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 7543 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 7544 IFF_AUTOMEDIA)) | 7545 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 7546 IFF_ALLMULTI)); 7547 7548 /* 7549 * Load in the correct multicast list now the flags have changed. 7550 */ 7551 7552 if ((old_flags ^ flags) & IFF_MULTICAST) 7553 dev_change_rx_flags(dev, IFF_MULTICAST); 7554 7555 dev_set_rx_mode(dev); 7556 7557 /* 7558 * Have we downed the interface. We handle IFF_UP ourselves 7559 * according to user attempts to set it, rather than blindly 7560 * setting it. 7561 */ 7562 7563 ret = 0; 7564 if ((old_flags ^ flags) & IFF_UP) { 7565 if (old_flags & IFF_UP) 7566 __dev_close(dev); 7567 else 7568 ret = __dev_open(dev, extack); 7569 } 7570 7571 if ((flags ^ dev->gflags) & IFF_PROMISC) { 7572 int inc = (flags & IFF_PROMISC) ? 1 : -1; 7573 unsigned int old_flags = dev->flags; 7574 7575 dev->gflags ^= IFF_PROMISC; 7576 7577 if (__dev_set_promiscuity(dev, inc, false) >= 0) 7578 if (dev->flags != old_flags) 7579 dev_set_rx_mode(dev); 7580 } 7581 7582 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 7583 * is important. Some (broken) drivers set IFF_PROMISC, when 7584 * IFF_ALLMULTI is requested not asking us and not reporting. 7585 */ 7586 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 7587 int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; 7588 7589 dev->gflags ^= IFF_ALLMULTI; 7590 __dev_set_allmulti(dev, inc, false); 7591 } 7592 7593 return ret; 7594 } 7595 7596 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 7597 unsigned int gchanges) 7598 { 7599 unsigned int changes = dev->flags ^ old_flags; 7600 7601 if (gchanges) 7602 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 7603 7604 if (changes & IFF_UP) { 7605 if (dev->flags & IFF_UP) 7606 call_netdevice_notifiers(NETDEV_UP, dev); 7607 else 7608 call_netdevice_notifiers(NETDEV_DOWN, dev); 7609 } 7610 7611 if (dev->flags & IFF_UP && 7612 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 7613 struct netdev_notifier_change_info change_info = { 7614 .info = { 7615 .dev = dev, 7616 }, 7617 .flags_changed = changes, 7618 }; 7619 7620 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 7621 } 7622 } 7623 7624 /** 7625 * dev_change_flags - change device settings 7626 * @dev: device 7627 * @flags: device state flags 7628 * @extack: netlink extended ack 7629 * 7630 * Change settings on device based state flags. The flags are 7631 * in the userspace exported format. 7632 */ 7633 int dev_change_flags(struct net_device *dev, unsigned int flags, 7634 struct netlink_ext_ack *extack) 7635 { 7636 int ret; 7637 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 7638 7639 ret = __dev_change_flags(dev, flags, extack); 7640 if (ret < 0) 7641 return ret; 7642 7643 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 7644 __dev_notify_flags(dev, old_flags, changes); 7645 return ret; 7646 } 7647 EXPORT_SYMBOL(dev_change_flags); 7648 7649 int __dev_set_mtu(struct net_device *dev, int new_mtu) 7650 { 7651 const struct net_device_ops *ops = dev->netdev_ops; 7652 7653 if (ops->ndo_change_mtu) 7654 return ops->ndo_change_mtu(dev, new_mtu); 7655 7656 dev->mtu = new_mtu; 7657 return 0; 7658 } 7659 EXPORT_SYMBOL(__dev_set_mtu); 7660 7661 /** 7662 * dev_set_mtu_ext - Change maximum transfer unit 7663 * @dev: device 7664 * @new_mtu: new transfer unit 7665 * @extack: netlink extended ack 7666 * 7667 * Change the maximum transfer size of the network device. 7668 */ 7669 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 7670 struct netlink_ext_ack *extack) 7671 { 7672 int err, orig_mtu; 7673 7674 if (new_mtu == dev->mtu) 7675 return 0; 7676 7677 /* MTU must be positive, and in range */ 7678 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 7679 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 7680 return -EINVAL; 7681 } 7682 7683 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 7684 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 7685 return -EINVAL; 7686 } 7687 7688 if (!netif_device_present(dev)) 7689 return -ENODEV; 7690 7691 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 7692 err = notifier_to_errno(err); 7693 if (err) 7694 return err; 7695 7696 orig_mtu = dev->mtu; 7697 err = __dev_set_mtu(dev, new_mtu); 7698 7699 if (!err) { 7700 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7701 orig_mtu); 7702 err = notifier_to_errno(err); 7703 if (err) { 7704 /* setting mtu back and notifying everyone again, 7705 * so that they have a chance to revert changes. 
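			 * Any error from this second notifier pass is
			 * deliberately ignored; we are already unwinding a
			 * failed MTU change.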
7706 */ 7707 __dev_set_mtu(dev, orig_mtu); 7708 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7709 new_mtu); 7710 } 7711 } 7712 return err; 7713 } 7714 7715 int dev_set_mtu(struct net_device *dev, int new_mtu) 7716 { 7717 struct netlink_ext_ack extack; 7718 int err; 7719 7720 memset(&extack, 0, sizeof(extack)); 7721 err = dev_set_mtu_ext(dev, new_mtu, &extack); 7722 if (err && extack._msg) 7723 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 7724 return err; 7725 } 7726 EXPORT_SYMBOL(dev_set_mtu); 7727 7728 /** 7729 * dev_change_tx_queue_len - Change TX queue length of a netdevice 7730 * @dev: device 7731 * @new_len: new tx queue length 7732 */ 7733 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 7734 { 7735 unsigned int orig_len = dev->tx_queue_len; 7736 int res; 7737 7738 if (new_len != (unsigned int)new_len) 7739 return -ERANGE; 7740 7741 if (new_len != orig_len) { 7742 dev->tx_queue_len = new_len; 7743 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7744 res = notifier_to_errno(res); 7745 if (res) 7746 goto err_rollback; 7747 res = dev_qdisc_change_tx_queue_len(dev); 7748 if (res) 7749 goto err_rollback; 7750 } 7751 7752 return 0; 7753 7754 err_rollback: 7755 netdev_err(dev, "refused to change device tx_queue_len\n"); 7756 dev->tx_queue_len = orig_len; 7757 return res; 7758 } 7759 7760 /** 7761 * dev_set_group - Change group this device belongs to 7762 * @dev: device 7763 * @new_group: group this device should belong to 7764 */ 7765 void dev_set_group(struct net_device *dev, int new_group) 7766 { 7767 dev->group = new_group; 7768 } 7769 EXPORT_SYMBOL(dev_set_group); 7770 7771 /** 7772 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 7773 * @dev: device 7774 * @addr: new address 7775 * @extack: netlink extended ack 7776 */ 7777 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 7778 struct netlink_ext_ack *extack) 7779 { 7780 struct netdev_notifier_pre_changeaddr_info info = { 7781 .info.dev = dev, 7782 .info.extack = extack, 7783 .dev_addr = addr, 7784 }; 7785 int rc; 7786 7787 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 7788 return notifier_to_errno(rc); 7789 } 7790 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 7791 7792 /** 7793 * dev_set_mac_address - Change Media Access Control Address 7794 * @dev: device 7795 * @sa: new address 7796 * @extack: netlink extended ack 7797 * 7798 * Change the hardware (MAC) address of the device 7799 */ 7800 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 7801 struct netlink_ext_ack *extack) 7802 { 7803 const struct net_device_ops *ops = dev->netdev_ops; 7804 int err; 7805 7806 if (!ops->ndo_set_mac_address) 7807 return -EOPNOTSUPP; 7808 if (sa->sa_family != dev->type) 7809 return -EINVAL; 7810 if (!netif_device_present(dev)) 7811 return -ENODEV; 7812 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 7813 if (err) 7814 return err; 7815 err = ops->ndo_set_mac_address(dev, sa); 7816 if (err) 7817 return err; 7818 dev->addr_assign_type = NET_ADDR_SET; 7819 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 7820 add_device_randomness(dev->dev_addr, dev->addr_len); 7821 return 0; 7822 } 7823 EXPORT_SYMBOL(dev_set_mac_address); 7824 7825 /** 7826 * dev_change_carrier - Change device carrier 7827 * @dev: device 7828 * @new_carrier: new value 7829 * 7830 * Change device carrier 7831 */ 7832 int dev_change_carrier(struct net_device *dev, bool new_carrier) 7833 { 7834 const struct net_device_ops *ops = dev->netdev_ops; 
7835 7836 if (!ops->ndo_change_carrier) 7837 return -EOPNOTSUPP; 7838 if (!netif_device_present(dev)) 7839 return -ENODEV; 7840 return ops->ndo_change_carrier(dev, new_carrier); 7841 } 7842 EXPORT_SYMBOL(dev_change_carrier); 7843 7844 /** 7845 * dev_get_phys_port_id - Get device physical port ID 7846 * @dev: device 7847 * @ppid: port ID 7848 * 7849 * Get device physical port ID 7850 */ 7851 int dev_get_phys_port_id(struct net_device *dev, 7852 struct netdev_phys_item_id *ppid) 7853 { 7854 const struct net_device_ops *ops = dev->netdev_ops; 7855 7856 if (!ops->ndo_get_phys_port_id) 7857 return -EOPNOTSUPP; 7858 return ops->ndo_get_phys_port_id(dev, ppid); 7859 } 7860 EXPORT_SYMBOL(dev_get_phys_port_id); 7861 7862 /** 7863 * dev_get_phys_port_name - Get device physical port name 7864 * @dev: device 7865 * @name: port name 7866 * @len: limit of bytes to copy to name 7867 * 7868 * Get device physical port name 7869 */ 7870 int dev_get_phys_port_name(struct net_device *dev, 7871 char *name, size_t len) 7872 { 7873 const struct net_device_ops *ops = dev->netdev_ops; 7874 int err; 7875 7876 if (ops->ndo_get_phys_port_name) { 7877 err = ops->ndo_get_phys_port_name(dev, name, len); 7878 if (err != -EOPNOTSUPP) 7879 return err; 7880 } 7881 return devlink_compat_phys_port_name_get(dev, name, len); 7882 } 7883 EXPORT_SYMBOL(dev_get_phys_port_name); 7884 7885 /** 7886 * dev_get_port_parent_id - Get the device's port parent identifier 7887 * @dev: network device 7888 * @ppid: pointer to a storage for the port's parent identifier 7889 * @recurse: allow/disallow recursion to lower devices 7890 * 7891 * Get the devices's port parent identifier 7892 */ 7893 int dev_get_port_parent_id(struct net_device *dev, 7894 struct netdev_phys_item_id *ppid, 7895 bool recurse) 7896 { 7897 const struct net_device_ops *ops = dev->netdev_ops; 7898 struct netdev_phys_item_id first = { }; 7899 struct net_device *lower_dev; 7900 struct list_head *iter; 7901 int err; 7902 7903 if (ops->ndo_get_port_parent_id) { 7904 err = ops->ndo_get_port_parent_id(dev, ppid); 7905 if (err != -EOPNOTSUPP) 7906 return err; 7907 } 7908 7909 err = devlink_compat_switch_id_get(dev, ppid); 7910 if (!err || err != -EOPNOTSUPP) 7911 return err; 7912 7913 if (!recurse) 7914 return -EOPNOTSUPP; 7915 7916 netdev_for_each_lower_dev(dev, lower_dev, iter) { 7917 err = dev_get_port_parent_id(lower_dev, ppid, recurse); 7918 if (err) 7919 break; 7920 if (!first.id_len) 7921 first = *ppid; 7922 else if (memcmp(&first, ppid, sizeof(*ppid))) 7923 return -ENODATA; 7924 } 7925 7926 return err; 7927 } 7928 EXPORT_SYMBOL(dev_get_port_parent_id); 7929 7930 /** 7931 * netdev_port_same_parent_id - Indicate if two network devices have 7932 * the same port parent identifier 7933 * @a: first network device 7934 * @b: second network device 7935 */ 7936 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 7937 { 7938 struct netdev_phys_item_id a_id = { }; 7939 struct netdev_phys_item_id b_id = { }; 7940 7941 if (dev_get_port_parent_id(a, &a_id, true) || 7942 dev_get_port_parent_id(b, &b_id, true)) 7943 return false; 7944 7945 return netdev_phys_item_id_same(&a_id, &b_id); 7946 } 7947 EXPORT_SYMBOL(netdev_port_same_parent_id); 7948 7949 /** 7950 * dev_change_proto_down - update protocol port state information 7951 * @dev: device 7952 * @proto_down: new value 7953 * 7954 * This info can be used by switch drivers to set the phys state of the 7955 * port. 
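 * Drivers that only want the carrier state to follow proto_down can point
 * their ndo_change_proto_down at dev_change_proto_down_generic() below
 * instead of providing their own implementation.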
7956 */ 7957 int dev_change_proto_down(struct net_device *dev, bool proto_down) 7958 { 7959 const struct net_device_ops *ops = dev->netdev_ops; 7960 7961 if (!ops->ndo_change_proto_down) 7962 return -EOPNOTSUPP; 7963 if (!netif_device_present(dev)) 7964 return -ENODEV; 7965 return ops->ndo_change_proto_down(dev, proto_down); 7966 } 7967 EXPORT_SYMBOL(dev_change_proto_down); 7968 7969 /** 7970 * dev_change_proto_down_generic - generic implementation for 7971 * ndo_change_proto_down that sets carrier according to 7972 * proto_down. 7973 * 7974 * @dev: device 7975 * @proto_down: new value 7976 */ 7977 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) 7978 { 7979 if (proto_down) 7980 netif_carrier_off(dev); 7981 else 7982 netif_carrier_on(dev); 7983 dev->proto_down = proto_down; 7984 return 0; 7985 } 7986 EXPORT_SYMBOL(dev_change_proto_down_generic); 7987 7988 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, 7989 enum bpf_netdev_command cmd) 7990 { 7991 struct netdev_bpf xdp; 7992 7993 if (!bpf_op) 7994 return 0; 7995 7996 memset(&xdp, 0, sizeof(xdp)); 7997 xdp.command = cmd; 7998 7999 /* Query must always succeed. */ 8000 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); 8001 8002 return xdp.prog_id; 8003 } 8004 8005 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, 8006 struct netlink_ext_ack *extack, u32 flags, 8007 struct bpf_prog *prog) 8008 { 8009 struct netdev_bpf xdp; 8010 8011 memset(&xdp, 0, sizeof(xdp)); 8012 if (flags & XDP_FLAGS_HW_MODE) 8013 xdp.command = XDP_SETUP_PROG_HW; 8014 else 8015 xdp.command = XDP_SETUP_PROG; 8016 xdp.extack = extack; 8017 xdp.flags = flags; 8018 xdp.prog = prog; 8019 8020 return bpf_op(dev, &xdp); 8021 } 8022 8023 static void dev_xdp_uninstall(struct net_device *dev) 8024 { 8025 struct netdev_bpf xdp; 8026 bpf_op_t ndo_bpf; 8027 8028 /* Remove generic XDP */ 8029 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); 8030 8031 /* Remove from the driver */ 8032 ndo_bpf = dev->netdev_ops->ndo_bpf; 8033 if (!ndo_bpf) 8034 return; 8035 8036 memset(&xdp, 0, sizeof(xdp)); 8037 xdp.command = XDP_QUERY_PROG; 8038 WARN_ON(ndo_bpf(dev, &xdp)); 8039 if (xdp.prog_id) 8040 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8041 NULL)); 8042 8043 /* Remove HW offload */ 8044 memset(&xdp, 0, sizeof(xdp)); 8045 xdp.command = XDP_QUERY_PROG_HW; 8046 if (!ndo_bpf(dev, &xdp) && xdp.prog_id) 8047 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8048 NULL)); 8049 } 8050 8051 /** 8052 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 8053 * @dev: device 8054 * @extack: netlink extended ack 8055 * @fd: new program fd or negative value to clear 8056 * @flags: xdp-related flags 8057 * 8058 * Set or clear a bpf program for a device 8059 */ 8060 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 8061 int fd, u32 flags) 8062 { 8063 const struct net_device_ops *ops = dev->netdev_ops; 8064 enum bpf_netdev_command query; 8065 struct bpf_prog *prog = NULL; 8066 bpf_op_t bpf_op, bpf_chk; 8067 bool offload; 8068 int err; 8069 8070 ASSERT_RTNL(); 8071 8072 offload = flags & XDP_FLAGS_HW_MODE; 8073 query = offload ? 
XDP_QUERY_PROG_HW : XDP_QUERY_PROG; 8074 8075 bpf_op = bpf_chk = ops->ndo_bpf; 8076 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { 8077 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); 8078 return -EOPNOTSUPP; 8079 } 8080 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) 8081 bpf_op = generic_xdp_install; 8082 if (bpf_op == bpf_chk) 8083 bpf_chk = generic_xdp_install; 8084 8085 if (fd >= 0) { 8086 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { 8087 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); 8088 return -EEXIST; 8089 } 8090 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 8091 __dev_xdp_query(dev, bpf_op, query)) { 8092 NL_SET_ERR_MSG(extack, "XDP program already attached"); 8093 return -EBUSY; 8094 } 8095 8096 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 8097 bpf_op == ops->ndo_bpf); 8098 if (IS_ERR(prog)) 8099 return PTR_ERR(prog); 8100 8101 if (!offload && bpf_prog_is_dev_bound(prog->aux)) { 8102 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); 8103 bpf_prog_put(prog); 8104 return -EINVAL; 8105 } 8106 } 8107 8108 err = dev_xdp_install(dev, bpf_op, extack, flags, prog); 8109 if (err < 0 && prog) 8110 bpf_prog_put(prog); 8111 8112 return err; 8113 } 8114 8115 /** 8116 * dev_new_index - allocate an ifindex 8117 * @net: the applicable net namespace 8118 * 8119 * Returns a suitable unique value for a new device interface 8120 * number. The caller must hold the rtnl semaphore or the 8121 * dev_base_lock to be sure it remains unique. 8122 */ 8123 static int dev_new_index(struct net *net) 8124 { 8125 int ifindex = net->ifindex; 8126 8127 for (;;) { 8128 if (++ifindex <= 0) 8129 ifindex = 1; 8130 if (!__dev_get_by_index(net, ifindex)) 8131 return net->ifindex = ifindex; 8132 } 8133 } 8134 8135 /* Delayed registration/unregisteration */ 8136 static LIST_HEAD(net_todo_list); 8137 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 8138 8139 static void net_set_todo(struct net_device *dev) 8140 { 8141 list_add_tail(&dev->todo_list, &net_todo_list); 8142 dev_net(dev)->dev_unreg_count++; 8143 } 8144 8145 static void rollback_registered_many(struct list_head *head) 8146 { 8147 struct net_device *dev, *tmp; 8148 LIST_HEAD(close_head); 8149 8150 BUG_ON(dev_boot_phase); 8151 ASSERT_RTNL(); 8152 8153 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 8154 /* Some devices call without registering 8155 * for initialization unwind. Remove those 8156 * devices and proceed with the remaining. 8157 */ 8158 if (dev->reg_state == NETREG_UNINITIALIZED) { 8159 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 8160 dev->name, dev); 8161 8162 WARN_ON(1); 8163 list_del(&dev->unreg_list); 8164 continue; 8165 } 8166 dev->dismantle = true; 8167 BUG_ON(dev->reg_state != NETREG_REGISTERED); 8168 } 8169 8170 /* If device is running, close it first. */ 8171 list_for_each_entry(dev, head, unreg_list) 8172 list_add_tail(&dev->close_list, &close_head); 8173 dev_close_many(&close_head, true); 8174 8175 list_for_each_entry(dev, head, unreg_list) { 8176 /* And unlink it from device chain. */ 8177 unlist_netdevice(dev); 8178 8179 dev->reg_state = NETREG_UNREGISTERING; 8180 } 8181 flush_all_backlogs(); 8182 8183 synchronize_net(); 8184 8185 list_for_each_entry(dev, head, unreg_list) { 8186 struct sk_buff *skb = NULL; 8187 8188 /* Shutdown queueing discipline. 
*/ 8189 dev_shutdown(dev); 8190 8191 dev_xdp_uninstall(dev); 8192 8193 /* Notify protocols, that we are about to destroy 8194 * this device. They should clean all the things. 8195 */ 8196 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8197 8198 if (!dev->rtnl_link_ops || 8199 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8200 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 8201 GFP_KERNEL, NULL, 0); 8202 8203 /* 8204 * Flush the unicast and multicast chains 8205 */ 8206 dev_uc_flush(dev); 8207 dev_mc_flush(dev); 8208 8209 if (dev->netdev_ops->ndo_uninit) 8210 dev->netdev_ops->ndo_uninit(dev); 8211 8212 if (skb) 8213 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); 8214 8215 /* Notifier chain MUST detach us all upper devices. */ 8216 WARN_ON(netdev_has_any_upper_dev(dev)); 8217 WARN_ON(netdev_has_any_lower_dev(dev)); 8218 8219 /* Remove entries from kobject tree */ 8220 netdev_unregister_kobject(dev); 8221 #ifdef CONFIG_XPS 8222 /* Remove XPS queueing entries */ 8223 netif_reset_xps_queues_gt(dev, 0); 8224 #endif 8225 } 8226 8227 synchronize_net(); 8228 8229 list_for_each_entry(dev, head, unreg_list) 8230 dev_put(dev); 8231 } 8232 8233 static void rollback_registered(struct net_device *dev) 8234 { 8235 LIST_HEAD(single); 8236 8237 list_add(&dev->unreg_list, &single); 8238 rollback_registered_many(&single); 8239 list_del(&single); 8240 } 8241 8242 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 8243 struct net_device *upper, netdev_features_t features) 8244 { 8245 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8246 netdev_features_t feature; 8247 int feature_bit; 8248 8249 for_each_netdev_feature(upper_disables, feature_bit) { 8250 feature = __NETIF_F_BIT(feature_bit); 8251 if (!(upper->wanted_features & feature) 8252 && (features & feature)) { 8253 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 8254 &feature, upper->name); 8255 features &= ~feature; 8256 } 8257 } 8258 8259 return features; 8260 } 8261 8262 static void netdev_sync_lower_features(struct net_device *upper, 8263 struct net_device *lower, netdev_features_t features) 8264 { 8265 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8266 netdev_features_t feature; 8267 int feature_bit; 8268 8269 for_each_netdev_feature(upper_disables, feature_bit) { 8270 feature = __NETIF_F_BIT(feature_bit); 8271 if (!(features & feature) && (lower->features & feature)) { 8272 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8273 &feature, lower->name); 8274 lower->wanted_features &= ~feature; 8275 netdev_update_features(lower); 8276 8277 if (unlikely(lower->features & feature)) 8278 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 8279 &feature, lower->name); 8280 } 8281 } 8282 } 8283 8284 static netdev_features_t netdev_fix_features(struct net_device *dev, 8285 netdev_features_t features) 8286 { 8287 /* Fix illegal checksum combinations */ 8288 if ((features & NETIF_F_HW_CSUM) && 8289 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 8290 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 8291 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 8292 } 8293 8294 /* TSO requires that SG is present as well. 
*/ 8295 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 8296 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 8297 features &= ~NETIF_F_ALL_TSO; 8298 } 8299 8300 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 8301 !(features & NETIF_F_IP_CSUM)) { 8302 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 8303 features &= ~NETIF_F_TSO; 8304 features &= ~NETIF_F_TSO_ECN; 8305 } 8306 8307 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 8308 !(features & NETIF_F_IPV6_CSUM)) { 8309 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 8310 features &= ~NETIF_F_TSO6; 8311 } 8312 8313 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 8314 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 8315 features &= ~NETIF_F_TSO_MANGLEID; 8316 8317 /* TSO ECN requires that TSO is present as well. */ 8318 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 8319 features &= ~NETIF_F_TSO_ECN; 8320 8321 /* Software GSO depends on SG. */ 8322 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 8323 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 8324 features &= ~NETIF_F_GSO; 8325 } 8326 8327 /* GSO partial features require GSO partial be set */ 8328 if ((features & dev->gso_partial_features) && 8329 !(features & NETIF_F_GSO_PARTIAL)) { 8330 netdev_dbg(dev, 8331 "Dropping partially supported GSO features since no GSO partial.\n"); 8332 features &= ~dev->gso_partial_features; 8333 } 8334 8335 if (!(features & NETIF_F_RXCSUM)) { 8336 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 8337 * successfully merged by hardware must also have the 8338 * checksum verified by hardware. If the user does not 8339 * want to enable RXCSUM, logically, we should disable GRO_HW. 
8340 */ 8341 if (features & NETIF_F_GRO_HW) { 8342 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 8343 features &= ~NETIF_F_GRO_HW; 8344 } 8345 } 8346 8347 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 8348 if (features & NETIF_F_RXFCS) { 8349 if (features & NETIF_F_LRO) { 8350 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 8351 features &= ~NETIF_F_LRO; 8352 } 8353 8354 if (features & NETIF_F_GRO_HW) { 8355 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 8356 features &= ~NETIF_F_GRO_HW; 8357 } 8358 } 8359 8360 return features; 8361 } 8362 8363 int __netdev_update_features(struct net_device *dev) 8364 { 8365 struct net_device *upper, *lower; 8366 netdev_features_t features; 8367 struct list_head *iter; 8368 int err = -1; 8369 8370 ASSERT_RTNL(); 8371 8372 features = netdev_get_wanted_features(dev); 8373 8374 if (dev->netdev_ops->ndo_fix_features) 8375 features = dev->netdev_ops->ndo_fix_features(dev, features); 8376 8377 /* driver might be less strict about feature dependencies */ 8378 features = netdev_fix_features(dev, features); 8379 8380 /* some features can't be enabled if they're off on an upper device */ 8381 netdev_for_each_upper_dev_rcu(dev, upper, iter) 8382 features = netdev_sync_upper_features(dev, upper, features); 8383 8384 if (dev->features == features) 8385 goto sync_lower; 8386 8387 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 8388 &dev->features, &features); 8389 8390 if (dev->netdev_ops->ndo_set_features) 8391 err = dev->netdev_ops->ndo_set_features(dev, features); 8392 else 8393 err = 0; 8394 8395 if (unlikely(err < 0)) { 8396 netdev_err(dev, 8397 "set_features() failed (%d); wanted %pNF, left %pNF\n", 8398 err, &features, &dev->features); 8399 /* return non-0 since some features might have changed and 8400 * it's better to fire a spurious notification than miss it 8401 */ 8402 return -1; 8403 } 8404 8405 sync_lower: 8406 /* some features must be disabled on lower devices when disabled 8407 * on an upper device (think: bonding master or bridge) 8408 */ 8409 netdev_for_each_lower_dev(dev, lower, iter) 8410 netdev_sync_lower_features(dev, lower, features); 8411 8412 if (!err) { 8413 netdev_features_t diff = features ^ dev->features; 8414 8415 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 8416 /* udp_tunnel_{get,drop}_rx_info both need 8417 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 8418 * device, or they won't do anything. 8419 * Thus we need to update dev->features 8420 * *before* calling udp_tunnel_get_rx_info, 8421 * but *after* calling udp_tunnel_drop_rx_info. 8422 */ 8423 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 8424 dev->features = features; 8425 udp_tunnel_get_rx_info(dev); 8426 } else { 8427 udp_tunnel_drop_rx_info(dev); 8428 } 8429 } 8430 8431 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 8432 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 8433 dev->features = features; 8434 err |= vlan_get_rx_ctag_filter_info(dev); 8435 } else { 8436 vlan_drop_rx_ctag_filter_info(dev); 8437 } 8438 } 8439 8440 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 8441 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 8442 dev->features = features; 8443 err |= vlan_get_rx_stag_filter_info(dev); 8444 } else { 8445 vlan_drop_rx_stag_filter_info(dev); 8446 } 8447 } 8448 8449 dev->features = features; 8450 } 8451 8452 return err < 0 ?
0 : 1; 8453 } 8454 8455 /** 8456 * netdev_update_features - recalculate device features 8457 * @dev: the device to check 8458 * 8459 * Recalculate dev->features set and send notifications if it 8460 * has changed. Should be called after driver or hardware dependent 8461 * conditions might have changed that influence the features. 8462 */ 8463 void netdev_update_features(struct net_device *dev) 8464 { 8465 if (__netdev_update_features(dev)) 8466 netdev_features_change(dev); 8467 } 8468 EXPORT_SYMBOL(netdev_update_features); 8469 8470 /** 8471 * netdev_change_features - recalculate device features 8472 * @dev: the device to check 8473 * 8474 * Recalculate dev->features set and send notifications even 8475 * if they have not changed. Should be called instead of 8476 * netdev_update_features() if also dev->vlan_features might 8477 * have changed to allow the changes to be propagated to stacked 8478 * VLAN devices. 8479 */ 8480 void netdev_change_features(struct net_device *dev) 8481 { 8482 __netdev_update_features(dev); 8483 netdev_features_change(dev); 8484 } 8485 EXPORT_SYMBOL(netdev_change_features); 8486 8487 /** 8488 * netif_stacked_transfer_operstate - transfer operstate 8489 * @rootdev: the root or lower level device to transfer state from 8490 * @dev: the device to transfer operstate to 8491 * 8492 * Transfer operational state from root to device. This is normally 8493 * called when a stacking relationship exists between the root 8494 * device and the device(a leaf device). 8495 */ 8496 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 8497 struct net_device *dev) 8498 { 8499 if (rootdev->operstate == IF_OPER_DORMANT) 8500 netif_dormant_on(dev); 8501 else 8502 netif_dormant_off(dev); 8503 8504 if (netif_carrier_ok(rootdev)) 8505 netif_carrier_on(dev); 8506 else 8507 netif_carrier_off(dev); 8508 } 8509 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 8510 8511 static int netif_alloc_rx_queues(struct net_device *dev) 8512 { 8513 unsigned int i, count = dev->num_rx_queues; 8514 struct netdev_rx_queue *rx; 8515 size_t sz = count * sizeof(*rx); 8516 int err = 0; 8517 8518 BUG_ON(count < 1); 8519 8520 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8521 if (!rx) 8522 return -ENOMEM; 8523 8524 dev->_rx = rx; 8525 8526 for (i = 0; i < count; i++) { 8527 rx[i].dev = dev; 8528 8529 /* XDP RX-queue setup */ 8530 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i); 8531 if (err < 0) 8532 goto err_rxq_info; 8533 } 8534 return 0; 8535 8536 err_rxq_info: 8537 /* Rollback successful reg's and free other resources */ 8538 while (i--) 8539 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 8540 kvfree(dev->_rx); 8541 dev->_rx = NULL; 8542 return err; 8543 } 8544 8545 static void netif_free_rx_queues(struct net_device *dev) 8546 { 8547 unsigned int i, count = dev->num_rx_queues; 8548 8549 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 8550 if (!dev->_rx) 8551 return; 8552 8553 for (i = 0; i < count; i++) 8554 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 8555 8556 kvfree(dev->_rx); 8557 } 8558 8559 static void netdev_init_one_queue(struct net_device *dev, 8560 struct netdev_queue *queue, void *_unused) 8561 { 8562 /* Initialize queue lock */ 8563 spin_lock_init(&queue->_xmit_lock); 8564 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 8565 queue->xmit_lock_owner = -1; 8566 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 8567 queue->dev = dev; 8568 #ifdef CONFIG_BQL 8569 dql_init(&queue->dql, HZ); 8570 #endif 8571 } 8572 8573 static void 
netif_free_tx_queues(struct net_device *dev) 8574 { 8575 kvfree(dev->_tx); 8576 } 8577 8578 static int netif_alloc_netdev_queues(struct net_device *dev) 8579 { 8580 unsigned int count = dev->num_tx_queues; 8581 struct netdev_queue *tx; 8582 size_t sz = count * sizeof(*tx); 8583 8584 if (count < 1 || count > 0xffff) 8585 return -EINVAL; 8586 8587 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8588 if (!tx) 8589 return -ENOMEM; 8590 8591 dev->_tx = tx; 8592 8593 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 8594 spin_lock_init(&dev->tx_global_lock); 8595 8596 return 0; 8597 } 8598 8599 void netif_tx_stop_all_queues(struct net_device *dev) 8600 { 8601 unsigned int i; 8602 8603 for (i = 0; i < dev->num_tx_queues; i++) { 8604 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 8605 8606 netif_tx_stop_queue(txq); 8607 } 8608 } 8609 EXPORT_SYMBOL(netif_tx_stop_all_queues); 8610 8611 /** 8612 * register_netdevice - register a network device 8613 * @dev: device to register 8614 * 8615 * Take a completed network device structure and add it to the kernel 8616 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8617 * chain. 0 is returned on success. A negative errno code is returned 8618 * on a failure to set up the device, or if the name is a duplicate. 8619 * 8620 * Callers must hold the rtnl semaphore. You may want 8621 * register_netdev() instead of this. 8622 * 8623 * BUGS: 8624 * The locking appears insufficient to guarantee two parallel registers 8625 * will not get the same name. 8626 */ 8627 8628 int register_netdevice(struct net_device *dev) 8629 { 8630 int ret; 8631 struct net *net = dev_net(dev); 8632 8633 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 8634 NETDEV_FEATURE_COUNT); 8635 BUG_ON(dev_boot_phase); 8636 ASSERT_RTNL(); 8637 8638 might_sleep(); 8639 8640 /* When net_device's are persistent, this will be fatal. */ 8641 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 8642 BUG_ON(!net); 8643 8644 spin_lock_init(&dev->addr_list_lock); 8645 netdev_set_addr_lockdep_class(dev); 8646 8647 ret = dev_get_valid_name(net, dev, dev->name); 8648 if (ret < 0) 8649 goto out; 8650 8651 /* Init, if this function is available */ 8652 if (dev->netdev_ops->ndo_init) { 8653 ret = dev->netdev_ops->ndo_init(dev); 8654 if (ret) { 8655 if (ret > 0) 8656 ret = -EIO; 8657 goto out; 8658 } 8659 } 8660 8661 if (((dev->hw_features | dev->features) & 8662 NETIF_F_HW_VLAN_CTAG_FILTER) && 8663 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 8664 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 8665 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 8666 ret = -EINVAL; 8667 goto err_uninit; 8668 } 8669 8670 ret = -EBUSY; 8671 if (!dev->ifindex) 8672 dev->ifindex = dev_new_index(net); 8673 else if (__dev_get_by_index(net, dev->ifindex)) 8674 goto err_uninit; 8675 8676 /* Transfer changeable features to wanted_features and enable 8677 * software offloads (GSO and GRO). 8678 */ 8679 dev->hw_features |= NETIF_F_SOFT_FEATURES; 8680 dev->features |= NETIF_F_SOFT_FEATURES; 8681 8682 if (dev->netdev_ops->ndo_udp_tunnel_add) { 8683 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8684 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8685 } 8686 8687 dev->wanted_features = dev->features & dev->hw_features; 8688 8689 if (!(dev->flags & IFF_LOOPBACK)) 8690 dev->hw_features |= NETIF_F_NOCACHE_COPY; 8691 8692 /* If IPv4 TCP segmentation offload is supported we should also 8693 * allow the device to enable segmenting the frame with the option 8694 * of ignoring a static IP ID value. 
This doesn't enable the 8695 * feature itself but allows the user to enable it later. 8696 */ 8697 if (dev->hw_features & NETIF_F_TSO) 8698 dev->hw_features |= NETIF_F_TSO_MANGLEID; 8699 if (dev->vlan_features & NETIF_F_TSO) 8700 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 8701 if (dev->mpls_features & NETIF_F_TSO) 8702 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 8703 if (dev->hw_enc_features & NETIF_F_TSO) 8704 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 8705 8706 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 8707 */ 8708 dev->vlan_features |= NETIF_F_HIGHDMA; 8709 8710 /* Make NETIF_F_SG inheritable to tunnel devices. 8711 */ 8712 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 8713 8714 /* Make NETIF_F_SG inheritable to MPLS. 8715 */ 8716 dev->mpls_features |= NETIF_F_SG; 8717 8718 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 8719 ret = notifier_to_errno(ret); 8720 if (ret) 8721 goto err_uninit; 8722 8723 ret = netdev_register_kobject(dev); 8724 if (ret) 8725 goto err_uninit; 8726 dev->reg_state = NETREG_REGISTERED; 8727 8728 __netdev_update_features(dev); 8729 8730 /* 8731 * Default initial state at registry is that the 8732 * device is present. 8733 */ 8734 8735 set_bit(__LINK_STATE_PRESENT, &dev->state); 8736 8737 linkwatch_init_dev(dev); 8738 8739 dev_init_scheduler(dev); 8740 dev_hold(dev); 8741 list_netdevice(dev); 8742 add_device_randomness(dev->dev_addr, dev->addr_len); 8743 8744 /* If the device has permanent device address, driver should 8745 * set dev_addr and also addr_assign_type should be set to 8746 * NET_ADDR_PERM (default value). 8747 */ 8748 if (dev->addr_assign_type == NET_ADDR_PERM) 8749 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 8750 8751 /* Notify protocols, that a new device appeared. */ 8752 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 8753 ret = notifier_to_errno(ret); 8754 if (ret) { 8755 rollback_registered(dev); 8756 dev->reg_state = NETREG_UNREGISTERED; 8757 } 8758 /* 8759 * Prevent userspace races by waiting until the network 8760 * device is fully setup before sending notifications. 8761 */ 8762 if (!dev->rtnl_link_ops || 8763 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8764 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 8765 8766 out: 8767 return ret; 8768 8769 err_uninit: 8770 if (dev->netdev_ops->ndo_uninit) 8771 dev->netdev_ops->ndo_uninit(dev); 8772 if (dev->priv_destructor) 8773 dev->priv_destructor(dev); 8774 goto out; 8775 } 8776 EXPORT_SYMBOL(register_netdevice); 8777 8778 /** 8779 * init_dummy_netdev - init a dummy network device for NAPI 8780 * @dev: device to init 8781 * 8782 * This takes a network device structure and initialize the minimum 8783 * amount of fields so it can be used to schedule NAPI polls without 8784 * registering a full blown interface. This is to be used by drivers 8785 * that need to tie several hardware interfaces to a single NAPI 8786 * poll scheduler due to HW limitations. 8787 */ 8788 int init_dummy_netdev(struct net_device *dev) 8789 { 8790 /* Clear everything. 
Note we don't initialize spinlocks 8791 * as they aren't supposed to be taken by any of the 8792 * NAPI code and this dummy netdev is supposed to be 8793 * only ever used for NAPI polls 8794 */ 8795 memset(dev, 0, sizeof(struct net_device)); 8796 8797 /* make sure we BUG if trying to hit standard 8798 * register/unregister code path 8799 */ 8800 dev->reg_state = NETREG_DUMMY; 8801 8802 /* NAPI wants this */ 8803 INIT_LIST_HEAD(&dev->napi_list); 8804 8805 /* a dummy interface is started by default */ 8806 set_bit(__LINK_STATE_PRESENT, &dev->state); 8807 set_bit(__LINK_STATE_START, &dev->state); 8808 8809 /* napi_busy_loop stats accounting wants this */ 8810 dev_net_set(dev, &init_net); 8811 8812 /* Note : We don't allocate pcpu_refcnt for dummy devices, 8813 * because users of this 'device' don't need to change 8814 * its refcount. 8815 */ 8816 8817 return 0; 8818 } 8819 EXPORT_SYMBOL_GPL(init_dummy_netdev); 8820 8821 8822 /** 8823 * register_netdev - register a network device 8824 * @dev: device to register 8825 * 8826 * Take a completed network device structure and add it to the kernel 8827 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8828 * chain. 0 is returned on success. A negative errno code is returned 8829 * on a failure to set up the device, or if the name is a duplicate. 8830 * 8831 * This is a wrapper around register_netdevice that takes the rtnl semaphore 8832 * and expands the device name if you passed a format string to 8833 * alloc_netdev. 8834 */ 8835 int register_netdev(struct net_device *dev) 8836 { 8837 int err; 8838 8839 if (rtnl_lock_killable()) 8840 return -EINTR; 8841 err = register_netdevice(dev); 8842 rtnl_unlock(); 8843 return err; 8844 } 8845 EXPORT_SYMBOL(register_netdev); 8846 8847 int netdev_refcnt_read(const struct net_device *dev) 8848 { 8849 int i, refcnt = 0; 8850 8851 for_each_possible_cpu(i) 8852 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 8853 return refcnt; 8854 } 8855 EXPORT_SYMBOL(netdev_refcnt_read); 8856 8857 /** 8858 * netdev_wait_allrefs - wait until all references are gone. 8859 * @dev: target net_device 8860 * 8861 * This is called when unregistering network devices. 8862 * 8863 * Any protocol or device that holds a reference should register 8864 * for netdevice notification, and cleanup and put back the 8865 * reference if they receive an UNREGISTER event. 8866 * We can get stuck here if buggy protocols don't correctly 8867 * call dev_put. 8868 */ 8869 static void netdev_wait_allrefs(struct net_device *dev) 8870 { 8871 unsigned long rebroadcast_time, warning_time; 8872 int refcnt; 8873 8874 linkwatch_forget_dev(dev); 8875 8876 rebroadcast_time = warning_time = jiffies; 8877 refcnt = netdev_refcnt_read(dev); 8878 8879 while (refcnt != 0) { 8880 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 8881 rtnl_lock(); 8882 8883 /* Rebroadcast unregister notification */ 8884 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8885 8886 __rtnl_unlock(); 8887 rcu_barrier(); 8888 rtnl_lock(); 8889 8890 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 8891 &dev->state)) { 8892 /* We must not have linkwatch events 8893 * pending on unregister. If this 8894 * happens, we simply run the queue 8895 * unscheduled, resulting in a noop 8896 * for this device.
8897 */ 8898 linkwatch_run_queue(); 8899 } 8900 8901 __rtnl_unlock(); 8902 8903 rebroadcast_time = jiffies; 8904 } 8905 8906 msleep(250); 8907 8908 refcnt = netdev_refcnt_read(dev); 8909 8910 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 8911 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 8912 dev->name, refcnt); 8913 warning_time = jiffies; 8914 } 8915 } 8916 } 8917 8918 /* The sequence is: 8919 * 8920 * rtnl_lock(); 8921 * ... 8922 * register_netdevice(x1); 8923 * register_netdevice(x2); 8924 * ... 8925 * unregister_netdevice(y1); 8926 * unregister_netdevice(y2); 8927 * ... 8928 * rtnl_unlock(); 8929 * free_netdev(y1); 8930 * free_netdev(y2); 8931 * 8932 * We are invoked by rtnl_unlock(). 8933 * This allows us to deal with problems: 8934 * 1) We can delete sysfs objects which invoke hotplug 8935 * without deadlocking with linkwatch via keventd. 8936 * 2) Since we run with the RTNL semaphore not held, we can sleep 8937 * safely in order to wait for the netdev refcnt to drop to zero. 8938 * 8939 * We must not return until all unregister events added during 8940 * the interval the lock was held have been completed. 8941 */ 8942 void netdev_run_todo(void) 8943 { 8944 struct list_head list; 8945 8946 /* Snapshot list, allow later requests */ 8947 list_replace_init(&net_todo_list, &list); 8948 8949 __rtnl_unlock(); 8950 8951 8952 /* Wait for rcu callbacks to finish before next phase */ 8953 if (!list_empty(&list)) 8954 rcu_barrier(); 8955 8956 while (!list_empty(&list)) { 8957 struct net_device *dev 8958 = list_first_entry(&list, struct net_device, todo_list); 8959 list_del(&dev->todo_list); 8960 8961 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 8962 pr_err("network todo '%s' but state %d\n", 8963 dev->name, dev->reg_state); 8964 dump_stack(); 8965 continue; 8966 } 8967 8968 dev->reg_state = NETREG_UNREGISTERED; 8969 8970 netdev_wait_allrefs(dev); 8971 8972 /* paranoia */ 8973 BUG_ON(netdev_refcnt_read(dev)); 8974 BUG_ON(!list_empty(&dev->ptype_all)); 8975 BUG_ON(!list_empty(&dev->ptype_specific)); 8976 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 8977 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 8978 #if IS_ENABLED(CONFIG_DECNET) 8979 WARN_ON(dev->dn_ptr); 8980 #endif 8981 if (dev->priv_destructor) 8982 dev->priv_destructor(dev); 8983 if (dev->needs_free_netdev) 8984 free_netdev(dev); 8985 8986 /* Report a network device has been unregistered */ 8987 rtnl_lock(); 8988 dev_net(dev)->dev_unreg_count--; 8989 __rtnl_unlock(); 8990 wake_up(&netdev_unregistering_wq); 8991 8992 /* Free network device */ 8993 kobject_put(&dev->dev.kobj); 8994 } 8995 } 8996 8997 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 8998 * all the same fields in the same order as net_device_stats, with only 8999 * the type differing, but rtnl_link_stats64 may have additional fields 9000 * at the end for newer counters. 
9001 */ 9002 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 9003 const struct net_device_stats *netdev_stats) 9004 { 9005 #if BITS_PER_LONG == 64 9006 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 9007 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 9008 /* zero out counters that only exist in rtnl_link_stats64 */ 9009 memset((char *)stats64 + sizeof(*netdev_stats), 0, 9010 sizeof(*stats64) - sizeof(*netdev_stats)); 9011 #else 9012 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 9013 const unsigned long *src = (const unsigned long *)netdev_stats; 9014 u64 *dst = (u64 *)stats64; 9015 9016 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 9017 for (i = 0; i < n; i++) 9018 dst[i] = src[i]; 9019 /* zero out counters that only exist in rtnl_link_stats64 */ 9020 memset((char *)stats64 + n * sizeof(u64), 0, 9021 sizeof(*stats64) - n * sizeof(u64)); 9022 #endif 9023 } 9024 EXPORT_SYMBOL(netdev_stats_to_stats64); 9025 9026 /** 9027 * dev_get_stats - get network device statistics 9028 * @dev: device to get statistics from 9029 * @storage: place to store stats 9030 * 9031 * Get network statistics from device. Return @storage. 9032 * The device driver may provide its own method by setting 9033 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 9034 * otherwise the internal statistics structure is used. 9035 */ 9036 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 9037 struct rtnl_link_stats64 *storage) 9038 { 9039 const struct net_device_ops *ops = dev->netdev_ops; 9040 9041 if (ops->ndo_get_stats64) { 9042 memset(storage, 0, sizeof(*storage)); 9043 ops->ndo_get_stats64(dev, storage); 9044 } else if (ops->ndo_get_stats) { 9045 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 9046 } else { 9047 netdev_stats_to_stats64(storage, &dev->stats); 9048 } 9049 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 9050 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 9051 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 9052 return storage; 9053 } 9054 EXPORT_SYMBOL(dev_get_stats); 9055 9056 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 9057 { 9058 struct netdev_queue *queue = dev_ingress_queue(dev); 9059 9060 #ifdef CONFIG_NET_CLS_ACT 9061 if (queue) 9062 return queue; 9063 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 9064 if (!queue) 9065 return NULL; 9066 netdev_init_one_queue(dev, queue, NULL); 9067 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 9068 queue->qdisc_sleeping = &noop_qdisc; 9069 rcu_assign_pointer(dev->ingress_queue, queue); 9070 #endif 9071 return queue; 9072 } 9073 9074 static const struct ethtool_ops default_ethtool_ops; 9075 9076 void netdev_set_default_ethtool_ops(struct net_device *dev, 9077 const struct ethtool_ops *ops) 9078 { 9079 if (dev->ethtool_ops == &default_ethtool_ops) 9080 dev->ethtool_ops = ops; 9081 } 9082 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 9083 9084 void netdev_freemem(struct net_device *dev) 9085 { 9086 char *addr = (char *)dev - dev->padded; 9087 9088 kvfree(addr); 9089 } 9090 9091 /** 9092 * alloc_netdev_mqs - allocate network device 9093 * @sizeof_priv: size of private data to allocate space for 9094 * @name: device name format string 9095 * @name_assign_type: origin of device name 9096 * @setup: callback to initialize device 9097 * @txqs: the number of TX subqueues to allocate 9098 * @rxqs: the number of RX subqueues to allocate 9099 * 9100 * Allocates a struct net_device with 
private data area for driver use 9101 * and performs basic initialization. Also allocates subqueue structs 9102 * for each queue on the device. 9103 */ 9104 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 9105 unsigned char name_assign_type, 9106 void (*setup)(struct net_device *), 9107 unsigned int txqs, unsigned int rxqs) 9108 { 9109 struct net_device *dev; 9110 unsigned int alloc_size; 9111 struct net_device *p; 9112 9113 BUG_ON(strlen(name) >= sizeof(dev->name)); 9114 9115 if (txqs < 1) { 9116 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 9117 return NULL; 9118 } 9119 9120 if (rxqs < 1) { 9121 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 9122 return NULL; 9123 } 9124 9125 alloc_size = sizeof(struct net_device); 9126 if (sizeof_priv) { 9127 /* ensure 32-byte alignment of private area */ 9128 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 9129 alloc_size += sizeof_priv; 9130 } 9131 /* ensure 32-byte alignment of whole construct */ 9132 alloc_size += NETDEV_ALIGN - 1; 9133 9134 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9135 if (!p) 9136 return NULL; 9137 9138 dev = PTR_ALIGN(p, NETDEV_ALIGN); 9139 dev->padded = (char *)dev - (char *)p; 9140 9141 dev->pcpu_refcnt = alloc_percpu(int); 9142 if (!dev->pcpu_refcnt) 9143 goto free_dev; 9144 9145 if (dev_addr_init(dev)) 9146 goto free_pcpu; 9147 9148 dev_mc_init(dev); 9149 dev_uc_init(dev); 9150 9151 dev_net_set(dev, &init_net); 9152 9153 dev->gso_max_size = GSO_MAX_SIZE; 9154 dev->gso_max_segs = GSO_MAX_SEGS; 9155 9156 INIT_LIST_HEAD(&dev->napi_list); 9157 INIT_LIST_HEAD(&dev->unreg_list); 9158 INIT_LIST_HEAD(&dev->close_list); 9159 INIT_LIST_HEAD(&dev->link_watch_list); 9160 INIT_LIST_HEAD(&dev->adj_list.upper); 9161 INIT_LIST_HEAD(&dev->adj_list.lower); 9162 INIT_LIST_HEAD(&dev->ptype_all); 9163 INIT_LIST_HEAD(&dev->ptype_specific); 9164 #ifdef CONFIG_NET_SCHED 9165 hash_init(dev->qdisc_hash); 9166 #endif 9167 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 9168 setup(dev); 9169 9170 if (!dev->tx_queue_len) { 9171 dev->priv_flags |= IFF_NO_QUEUE; 9172 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 9173 } 9174 9175 dev->num_tx_queues = txqs; 9176 dev->real_num_tx_queues = txqs; 9177 if (netif_alloc_netdev_queues(dev)) 9178 goto free_all; 9179 9180 dev->num_rx_queues = rxqs; 9181 dev->real_num_rx_queues = rxqs; 9182 if (netif_alloc_rx_queues(dev)) 9183 goto free_all; 9184 9185 strcpy(dev->name, name); 9186 dev->name_assign_type = name_assign_type; 9187 dev->group = INIT_NETDEV_GROUP; 9188 if (!dev->ethtool_ops) 9189 dev->ethtool_ops = &default_ethtool_ops; 9190 9191 nf_hook_ingress_init(dev); 9192 9193 return dev; 9194 9195 free_all: 9196 free_netdev(dev); 9197 return NULL; 9198 9199 free_pcpu: 9200 free_percpu(dev->pcpu_refcnt); 9201 free_dev: 9202 netdev_freemem(dev); 9203 return NULL; 9204 } 9205 EXPORT_SYMBOL(alloc_netdev_mqs); 9206 9207 /** 9208 * free_netdev - free network device 9209 * @dev: device 9210 * 9211 * This function does the last stage of destroying an allocated device 9212 * interface. The reference to the device object is released. If this 9213 * is the last reference then it will be freed.Must be called in process 9214 * context. 
9215 */ 9216 void free_netdev(struct net_device *dev) 9217 { 9218 struct napi_struct *p, *n; 9219 9220 might_sleep(); 9221 netif_free_tx_queues(dev); 9222 netif_free_rx_queues(dev); 9223 9224 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 9225 9226 /* Flush device addresses */ 9227 dev_addr_flush(dev); 9228 9229 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 9230 netif_napi_del(p); 9231 9232 free_percpu(dev->pcpu_refcnt); 9233 dev->pcpu_refcnt = NULL; 9234 9235 /* Compatibility with error handling in drivers */ 9236 if (dev->reg_state == NETREG_UNINITIALIZED) { 9237 netdev_freemem(dev); 9238 return; 9239 } 9240 9241 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 9242 dev->reg_state = NETREG_RELEASED; 9243 9244 /* will free via device release */ 9245 put_device(&dev->dev); 9246 } 9247 EXPORT_SYMBOL(free_netdev); 9248 9249 /** 9250 * synchronize_net - Synchronize with packet receive processing 9251 * 9252 * Wait for packets currently being received to be done. 9253 * Does not block later packets from starting. 9254 */ 9255 void synchronize_net(void) 9256 { 9257 might_sleep(); 9258 if (rtnl_is_locked()) 9259 synchronize_rcu_expedited(); 9260 else 9261 synchronize_rcu(); 9262 } 9263 EXPORT_SYMBOL(synchronize_net); 9264 9265 /** 9266 * unregister_netdevice_queue - remove device from the kernel 9267 * @dev: device 9268 * @head: list 9269 * 9270 * This function shuts down a device interface and removes it 9271 * from the kernel tables. 9272 * If head not NULL, device is queued to be unregistered later. 9273 * 9274 * Callers must hold the rtnl semaphore. You may want 9275 * unregister_netdev() instead of this. 9276 */ 9277 9278 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 9279 { 9280 ASSERT_RTNL(); 9281 9282 if (head) { 9283 list_move_tail(&dev->unreg_list, head); 9284 } else { 9285 rollback_registered(dev); 9286 /* Finish processing unregister after unlock */ 9287 net_set_todo(dev); 9288 } 9289 } 9290 EXPORT_SYMBOL(unregister_netdevice_queue); 9291 9292 /** 9293 * unregister_netdevice_many - unregister many devices 9294 * @head: list of devices 9295 * 9296 * Note: As most callers use a stack allocated list_head, 9297 * we force a list_del() to make sure stack wont be corrupted later. 9298 */ 9299 void unregister_netdevice_many(struct list_head *head) 9300 { 9301 struct net_device *dev; 9302 9303 if (!list_empty(head)) { 9304 rollback_registered_many(head); 9305 list_for_each_entry(dev, head, unreg_list) 9306 net_set_todo(dev); 9307 list_del(head); 9308 } 9309 } 9310 EXPORT_SYMBOL(unregister_netdevice_many); 9311 9312 /** 9313 * unregister_netdev - remove device from the kernel 9314 * @dev: device 9315 * 9316 * This function shuts down a device interface and removes it 9317 * from the kernel tables. 9318 * 9319 * This is just a wrapper for unregister_netdevice that takes 9320 * the rtnl semaphore. In general you want to use this and not 9321 * unregister_netdevice. 9322 */ 9323 void unregister_netdev(struct net_device *dev) 9324 { 9325 rtnl_lock(); 9326 unregister_netdevice(dev); 9327 rtnl_unlock(); 9328 } 9329 EXPORT_SYMBOL(unregister_netdev); 9330 9331 /** 9332 * dev_change_net_namespace - move device to different nethost namespace 9333 * @dev: device 9334 * @net: network namespace 9335 * @pat: If not NULL name pattern to try if the current device name 9336 * is already taken in the destination network namespace. 9337 * 9338 * This function shuts down a device interface and moves it 9339 * to a new network namespace. 
On success 0 is returned, on 9340 * a failure a negative errno code is returned. 9341 * 9342 * Callers must hold the rtnl semaphore. 9343 */ 9344 9345 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 9346 { 9347 int err, new_nsid, new_ifindex; 9348 9349 ASSERT_RTNL(); 9350 9351 /* Don't allow namespace local devices to be moved. */ 9352 err = -EINVAL; 9353 if (dev->features & NETIF_F_NETNS_LOCAL) 9354 goto out; 9355 9356 /* Ensure the device has been registered */ 9357 if (dev->reg_state != NETREG_REGISTERED) 9358 goto out; 9359 9360 /* Get out if there is nothing to do */ 9361 err = 0; 9362 if (net_eq(dev_net(dev), net)) 9363 goto out; 9364 9365 /* Pick the destination device name, and ensure 9366 * we can use it in the destination network namespace. 9367 */ 9368 err = -EEXIST; 9369 if (__dev_get_by_name(net, dev->name)) { 9370 /* We get here if we can't use the current device name */ 9371 if (!pat) 9372 goto out; 9373 err = dev_get_valid_name(net, dev, pat); 9374 if (err < 0) 9375 goto out; 9376 } 9377 9378 /* 9379 * And now a mini version of register_netdevice and unregister_netdevice. 9380 */ 9381 9382 /* If device is running, close it first. */ 9383 dev_close(dev); 9384 9385 /* And unlink it from device chain */ 9386 unlist_netdevice(dev); 9387 9388 synchronize_net(); 9389 9390 /* Shutdown queueing discipline. */ 9391 dev_shutdown(dev); 9392 9393 /* Notify protocols, that we are about to destroy 9394 * this device. They should clean all the things. 9395 * 9396 * Note that dev->reg_state stays at NETREG_REGISTERED. 9397 * This is wanted because this way 8021q and macvlan know 9398 * the device is just moving and can keep their slaves up. 9399 */ 9400 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9401 rcu_barrier(); 9402 9403 new_nsid = peernet2id_alloc(dev_net(dev), net); 9404 /* If there is an ifindex conflict assign a new one */ 9405 if (__dev_get_by_index(net, dev->ifindex)) 9406 new_ifindex = dev_new_index(net); 9407 else 9408 new_ifindex = dev->ifindex; 9409 9410 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 9411 new_ifindex); 9412 9413 /* 9414 * Flush the unicast and multicast chains 9415 */ 9416 dev_uc_flush(dev); 9417 dev_mc_flush(dev); 9418 9419 /* Send a netdev-removed uevent to the old namespace */ 9420 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 9421 netdev_adjacent_del_links(dev); 9422 9423 /* Actually switch the network namespace */ 9424 dev_net_set(dev, net); 9425 dev->ifindex = new_ifindex; 9426 9427 /* Send a netdev-add uevent to the new namespace */ 9428 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 9429 netdev_adjacent_add_links(dev); 9430 9431 /* Fixup kobjects */ 9432 err = device_rename(&dev->dev, dev->name); 9433 WARN_ON(err); 9434 9435 /* Add the device back in the hashes */ 9436 list_netdevice(dev); 9437 9438 /* Notify protocols, that a new device appeared. */ 9439 call_netdevice_notifiers(NETDEV_REGISTER, dev); 9440 9441 /* 9442 * Prevent userspace races by waiting until the network 9443 * device is fully setup before sending notifications.
9444 */ 9445 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 9446 9447 synchronize_net(); 9448 err = 0; 9449 out: 9450 return err; 9451 } 9452 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 9453 9454 static int dev_cpu_dead(unsigned int oldcpu) 9455 { 9456 struct sk_buff **list_skb; 9457 struct sk_buff *skb; 9458 unsigned int cpu; 9459 struct softnet_data *sd, *oldsd, *remsd = NULL; 9460 9461 local_irq_disable(); 9462 cpu = smp_processor_id(); 9463 sd = &per_cpu(softnet_data, cpu); 9464 oldsd = &per_cpu(softnet_data, oldcpu); 9465 9466 /* Find end of our completion_queue. */ 9467 list_skb = &sd->completion_queue; 9468 while (*list_skb) 9469 list_skb = &(*list_skb)->next; 9470 /* Append completion queue from offline CPU. */ 9471 *list_skb = oldsd->completion_queue; 9472 oldsd->completion_queue = NULL; 9473 9474 /* Append output queue from offline CPU. */ 9475 if (oldsd->output_queue) { 9476 *sd->output_queue_tailp = oldsd->output_queue; 9477 sd->output_queue_tailp = oldsd->output_queue_tailp; 9478 oldsd->output_queue = NULL; 9479 oldsd->output_queue_tailp = &oldsd->output_queue; 9480 } 9481 /* Append NAPI poll list from offline CPU, with one exception : 9482 * process_backlog() must be called by cpu owning percpu backlog. 9483 * We properly handle process_queue & input_pkt_queue later. 9484 */ 9485 while (!list_empty(&oldsd->poll_list)) { 9486 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 9487 struct napi_struct, 9488 poll_list); 9489 9490 list_del_init(&napi->poll_list); 9491 if (napi->poll == process_backlog) 9492 napi->state = 0; 9493 else 9494 ____napi_schedule(sd, napi); 9495 } 9496 9497 raise_softirq_irqoff(NET_TX_SOFTIRQ); 9498 local_irq_enable(); 9499 9500 #ifdef CONFIG_RPS 9501 remsd = oldsd->rps_ipi_list; 9502 oldsd->rps_ipi_list = NULL; 9503 #endif 9504 /* send out pending IPI's on offline CPU */ 9505 net_rps_send_ipi(remsd); 9506 9507 /* Process offline CPU's input_pkt_queue */ 9508 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 9509 netif_rx_ni(skb); 9510 input_queue_head_incr(oldsd); 9511 } 9512 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 9513 netif_rx_ni(skb); 9514 input_queue_head_incr(oldsd); 9515 } 9516 9517 return 0; 9518 } 9519 9520 /** 9521 * netdev_increment_features - increment feature set by one 9522 * @all: current feature set 9523 * @one: new feature set 9524 * @mask: mask feature set 9525 * 9526 * Computes a new feature set after adding a device with feature set 9527 * @one to the master device with current feature set @all. Will not 9528 * enable anything that is off in @mask. Returns the new feature set. 9529 */ 9530 netdev_features_t netdev_increment_features(netdev_features_t all, 9531 netdev_features_t one, netdev_features_t mask) 9532 { 9533 if (mask & NETIF_F_HW_CSUM) 9534 mask |= NETIF_F_CSUM_MASK; 9535 mask |= NETIF_F_VLAN_CHALLENGED; 9536 9537 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 9538 all &= one | ~NETIF_F_ALL_FOR_ALL; 9539 9540 /* If one device supports hw checksumming, set for all. 
*/ 9541 if (all & NETIF_F_HW_CSUM) 9542 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 9543 9544 return all; 9545 } 9546 EXPORT_SYMBOL(netdev_increment_features); 9547 9548 static struct hlist_head * __net_init netdev_create_hash(void) 9549 { 9550 int i; 9551 struct hlist_head *hash; 9552 9553 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 9554 if (hash != NULL) 9555 for (i = 0; i < NETDEV_HASHENTRIES; i++) 9556 INIT_HLIST_HEAD(&hash[i]); 9557 9558 return hash; 9559 } 9560 9561 /* Initialize per network namespace state */ 9562 static int __net_init netdev_init(struct net *net) 9563 { 9564 BUILD_BUG_ON(GRO_HASH_BUCKETS > 9565 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); 9566 9567 if (net != &init_net) 9568 INIT_LIST_HEAD(&net->dev_base_head); 9569 9570 net->dev_name_head = netdev_create_hash(); 9571 if (net->dev_name_head == NULL) 9572 goto err_name; 9573 9574 net->dev_index_head = netdev_create_hash(); 9575 if (net->dev_index_head == NULL) 9576 goto err_idx; 9577 9578 return 0; 9579 9580 err_idx: 9581 kfree(net->dev_name_head); 9582 err_name: 9583 return -ENOMEM; 9584 } 9585 9586 /** 9587 * netdev_drivername - network driver for the device 9588 * @dev: network device 9589 * 9590 * Determine network driver for device. 9591 */ 9592 const char *netdev_drivername(const struct net_device *dev) 9593 { 9594 const struct device_driver *driver; 9595 const struct device *parent; 9596 const char *empty = ""; 9597 9598 parent = dev->dev.parent; 9599 if (!parent) 9600 return empty; 9601 9602 driver = parent->driver; 9603 if (driver && driver->name) 9604 return driver->name; 9605 return empty; 9606 } 9607 9608 static void __netdev_printk(const char *level, const struct net_device *dev, 9609 struct va_format *vaf) 9610 { 9611 if (dev && dev->dev.parent) { 9612 dev_printk_emit(level[1] - '0', 9613 dev->dev.parent, 9614 "%s %s %s%s: %pV", 9615 dev_driver_string(dev->dev.parent), 9616 dev_name(dev->dev.parent), 9617 netdev_name(dev), netdev_reg_state(dev), 9618 vaf); 9619 } else if (dev) { 9620 printk("%s%s%s: %pV", 9621 level, netdev_name(dev), netdev_reg_state(dev), vaf); 9622 } else { 9623 printk("%s(NULL net_device): %pV", level, vaf); 9624 } 9625 } 9626 9627 void netdev_printk(const char *level, const struct net_device *dev, 9628 const char *format, ...) 9629 { 9630 struct va_format vaf; 9631 va_list args; 9632 9633 va_start(args, format); 9634 9635 vaf.fmt = format; 9636 vaf.va = &args; 9637 9638 __netdev_printk(level, dev, &vaf); 9639 9640 va_end(args); 9641 } 9642 EXPORT_SYMBOL(netdev_printk); 9643 9644 #define define_netdev_printk_level(func, level) \ 9645 void func(const struct net_device *dev, const char *fmt, ...) 
\ 9646 { \ 9647 struct va_format vaf; \ 9648 va_list args; \ 9649 \ 9650 va_start(args, fmt); \ 9651 \ 9652 vaf.fmt = fmt; \ 9653 vaf.va = &args; \ 9654 \ 9655 __netdev_printk(level, dev, &vaf); \ 9656 \ 9657 va_end(args); \ 9658 } \ 9659 EXPORT_SYMBOL(func); 9660 9661 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 9662 define_netdev_printk_level(netdev_alert, KERN_ALERT); 9663 define_netdev_printk_level(netdev_crit, KERN_CRIT); 9664 define_netdev_printk_level(netdev_err, KERN_ERR); 9665 define_netdev_printk_level(netdev_warn, KERN_WARNING); 9666 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 9667 define_netdev_printk_level(netdev_info, KERN_INFO); 9668 9669 static void __net_exit netdev_exit(struct net *net) 9670 { 9671 kfree(net->dev_name_head); 9672 kfree(net->dev_index_head); 9673 if (net != &init_net) 9674 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 9675 } 9676 9677 static struct pernet_operations __net_initdata netdev_net_ops = { 9678 .init = netdev_init, 9679 .exit = netdev_exit, 9680 }; 9681 9682 static void __net_exit default_device_exit(struct net *net) 9683 { 9684 struct net_device *dev, *aux; 9685 /* 9686 * Push all migratable network devices back to the 9687 * initial network namespace 9688 */ 9689 rtnl_lock(); 9690 for_each_netdev_safe(net, dev, aux) { 9691 int err; 9692 char fb_name[IFNAMSIZ]; 9693 9694 /* Ignore unmoveable devices (i.e. loopback) */ 9695 if (dev->features & NETIF_F_NETNS_LOCAL) 9696 continue; 9697 9698 /* Leave virtual devices for the generic cleanup */ 9699 if (dev->rtnl_link_ops) 9700 continue; 9701 9702 /* Push remaining network devices to init_net */ 9703 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 9704 err = dev_change_net_namespace(dev, &init_net, fb_name); 9705 if (err) { 9706 pr_emerg("%s: failed to move %s to init_net: %d\n", 9707 __func__, dev->name, err); 9708 BUG(); 9709 } 9710 } 9711 rtnl_unlock(); 9712 } 9713 9714 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) 9715 { 9716 /* Return with the rtnl_lock held when there are no network 9717 * devices unregistering in any network namespace in net_list. 9718 */ 9719 struct net *net; 9720 bool unregistering; 9721 DEFINE_WAIT_FUNC(wait, woken_wake_function); 9722 9723 add_wait_queue(&netdev_unregistering_wq, &wait); 9724 for (;;) { 9725 unregistering = false; 9726 rtnl_lock(); 9727 list_for_each_entry(net, net_list, exit_list) { 9728 if (net->dev_unreg_count > 0) { 9729 unregistering = true; 9730 break; 9731 } 9732 } 9733 if (!unregistering) 9734 break; 9735 __rtnl_unlock(); 9736 9737 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 9738 } 9739 remove_wait_queue(&netdev_unregistering_wq, &wait); 9740 } 9741 9742 static void __net_exit default_device_exit_batch(struct list_head *net_list) 9743 { 9744 /* At exit all network devices must be removed from a network 9745 * namespace. Do this in the reverse order of registration. 9746 * Do this across as many network namespaces as possible to 9747 * improve batching efficiency. 9748 */ 9749 struct net_device *dev; 9750 struct net *net; 9751 LIST_HEAD(dev_kill_list); 9752 9753 /* To prevent network device cleanup code from dereferencing 9754 * loopback devices or network devices that have been freed 9755 * wait here for all pending unregistrations to complete, 9756 * before unregistering the loopback device and allowing the 9757 * network namespace to be freed.
9758 * 9759 * The netdev todo list containing all network device 9760 * unregistrations that happen in default_device_exit_batch 9761 * will run in the rtnl_unlock() at the end of 9762 * default_device_exit_batch. 9763 */ 9764 rtnl_lock_unregistering(net_list); 9765 list_for_each_entry(net, net_list, exit_list) { 9766 for_each_netdev_reverse(net, dev) { 9767 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 9768 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 9769 else 9770 unregister_netdevice_queue(dev, &dev_kill_list); 9771 } 9772 } 9773 unregister_netdevice_many(&dev_kill_list); 9774 rtnl_unlock(); 9775 } 9776 9777 static struct pernet_operations __net_initdata default_device_ops = { 9778 .exit = default_device_exit, 9779 .exit_batch = default_device_exit_batch, 9780 }; 9781 9782 /* 9783 * Initialize the DEV module. At boot time this walks the device list and 9784 * unhooks any devices that fail to initialise (normally hardware not 9785 * present) and leaves us with a valid list of present and active devices. 9786 * 9787 */ 9788 9789 /* 9790 * This is called single-threaded during boot, so no need 9791 * to take the rtnl semaphore. 9792 */ 9793 static int __init net_dev_init(void) 9794 { 9795 int i, rc = -ENOMEM; 9796 9797 BUG_ON(!dev_boot_phase); 9798 9799 if (dev_proc_init()) 9800 goto out; 9801 9802 if (netdev_kobject_init()) 9803 goto out; 9804 9805 INIT_LIST_HEAD(&ptype_all); 9806 for (i = 0; i < PTYPE_HASH_SIZE; i++) 9807 INIT_LIST_HEAD(&ptype_base[i]); 9808 9809 INIT_LIST_HEAD(&offload_base); 9810 9811 if (register_pernet_subsys(&netdev_net_ops)) 9812 goto out; 9813 9814 /* 9815 * Initialise the packet receive queues. 9816 */ 9817 9818 for_each_possible_cpu(i) { 9819 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 9820 struct softnet_data *sd = &per_cpu(softnet_data, i); 9821 9822 INIT_WORK(flush, flush_backlog); 9823 9824 skb_queue_head_init(&sd->input_pkt_queue); 9825 skb_queue_head_init(&sd->process_queue); 9826 #ifdef CONFIG_XFRM_OFFLOAD 9827 skb_queue_head_init(&sd->xfrm_backlog); 9828 #endif 9829 INIT_LIST_HEAD(&sd->poll_list); 9830 sd->output_queue_tailp = &sd->output_queue; 9831 #ifdef CONFIG_RPS 9832 sd->csd.func = rps_trigger_softirq; 9833 sd->csd.info = sd; 9834 sd->cpu = i; 9835 #endif 9836 9837 init_gro_hash(&sd->backlog); 9838 sd->backlog.poll = process_backlog; 9839 sd->backlog.weight = weight_p; 9840 } 9841 9842 dev_boot_phase = 0; 9843 9844 /* The loopback device is special: if any other network device 9845 * is present in a network namespace, the loopback device must 9846 * be present. Since we now dynamically allocate and free the 9847 * loopback device, ensure this invariant is maintained by 9848 * keeping the loopback device as the first device on the 9849 * list of network devices. This ensures the loopback device 9850 * is the first device that appears and the last network device 9851 * that disappears. 9852 */ 9853 if (register_pernet_device(&loopback_net_ops)) 9854 goto out; 9855 9856 if (register_pernet_device(&default_device_ops)) 9857 goto out; 9858 9859 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 9860 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 9861 9862 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 9863 NULL, dev_cpu_dead); 9864 WARN_ON(rc < 0); 9865 rc = 0; 9866 out: 9867 return rc; 9868 } 9869 9870 subsys_initcall(net_dev_init); 9871
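/*
 * Illustrative sketch (added for exposition, not part of the original
 * dev.c): a minimal, hypothetical driver showing how the registration
 * API implemented above is typically consumed.  All "example_*" names
 * are invented for this sketch; only well-known core helpers
 * (alloc_etherdev(), register_netdev(), unregister_netdev(),
 * free_netdev()) are used.  Guarded by #if 0 so it is never built.
 */
#if 0
struct example_priv {
	struct net_device *dev;
};

static const struct net_device_ops example_netdev_ops = {
	/* .ndo_open, .ndo_stop, .ndo_start_xmit, ... supplied by the driver */
};

static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	/* alloc_etherdev() is alloc_netdev_mqs() with ether_setup() and one queue */
	example_dev = alloc_etherdev(sizeof(struct example_priv));
	if (!example_dev)
		return -ENOMEM;

	example_dev->netdev_ops = &example_netdev_ops;
	eth_hw_addr_random(example_dev);

	/* register_netdev() takes the rtnl lock around register_netdevice() */
	err = register_netdev(example_dev);
	if (err) {
		free_netdev(example_dev);
		return err;
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* unregister_netdev() queues the todo work that netdev_run_todo() completes */
	unregister_netdev(example_dev);
	/* free_netdev() must follow unregistration and may sleep */
	free_netdev(example_dev);
}

module_init(example_init);
module_exit(example_exit);
#endif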