// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell :	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
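
/*
 * Illustrative sketch (not part of the original file): the reader side of
 * the locking rules described above.  A pure reader can walk the device
 * list under RCU without taking dev_base_lock or the RTNL; the helper name
 * below is hypothetical and only demonstrates the pattern.
 *
 *	static void example_walk_netdevs(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			pr_debug("%s: ifindex %d\n", dev->name, dev->ifindex);
 *		rcu_read_unlock();
 *	}
 */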

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 441 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 442 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 443 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 444 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 445 446 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 447 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 448 449 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 450 { 451 int i; 452 453 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 454 if (netdev_lock_type[i] == dev_type) 455 return i; 456 /* the last key is used by default */ 457 return ARRAY_SIZE(netdev_lock_type) - 1; 458 } 459 460 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 461 unsigned short dev_type) 462 { 463 int i; 464 465 i = netdev_lock_pos(dev_type); 466 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 467 netdev_lock_name[i]); 468 } 469 470 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 471 { 472 int i; 473 474 i = netdev_lock_pos(dev->type); 475 lockdep_set_class_and_name(&dev->addr_list_lock, 476 &netdev_addr_lock_key[i], 477 netdev_lock_name[i]); 478 } 479 #else 480 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 481 unsigned short dev_type) 482 { 483 } 484 485 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 486 { 487 } 488 #endif 489 490 /******************************************************************************* 491 * 492 * Protocol management and registration routines 493 * 494 *******************************************************************************/ 495 496 497 /* 498 * Add a protocol ID to the list. Now that the input handler is 499 * smarter we can dispense with all the messy stuff that used to be 500 * here. 501 * 502 * BEWARE!!! Protocol handlers, mangling input packets, 503 * MUST BE last in hash buckets and checking protocol handlers 504 * MUST start from promiscuous ptype_all chain in net_bh. 505 * It is true now, do not change it. 506 * Explanation follows: if protocol handler, mangling packet, will 507 * be the first on list, it is not able to sense, that packet 508 * is cloned and should be copied-on-write, so that it will 509 * change it and subsequent readers will get broken packet. 510 * --ANK (980803) 511 */ 512 513 static inline struct list_head *ptype_head(const struct packet_type *pt) 514 { 515 if (pt->type == htons(ETH_P_ALL)) 516 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 517 else 518 return pt->dev ? &pt->dev->ptype_specific : 519 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 520 } 521 522 /** 523 * dev_add_pack - add packet handler 524 * @pt: packet type declaration 525 * 526 * Add a protocol handler to the networking stack. The passed &packet_type 527 * is linked into kernel lists and may not be freed until it has been 528 * removed from the kernel lists. 529 * 530 * This call does not sleep therefore it can not 531 * guarantee all CPU's that are in middle of receiving packets 532 * will see the new packet type (until the next received packet). 
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
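
/*
 * Illustrative sketch (not part of the original file): how a protocol
 * module typically pairs dev_add_pack() with dev_remove_pack().  The
 * handler and ETH_P_ALL choice below are only examples; a real .func must
 * treat the skb as possibly shared (see the copy-on-write note above) and
 * must consume or free it.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_ptype __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ALL),
 *		.func	= example_rcv,
 *	};
 *
 *	In module init:
 *		dev_add_pack(&example_ptype);
 *	and in module exit:
 *		dev_remove_pack(&example_ptype);
 */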


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
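
/*
 * Illustrative note (not part of the original file): get_options() above
 * parses up to four integers, so a command line such as
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * saves irq 9 and I/O base 0x300 for the device that will later register
 * as "eth1"; netdev_boot_setup_check() then copies the saved ifmap into
 * the matching net_device during probing.
 */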

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. This API allows the
 *	caller to obtain that information.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
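
/*
 * Illustrative sketch (not part of the original file): the reference
 * counting contract of dev_get_by_name().  The caller owns a reference on
 * success and must drop it with dev_put() when done; the interface name
 * used here is only an example.
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (!dev)
 *		return -ENODEV;
 *	...use dev...
 *	dev_put(dev);
 */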

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
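
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * the next free "foo%d" style name can let the core pick the unit number.
 * The format string and error label here are only examples.
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto fail;
 *	netdev_info(dev, "using name %s\n", dev->name);
 */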

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
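
/*
 * Illustrative note (not part of the original file): a driver that has
 * just completed a failover or live migration can call
 * netdev_notify_peers(dev) from process context (it takes the RTNL
 * itself), or __netdev_notify_peers(dev) when the RTNL is already held,
 * to trigger gratuitous ARP/ND and IGMP resends on behalf of the device.
 */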

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
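
/*
 * Illustrative sketch (not part of the original file): bringing a device
 * up from kernel code.  dev_open() must run with the RTNL held; passing a
 * NULL extack simply means no extended error reporting is requested.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	rtnl_unlock();
 */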

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
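
/*
 * Illustrative note (not part of the original file): the stack calls
 * dev_disable_lro() when a device may start forwarding, for example when
 * it is enslaved to a bridge or when IP forwarding is enabled on it.
 * Because the call recurses into lower devices it must run under the RTNL.
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 */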

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						     struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
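
/*
 * Illustrative sketch (not part of the original file): a minimal netdev
 * event listener.  The callback and its names are hypothetical; it runs
 * under the RTNL, so it must not block for long or re-take the RTNL.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_debug("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */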
1956 */ 1957 1958 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 1959 { 1960 int err; 1961 1962 rtnl_lock(); 1963 err = __register_netdevice_notifier_net(net, nb, false); 1964 rtnl_unlock(); 1965 return err; 1966 } 1967 EXPORT_SYMBOL(register_netdevice_notifier_net); 1968 1969 /** 1970 * unregister_netdevice_notifier_net - unregister a per-netns 1971 * network notifier block 1972 * @net: network namespace 1973 * @nb: notifier 1974 * 1975 * Unregister a notifier previously registered by 1976 * register_netdevice_notifier(). The notifier is unlinked into the 1977 * kernel structures and may then be reused. A negative errno code 1978 * is returned on a failure. 1979 * 1980 * After unregistering unregister and down device events are synthesized 1981 * for all devices on the device list to the removed notifier to remove 1982 * the need for special case cleanup code. 1983 */ 1984 1985 int unregister_netdevice_notifier_net(struct net *net, 1986 struct notifier_block *nb) 1987 { 1988 int err; 1989 1990 rtnl_lock(); 1991 err = __unregister_netdevice_notifier_net(net, nb); 1992 rtnl_unlock(); 1993 return err; 1994 } 1995 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 1996 1997 int register_netdevice_notifier_dev_net(struct net_device *dev, 1998 struct notifier_block *nb, 1999 struct netdev_net_notifier *nn) 2000 { 2001 int err; 2002 2003 rtnl_lock(); 2004 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 2005 if (!err) { 2006 nn->nb = nb; 2007 list_add(&nn->list, &dev->net_notifier_list); 2008 } 2009 rtnl_unlock(); 2010 return err; 2011 } 2012 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 2013 2014 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2015 struct notifier_block *nb, 2016 struct netdev_net_notifier *nn) 2017 { 2018 int err; 2019 2020 rtnl_lock(); 2021 list_del(&nn->list); 2022 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 2023 rtnl_unlock(); 2024 return err; 2025 } 2026 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 2027 2028 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 2029 struct net *net) 2030 { 2031 struct netdev_net_notifier *nn; 2032 2033 list_for_each_entry(nn, &dev->net_notifier_list, list) { 2034 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb); 2035 __register_netdevice_notifier_net(net, nn->nb, true); 2036 } 2037 } 2038 2039 /** 2040 * call_netdevice_notifiers_info - call all network notifier blocks 2041 * @val: value passed unmodified to notifier function 2042 * @info: notifier information data 2043 * 2044 * Call all network notifier blocks. Parameters and return value 2045 * are as for raw_notifier_call_chain(). 2046 */ 2047 2048 static int call_netdevice_notifiers_info(unsigned long val, 2049 struct netdev_notifier_info *info) 2050 { 2051 struct net *net = dev_net(info->dev); 2052 int ret; 2053 2054 ASSERT_RTNL(); 2055 2056 /* Run per-netns notifier block chain first, then run the global one. 2057 * Hopefully, one day, the global one is going to be removed after 2058 * all notifier block registrators get converted to be per-netns. 
2059 */ 2060 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 2061 if (ret & NOTIFY_STOP_MASK) 2062 return ret; 2063 return raw_notifier_call_chain(&netdev_chain, val, info); 2064 } 2065 2066 static int call_netdevice_notifiers_extack(unsigned long val, 2067 struct net_device *dev, 2068 struct netlink_ext_ack *extack) 2069 { 2070 struct netdev_notifier_info info = { 2071 .dev = dev, 2072 .extack = extack, 2073 }; 2074 2075 return call_netdevice_notifiers_info(val, &info); 2076 } 2077 2078 /** 2079 * call_netdevice_notifiers - call all network notifier blocks 2080 * @val: value passed unmodified to notifier function 2081 * @dev: net_device pointer passed unmodified to notifier function 2082 * 2083 * Call all network notifier blocks. Parameters and return value 2084 * are as for raw_notifier_call_chain(). 2085 */ 2086 2087 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 2088 { 2089 return call_netdevice_notifiers_extack(val, dev, NULL); 2090 } 2091 EXPORT_SYMBOL(call_netdevice_notifiers); 2092 2093 /** 2094 * call_netdevice_notifiers_mtu - call all network notifier blocks 2095 * @val: value passed unmodified to notifier function 2096 * @dev: net_device pointer passed unmodified to notifier function 2097 * @arg: additional u32 argument passed to the notifier function 2098 * 2099 * Call all network notifier blocks. Parameters and return value 2100 * are as for raw_notifier_call_chain(). 2101 */ 2102 static int call_netdevice_notifiers_mtu(unsigned long val, 2103 struct net_device *dev, u32 arg) 2104 { 2105 struct netdev_notifier_info_ext info = { 2106 .info.dev = dev, 2107 .ext.mtu = arg, 2108 }; 2109 2110 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2111 2112 return call_netdevice_notifiers_info(val, &info.info); 2113 } 2114 2115 #ifdef CONFIG_NET_INGRESS 2116 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2117 2118 void net_inc_ingress_queue(void) 2119 { 2120 static_branch_inc(&ingress_needed_key); 2121 } 2122 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2123 2124 void net_dec_ingress_queue(void) 2125 { 2126 static_branch_dec(&ingress_needed_key); 2127 } 2128 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2129 #endif 2130 2131 #ifdef CONFIG_NET_EGRESS 2132 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2133 2134 void net_inc_egress_queue(void) 2135 { 2136 static_branch_inc(&egress_needed_key); 2137 } 2138 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2139 2140 void net_dec_egress_queue(void) 2141 { 2142 static_branch_dec(&egress_needed_key); 2143 } 2144 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2145 #endif 2146 2147 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2148 #ifdef CONFIG_JUMP_LABEL 2149 static atomic_t netstamp_needed_deferred; 2150 static atomic_t netstamp_wanted; 2151 static void netstamp_clear(struct work_struct *work) 2152 { 2153 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2154 int wanted; 2155 2156 wanted = atomic_add_return(deferred, &netstamp_wanted); 2157 if (wanted > 0) 2158 static_branch_enable(&netstamp_needed_key); 2159 else 2160 static_branch_disable(&netstamp_needed_key); 2161 } 2162 static DECLARE_WORK(netstamp_work, netstamp_clear); 2163 #endif 2164 2165 void net_enable_timestamp(void) 2166 { 2167 #ifdef CONFIG_JUMP_LABEL 2168 int wanted; 2169 2170 while (1) { 2171 wanted = atomic_read(&netstamp_wanted); 2172 if (wanted <= 0) 2173 break; 2174 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 2175 return; 2176 } 2177 atomic_inc(&netstamp_needed_deferred); 2178 
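	/* static_branch_inc()/dec() may sleep, so the key flip is deferred
	 * to process context: netstamp_clear() folds the deferred count
	 * back into netstamp_wanted from the workqueue.
	 */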
schedule_work(&netstamp_work); 2179 #else 2180 static_branch_inc(&netstamp_needed_key); 2181 #endif 2182 } 2183 EXPORT_SYMBOL(net_enable_timestamp); 2184 2185 void net_disable_timestamp(void) 2186 { 2187 #ifdef CONFIG_JUMP_LABEL 2188 int wanted; 2189 2190 while (1) { 2191 wanted = atomic_read(&netstamp_wanted); 2192 if (wanted <= 1) 2193 break; 2194 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 2195 return; 2196 } 2197 atomic_dec(&netstamp_needed_deferred); 2198 schedule_work(&netstamp_work); 2199 #else 2200 static_branch_dec(&netstamp_needed_key); 2201 #endif 2202 } 2203 EXPORT_SYMBOL(net_disable_timestamp); 2204 2205 static inline void net_timestamp_set(struct sk_buff *skb) 2206 { 2207 skb->tstamp = 0; 2208 if (static_branch_unlikely(&netstamp_needed_key)) 2209 __net_timestamp(skb); 2210 } 2211 2212 #define net_timestamp_check(COND, SKB) \ 2213 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2214 if ((COND) && !(SKB)->tstamp) \ 2215 __net_timestamp(SKB); \ 2216 } \ 2217 2218 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2219 { 2220 return __is_skb_forwardable(dev, skb, true); 2221 } 2222 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2223 2224 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2225 bool check_mtu) 2226 { 2227 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2228 2229 if (likely(!ret)) { 2230 skb->protocol = eth_type_trans(skb, dev); 2231 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2232 } 2233 2234 return ret; 2235 } 2236 2237 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2238 { 2239 return __dev_forward_skb2(dev, skb, true); 2240 } 2241 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2242 2243 /** 2244 * dev_forward_skb - loopback an skb to another netif 2245 * 2246 * @dev: destination network device 2247 * @skb: buffer to forward 2248 * 2249 * return values: 2250 * NET_RX_SUCCESS (no congestion) 2251 * NET_RX_DROP (packet was dropped, but freed) 2252 * 2253 * dev_forward_skb can be used for injecting an skb from the 2254 * start_xmit function of one device into the receive queue 2255 * of another device. 2256 * 2257 * The receiving device may be in another namespace, so 2258 * we have to clear all information in the skb that could 2259 * impact namespace isolation. 
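 *
 * A minimal sketch (hypothetical driver, names are illustrative only) of
 * an ndo_start_xmit handing frames to a peer device:
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		dev_forward_skb(priv->peer, skb);
 *		return NETDEV_TX_OK;
 *	}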
2260 */ 2261 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2262 { 2263 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2264 } 2265 EXPORT_SYMBOL_GPL(dev_forward_skb); 2266 2267 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2268 { 2269 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2270 } 2271 2272 static inline int deliver_skb(struct sk_buff *skb, 2273 struct packet_type *pt_prev, 2274 struct net_device *orig_dev) 2275 { 2276 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2277 return -ENOMEM; 2278 refcount_inc(&skb->users); 2279 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2280 } 2281 2282 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2283 struct packet_type **pt, 2284 struct net_device *orig_dev, 2285 __be16 type, 2286 struct list_head *ptype_list) 2287 { 2288 struct packet_type *ptype, *pt_prev = *pt; 2289 2290 list_for_each_entry_rcu(ptype, ptype_list, list) { 2291 if (ptype->type != type) 2292 continue; 2293 if (pt_prev) 2294 deliver_skb(skb, pt_prev, orig_dev); 2295 pt_prev = ptype; 2296 } 2297 *pt = pt_prev; 2298 } 2299 2300 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2301 { 2302 if (!ptype->af_packet_priv || !skb->sk) 2303 return false; 2304 2305 if (ptype->id_match) 2306 return ptype->id_match(ptype, skb->sk); 2307 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2308 return true; 2309 2310 return false; 2311 } 2312 2313 /** 2314 * dev_nit_active - return true if any network interface taps are in use 2315 * 2316 * @dev: network device to check for the presence of taps 2317 */ 2318 bool dev_nit_active(struct net_device *dev) 2319 { 2320 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2321 } 2322 EXPORT_SYMBOL_GPL(dev_nit_active); 2323 2324 /* 2325 * Support routine. Sends outgoing frames to any network 2326 * taps currently in use. 2327 */ 2328 2329 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2330 { 2331 struct packet_type *ptype; 2332 struct sk_buff *skb2 = NULL; 2333 struct packet_type *pt_prev = NULL; 2334 struct list_head *ptype_list = &ptype_all; 2335 2336 rcu_read_lock(); 2337 again: 2338 list_for_each_entry_rcu(ptype, ptype_list, list) { 2339 if (ptype->ignore_outgoing) 2340 continue; 2341 2342 /* Never send packets back to the socket 2343 * they originated from - MvS (miquels@drinkel.ow.org) 2344 */ 2345 if (skb_loop_sk(ptype, skb)) 2346 continue; 2347 2348 if (pt_prev) { 2349 deliver_skb(skb2, pt_prev, skb->dev); 2350 pt_prev = ptype; 2351 continue; 2352 } 2353 2354 /* need to clone skb, done only once */ 2355 skb2 = skb_clone(skb, GFP_ATOMIC); 2356 if (!skb2) 2357 goto out_unlock; 2358 2359 net_timestamp_set(skb2); 2360 2361 /* skb->nh should be correctly 2362 * set by sender, so that the second statement is 2363 * just protection against buggy protocols. 
2364 */ 2365 skb_reset_mac_header(skb2); 2366 2367 if (skb_network_header(skb2) < skb2->data || 2368 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2369 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2370 ntohs(skb2->protocol), 2371 dev->name); 2372 skb_reset_network_header(skb2); 2373 } 2374 2375 skb2->transport_header = skb2->network_header; 2376 skb2->pkt_type = PACKET_OUTGOING; 2377 pt_prev = ptype; 2378 } 2379 2380 if (ptype_list == &ptype_all) { 2381 ptype_list = &dev->ptype_all; 2382 goto again; 2383 } 2384 out_unlock: 2385 if (pt_prev) { 2386 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2387 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2388 else 2389 kfree_skb(skb2); 2390 } 2391 rcu_read_unlock(); 2392 } 2393 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2394 2395 /** 2396 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2397 * @dev: Network device 2398 * @txq: number of queues available 2399 * 2400 * If real_num_tx_queues is changed the tc mappings may no longer be 2401 * valid. To resolve this verify the tc mapping remains valid and if 2402 * not NULL the mapping. With no priorities mapping to this 2403 * offset/count pair it will no longer be used. In the worst case TC0 2404 * is invalid nothing can be done so disable priority mappings. If is 2405 * expected that drivers will fix this mapping if they can before 2406 * calling netif_set_real_num_tx_queues. 2407 */ 2408 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2409 { 2410 int i; 2411 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2412 2413 /* If TC0 is invalidated disable TC mapping */ 2414 if (tc->offset + tc->count > txq) { 2415 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2416 dev->num_tc = 0; 2417 return; 2418 } 2419 2420 /* Invalidated prio to tc mappings set to TC0 */ 2421 for (i = 1; i < TC_BITMASK + 1; i++) { 2422 int q = netdev_get_prio_tc_map(dev, i); 2423 2424 tc = &dev->tc_to_txq[q]; 2425 if (tc->offset + tc->count > txq) { 2426 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2427 i, q); 2428 netdev_set_prio_tc_map(dev, i, 0); 2429 } 2430 } 2431 } 2432 2433 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2434 { 2435 if (dev->num_tc) { 2436 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2437 int i; 2438 2439 /* walk through the TCs and see if it falls into any of them */ 2440 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2441 if ((txq - tc->offset) < tc->count) 2442 return i; 2443 } 2444 2445 /* didn't find it, just return -1 to indicate no match */ 2446 return -1; 2447 } 2448 2449 return 0; 2450 } 2451 EXPORT_SYMBOL(netdev_txq_to_tc); 2452 2453 #ifdef CONFIG_XPS 2454 struct static_key xps_needed __read_mostly; 2455 EXPORT_SYMBOL(xps_needed); 2456 struct static_key xps_rxqs_needed __read_mostly; 2457 EXPORT_SYMBOL(xps_rxqs_needed); 2458 static DEFINE_MUTEX(xps_map_mutex); 2459 #define xmap_dereference(P) \ 2460 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2461 2462 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2463 int tci, u16 index) 2464 { 2465 struct xps_map *map = NULL; 2466 int pos; 2467 2468 if (dev_maps) 2469 map = xmap_dereference(dev_maps->attr_map[tci]); 2470 if (!map) 2471 return false; 2472 2473 for (pos = map->len; pos--;) { 2474 if (map->queues[pos] != index) 2475 continue; 2476 2477 if (map->len > 1) { 2478 map->queues[pos] = map->queues[--map->len]; 2479 break; 2480 } 2481 2482 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2483 kfree_rcu(map, rcu); 2484 return false; 2485 } 2486 2487 return true; 2488 } 2489 2490 static bool remove_xps_queue_cpu(struct net_device *dev, 2491 struct xps_dev_maps *dev_maps, 2492 int cpu, u16 offset, u16 count) 2493 { 2494 int num_tc = dev->num_tc ? : 1; 2495 bool active = false; 2496 int tci; 2497 2498 for (tci = cpu * num_tc; num_tc--; tci++) { 2499 int i, j; 2500 2501 for (i = count, j = offset; i--; j++) { 2502 if (!remove_xps_queue(dev_maps, tci, j)) 2503 break; 2504 } 2505 2506 active |= i < 0; 2507 } 2508 2509 return active; 2510 } 2511 2512 static void reset_xps_maps(struct net_device *dev, 2513 struct xps_dev_maps *dev_maps, 2514 bool is_rxqs_map) 2515 { 2516 if (is_rxqs_map) { 2517 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2518 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2519 } else { 2520 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2521 } 2522 static_key_slow_dec_cpuslocked(&xps_needed); 2523 kfree_rcu(dev_maps, rcu); 2524 } 2525 2526 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, 2527 struct xps_dev_maps *dev_maps, unsigned int nr_ids, 2528 u16 offset, u16 count, bool is_rxqs_map) 2529 { 2530 bool active = false; 2531 int i, j; 2532 2533 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), 2534 j < nr_ids;) 2535 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, 2536 count); 2537 if (!active) 2538 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2539 2540 if (!is_rxqs_map) { 2541 for (i = offset + (count - 1); count--; i--) { 2542 netdev_queue_numa_node_write( 2543 netdev_get_tx_queue(dev, i), 2544 NUMA_NO_NODE); 2545 } 2546 } 2547 } 2548 2549 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2550 u16 count) 2551 { 2552 const unsigned long *possible_mask = NULL; 2553 struct xps_dev_maps *dev_maps; 2554 unsigned int nr_ids; 2555 2556 if (!static_key_false(&xps_needed)) 2557 return; 2558 2559 cpus_read_lock(); 2560 mutex_lock(&xps_map_mutex); 2561 2562 if (static_key_false(&xps_rxqs_needed)) { 2563 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2564 if (dev_maps) { 2565 nr_ids = 
dev->num_rx_queues; 2566 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, 2567 offset, count, true); 2568 } 2569 } 2570 2571 dev_maps = xmap_dereference(dev->xps_cpus_map); 2572 if (!dev_maps) 2573 goto out_no_maps; 2574 2575 if (num_possible_cpus() > 1) 2576 possible_mask = cpumask_bits(cpu_possible_mask); 2577 nr_ids = nr_cpu_ids; 2578 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, 2579 false); 2580 2581 out_no_maps: 2582 mutex_unlock(&xps_map_mutex); 2583 cpus_read_unlock(); 2584 } 2585 2586 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2587 { 2588 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2589 } 2590 2591 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2592 u16 index, bool is_rxqs_map) 2593 { 2594 struct xps_map *new_map; 2595 int alloc_len = XPS_MIN_MAP_ALLOC; 2596 int i, pos; 2597 2598 for (pos = 0; map && pos < map->len; pos++) { 2599 if (map->queues[pos] != index) 2600 continue; 2601 return map; 2602 } 2603 2604 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2605 if (map) { 2606 if (pos < map->alloc_len) 2607 return map; 2608 2609 alloc_len = map->alloc_len * 2; 2610 } 2611 2612 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2613 * map 2614 */ 2615 if (is_rxqs_map) 2616 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2617 else 2618 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2619 cpu_to_node(attr_index)); 2620 if (!new_map) 2621 return NULL; 2622 2623 for (i = 0; i < pos; i++) 2624 new_map->queues[i] = map->queues[i]; 2625 new_map->alloc_len = alloc_len; 2626 new_map->len = pos; 2627 2628 return new_map; 2629 } 2630 2631 /* Must be called under cpus_read_lock */ 2632 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2633 u16 index, bool is_rxqs_map) 2634 { 2635 const unsigned long *online_mask = NULL, *possible_mask = NULL; 2636 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 2637 int i, j, tci, numa_node_id = -2; 2638 int maps_sz, num_tc = 1, tc = 0; 2639 struct xps_map *map, *new_map; 2640 bool active = false; 2641 unsigned int nr_ids; 2642 2643 if (dev->num_tc) { 2644 /* Do not allow XPS on subordinate device directly */ 2645 num_tc = dev->num_tc; 2646 if (num_tc < 0) 2647 return -EINVAL; 2648 2649 /* If queue belongs to subordinate dev use its map */ 2650 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2651 2652 tc = netdev_txq_to_tc(dev, index); 2653 if (tc < 0) 2654 return -EINVAL; 2655 } 2656 2657 mutex_lock(&xps_map_mutex); 2658 if (is_rxqs_map) { 2659 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2660 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2661 nr_ids = dev->num_rx_queues; 2662 } else { 2663 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2664 if (num_possible_cpus() > 1) { 2665 online_mask = cpumask_bits(cpu_online_mask); 2666 possible_mask = cpumask_bits(cpu_possible_mask); 2667 } 2668 dev_maps = xmap_dereference(dev->xps_cpus_map); 2669 nr_ids = nr_cpu_ids; 2670 } 2671 2672 if (maps_sz < L1_CACHE_BYTES) 2673 maps_sz = L1_CACHE_BYTES; 2674 2675 /* allocate memory for queue storage */ 2676 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2677 j < nr_ids;) { 2678 if (!new_dev_maps) 2679 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2680 if (!new_dev_maps) { 2681 mutex_unlock(&xps_map_mutex); 2682 return -ENOMEM; 2683 } 2684 2685 tci = j * num_tc + tc; 2686 map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : 2687 NULL; 2688 2689 map = expand_xps_map(map, j, index, is_rxqs_map); 2690 if (!map) 2691 goto error; 2692 2693 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2694 } 2695 2696 if (!new_dev_maps) 2697 goto out_no_new_maps; 2698 2699 if (!dev_maps) { 2700 /* Increment static keys at most once per type */ 2701 static_key_slow_inc_cpuslocked(&xps_needed); 2702 if (is_rxqs_map) 2703 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2704 } 2705 2706 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2707 j < nr_ids;) { 2708 /* copy maps belonging to foreign traffic classes */ 2709 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { 2710 /* fill in the new device map from the old device map */ 2711 map = xmap_dereference(dev_maps->attr_map[tci]); 2712 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2713 } 2714 2715 /* We need to explicitly update tci as prevous loop 2716 * could break out early if dev_maps is NULL. 2717 */ 2718 tci = j * num_tc + tc; 2719 2720 if (netif_attr_test_mask(j, mask, nr_ids) && 2721 netif_attr_test_online(j, online_mask, nr_ids)) { 2722 /* add tx-queue to CPU/rx-queue maps */ 2723 int pos = 0; 2724 2725 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2726 while ((pos < map->len) && (map->queues[pos] != index)) 2727 pos++; 2728 2729 if (pos == map->len) 2730 map->queues[map->len++] = index; 2731 #ifdef CONFIG_NUMA 2732 if (!is_rxqs_map) { 2733 if (numa_node_id == -2) 2734 numa_node_id = cpu_to_node(j); 2735 else if (numa_node_id != cpu_to_node(j)) 2736 numa_node_id = -1; 2737 } 2738 #endif 2739 } else if (dev_maps) { 2740 /* fill in the new device map from the old device map */ 2741 map = xmap_dereference(dev_maps->attr_map[tci]); 2742 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2743 } 2744 2745 /* copy maps belonging to foreign traffic classes */ 2746 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { 2747 /* fill in the new device map from the old device map */ 2748 map = xmap_dereference(dev_maps->attr_map[tci]); 2749 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2750 } 2751 } 2752 2753 if (is_rxqs_map) 2754 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); 2755 else 2756 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); 2757 2758 /* Cleanup old maps */ 2759 if (!dev_maps) 2760 goto out_no_old_maps; 2761 2762 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2763 j < nr_ids;) { 2764 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2765 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2766 map = xmap_dereference(dev_maps->attr_map[tci]); 2767 if (map && map != new_map) 2768 kfree_rcu(map, rcu); 2769 } 2770 } 2771 2772 kfree_rcu(dev_maps, rcu); 2773 2774 out_no_old_maps: 2775 dev_maps = new_dev_maps; 2776 active = true; 2777 2778 out_no_new_maps: 2779 if (!is_rxqs_map) { 2780 /* update Tx queue numa node */ 2781 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2782 (numa_node_id >= 0) ? 
2783 numa_node_id : NUMA_NO_NODE); 2784 } 2785 2786 if (!dev_maps) 2787 goto out_no_maps; 2788 2789 /* removes tx-queue from unused CPUs/rx-queues */ 2790 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2791 j < nr_ids;) { 2792 for (i = tc, tci = j * num_tc; i--; tci++) 2793 active |= remove_xps_queue(dev_maps, tci, index); 2794 if (!netif_attr_test_mask(j, mask, nr_ids) || 2795 !netif_attr_test_online(j, online_mask, nr_ids)) 2796 active |= remove_xps_queue(dev_maps, tci, index); 2797 for (i = num_tc - tc, tci++; --i; tci++) 2798 active |= remove_xps_queue(dev_maps, tci, index); 2799 } 2800 2801 /* free map if not active */ 2802 if (!active) 2803 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2804 2805 out_no_maps: 2806 mutex_unlock(&xps_map_mutex); 2807 2808 return 0; 2809 error: 2810 /* remove any maps that we added */ 2811 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2812 j < nr_ids;) { 2813 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2814 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2815 map = dev_maps ? 2816 xmap_dereference(dev_maps->attr_map[tci]) : 2817 NULL; 2818 if (new_map && new_map != map) 2819 kfree(new_map); 2820 } 2821 } 2822 2823 mutex_unlock(&xps_map_mutex); 2824 2825 kfree(new_dev_maps); 2826 return -ENOMEM; 2827 } 2828 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2829 2830 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2831 u16 index) 2832 { 2833 int ret; 2834 2835 cpus_read_lock(); 2836 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); 2837 cpus_read_unlock(); 2838 2839 return ret; 2840 } 2841 EXPORT_SYMBOL(netif_set_xps_queue); 2842 2843 #endif 2844 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2845 { 2846 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2847 2848 /* Unbind any subordinate channels */ 2849 while (txq-- != &dev->_tx[0]) { 2850 if (txq->sb_dev) 2851 netdev_unbind_sb_channel(dev, txq->sb_dev); 2852 } 2853 } 2854 2855 void netdev_reset_tc(struct net_device *dev) 2856 { 2857 #ifdef CONFIG_XPS 2858 netif_reset_xps_queues_gt(dev, 0); 2859 #endif 2860 netdev_unbind_all_sb_channels(dev); 2861 2862 /* Reset TC configuration of device */ 2863 dev->num_tc = 0; 2864 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2865 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2866 } 2867 EXPORT_SYMBOL(netdev_reset_tc); 2868 2869 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2870 { 2871 if (tc >= dev->num_tc) 2872 return -EINVAL; 2873 2874 #ifdef CONFIG_XPS 2875 netif_reset_xps_queues(dev, offset, count); 2876 #endif 2877 dev->tc_to_txq[tc].count = count; 2878 dev->tc_to_txq[tc].offset = offset; 2879 return 0; 2880 } 2881 EXPORT_SYMBOL(netdev_set_tc_queue); 2882 2883 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2884 { 2885 if (num_tc > TC_MAX_QUEUE) 2886 return -EINVAL; 2887 2888 #ifdef CONFIG_XPS 2889 netif_reset_xps_queues_gt(dev, 0); 2890 #endif 2891 netdev_unbind_all_sb_channels(dev); 2892 2893 dev->num_tc = num_tc; 2894 return 0; 2895 } 2896 EXPORT_SYMBOL(netdev_set_num_tc); 2897 2898 void netdev_unbind_sb_channel(struct net_device *dev, 2899 struct net_device *sb_dev) 2900 { 2901 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2902 2903 #ifdef CONFIG_XPS 2904 netif_reset_xps_queues_gt(sb_dev, 0); 2905 #endif 2906 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2907 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2908 2909 while (txq-- != &dev->_tx[0]) { 2910 if 
(txq->sb_dev == sb_dev) 2911 txq->sb_dev = NULL; 2912 } 2913 } 2914 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2915 2916 int netdev_bind_sb_channel_queue(struct net_device *dev, 2917 struct net_device *sb_dev, 2918 u8 tc, u16 count, u16 offset) 2919 { 2920 /* Make certain the sb_dev and dev are already configured */ 2921 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2922 return -EINVAL; 2923 2924 /* We cannot hand out queues we don't have */ 2925 if ((offset + count) > dev->real_num_tx_queues) 2926 return -EINVAL; 2927 2928 /* Record the mapping */ 2929 sb_dev->tc_to_txq[tc].count = count; 2930 sb_dev->tc_to_txq[tc].offset = offset; 2931 2932 /* Provide a way for Tx queue to find the tc_to_txq map or 2933 * XPS map for itself. 2934 */ 2935 while (count--) 2936 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2937 2938 return 0; 2939 } 2940 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2941 2942 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2943 { 2944 /* Do not use a multiqueue device to represent a subordinate channel */ 2945 if (netif_is_multiqueue(dev)) 2946 return -ENODEV; 2947 2948 /* We allow channels 1 - 32767 to be used for subordinate channels. 2949 * Channel 0 is meant to be "native" mode and used only to represent 2950 * the main root device. We allow writing 0 to reset the device back 2951 * to normal mode after being used as a subordinate channel. 2952 */ 2953 if (channel > S16_MAX) 2954 return -EINVAL; 2955 2956 dev->num_tc = -channel; 2957 2958 return 0; 2959 } 2960 EXPORT_SYMBOL(netdev_set_sb_channel); 2961 2962 /* 2963 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2964 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2965 */ 2966 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2967 { 2968 bool disabling; 2969 int rc; 2970 2971 disabling = txq < dev->real_num_tx_queues; 2972 2973 if (txq < 1 || txq > dev->num_tx_queues) 2974 return -EINVAL; 2975 2976 if (dev->reg_state == NETREG_REGISTERED || 2977 dev->reg_state == NETREG_UNREGISTERING) { 2978 ASSERT_RTNL(); 2979 2980 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2981 txq); 2982 if (rc) 2983 return rc; 2984 2985 if (dev->num_tc) 2986 netif_setup_tc(dev, txq); 2987 2988 dev->real_num_tx_queues = txq; 2989 2990 if (disabling) { 2991 synchronize_net(); 2992 qdisc_reset_all_tx_gt(dev, txq); 2993 #ifdef CONFIG_XPS 2994 netif_reset_xps_queues_gt(dev, txq); 2995 #endif 2996 } 2997 } else { 2998 dev->real_num_tx_queues = txq; 2999 } 3000 3001 return 0; 3002 } 3003 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 3004 3005 #ifdef CONFIG_SYSFS 3006 /** 3007 * netif_set_real_num_rx_queues - set actual number of RX queues used 3008 * @dev: Network device 3009 * @rxq: Actual number of RX queues 3010 * 3011 * This must be called either with the rtnl_lock held or before 3012 * registration of the net device. Returns 0 on success, or a 3013 * negative error code. If called before registration, it always 3014 * succeeds. 
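 *
 * A typical caller is a driver's ethtool ->set_channels() handler, which
 * resizes the active queue set under rtnl_lock. Sketch (hypothetical
 * driver, new_txqs/new_rxqs are illustrative):
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txqs);
 *	if (err)
 *		return err;
 *	return netif_set_real_num_rx_queues(dev, new_rxqs);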
3015 */ 3016 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 3017 { 3018 int rc; 3019 3020 if (rxq < 1 || rxq > dev->num_rx_queues) 3021 return -EINVAL; 3022 3023 if (dev->reg_state == NETREG_REGISTERED) { 3024 ASSERT_RTNL(); 3025 3026 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 3027 rxq); 3028 if (rc) 3029 return rc; 3030 } 3031 3032 dev->real_num_rx_queues = rxq; 3033 return 0; 3034 } 3035 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 3036 #endif 3037 3038 /** 3039 * netif_get_num_default_rss_queues - default number of RSS queues 3040 * 3041 * This routine should set an upper limit on the number of RSS queues 3042 * used by default by multiqueue devices. 3043 */ 3044 int netif_get_num_default_rss_queues(void) 3045 { 3046 return is_kdump_kernel() ? 3047 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 3048 } 3049 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3050 3051 static void __netif_reschedule(struct Qdisc *q) 3052 { 3053 struct softnet_data *sd; 3054 unsigned long flags; 3055 3056 local_irq_save(flags); 3057 sd = this_cpu_ptr(&softnet_data); 3058 q->next_sched = NULL; 3059 *sd->output_queue_tailp = q; 3060 sd->output_queue_tailp = &q->next_sched; 3061 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3062 local_irq_restore(flags); 3063 } 3064 3065 void __netif_schedule(struct Qdisc *q) 3066 { 3067 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3068 __netif_reschedule(q); 3069 } 3070 EXPORT_SYMBOL(__netif_schedule); 3071 3072 struct dev_kfree_skb_cb { 3073 enum skb_free_reason reason; 3074 }; 3075 3076 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3077 { 3078 return (struct dev_kfree_skb_cb *)skb->cb; 3079 } 3080 3081 void netif_schedule_queue(struct netdev_queue *txq) 3082 { 3083 rcu_read_lock(); 3084 if (!netif_xmit_stopped(txq)) { 3085 struct Qdisc *q = rcu_dereference(txq->qdisc); 3086 3087 __netif_schedule(q); 3088 } 3089 rcu_read_unlock(); 3090 } 3091 EXPORT_SYMBOL(netif_schedule_queue); 3092 3093 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3094 { 3095 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3096 struct Qdisc *q; 3097 3098 rcu_read_lock(); 3099 q = rcu_dereference(dev_queue->qdisc); 3100 __netif_schedule(q); 3101 rcu_read_unlock(); 3102 } 3103 } 3104 EXPORT_SYMBOL(netif_tx_wake_queue); 3105 3106 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 3107 { 3108 unsigned long flags; 3109 3110 if (unlikely(!skb)) 3111 return; 3112 3113 if (likely(refcount_read(&skb->users) == 1)) { 3114 smp_rmb(); 3115 refcount_set(&skb->users, 0); 3116 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3117 return; 3118 } 3119 get_kfree_skb_cb(skb)->reason = reason; 3120 local_irq_save(flags); 3121 skb->next = __this_cpu_read(softnet_data.completion_queue); 3122 __this_cpu_write(softnet_data.completion_queue, skb); 3123 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3124 local_irq_restore(flags); 3125 } 3126 EXPORT_SYMBOL(__dev_kfree_skb_irq); 3127 3128 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 3129 { 3130 if (in_irq() || irqs_disabled()) 3131 __dev_kfree_skb_irq(skb, reason); 3132 else 3133 dev_kfree_skb(skb); 3134 } 3135 EXPORT_SYMBOL(__dev_kfree_skb_any); 3136 3137 3138 /** 3139 * netif_device_detach - mark device as removed 3140 * @dev: network device 3141 * 3142 * Mark device as removed from system and therefore no longer available. 
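 *
 * Typically called from a driver's suspend or fatal-error path, with the
 * matching netif_device_attach() on resume. Sketch (hypothetical driver):
 *
 *	suspend:	netif_device_detach(dev);
 *	resume:		netif_device_attach(dev);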
3143 */ 3144 void netif_device_detach(struct net_device *dev) 3145 { 3146 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3147 netif_running(dev)) { 3148 netif_tx_stop_all_queues(dev); 3149 } 3150 } 3151 EXPORT_SYMBOL(netif_device_detach); 3152 3153 /** 3154 * netif_device_attach - mark device as attached 3155 * @dev: network device 3156 * 3157 * Mark device as attached from system and restart if needed. 3158 */ 3159 void netif_device_attach(struct net_device *dev) 3160 { 3161 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3162 netif_running(dev)) { 3163 netif_tx_wake_all_queues(dev); 3164 __netdev_watchdog_up(dev); 3165 } 3166 } 3167 EXPORT_SYMBOL(netif_device_attach); 3168 3169 /* 3170 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3171 * to be used as a distribution range. 3172 */ 3173 static u16 skb_tx_hash(const struct net_device *dev, 3174 const struct net_device *sb_dev, 3175 struct sk_buff *skb) 3176 { 3177 u32 hash; 3178 u16 qoffset = 0; 3179 u16 qcount = dev->real_num_tx_queues; 3180 3181 if (dev->num_tc) { 3182 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3183 3184 qoffset = sb_dev->tc_to_txq[tc].offset; 3185 qcount = sb_dev->tc_to_txq[tc].count; 3186 } 3187 3188 if (skb_rx_queue_recorded(skb)) { 3189 hash = skb_get_rx_queue(skb); 3190 if (hash >= qoffset) 3191 hash -= qoffset; 3192 while (unlikely(hash >= qcount)) 3193 hash -= qcount; 3194 return hash + qoffset; 3195 } 3196 3197 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3198 } 3199 3200 static void skb_warn_bad_offload(const struct sk_buff *skb) 3201 { 3202 static const netdev_features_t null_features; 3203 struct net_device *dev = skb->dev; 3204 const char *name = ""; 3205 3206 if (!net_ratelimit()) 3207 return; 3208 3209 if (dev) { 3210 if (dev->dev.parent) 3211 name = dev_driver_string(dev->dev.parent); 3212 else 3213 name = netdev_name(dev); 3214 } 3215 skb_dump(KERN_WARNING, skb, false); 3216 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3217 name, dev ? &dev->features : &null_features, 3218 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3219 } 3220 3221 /* 3222 * Invalidate hardware checksum when packet is to be mangled, and 3223 * complete checksum manually on outgoing path. 3224 */ 3225 int skb_checksum_help(struct sk_buff *skb) 3226 { 3227 __wsum csum; 3228 int ret = 0, offset; 3229 3230 if (skb->ip_summed == CHECKSUM_COMPLETE) 3231 goto out_set_summed; 3232 3233 if (unlikely(skb_is_gso(skb))) { 3234 skb_warn_bad_offload(skb); 3235 return -EINVAL; 3236 } 3237 3238 /* Before computing a checksum, we should make sure no frag could 3239 * be modified by an external entity : checksum could be wrong. 
3240 */ 3241 if (skb_has_shared_frag(skb)) { 3242 ret = __skb_linearize(skb); 3243 if (ret) 3244 goto out; 3245 } 3246 3247 offset = skb_checksum_start_offset(skb); 3248 BUG_ON(offset >= skb_headlen(skb)); 3249 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3250 3251 offset += skb->csum_offset; 3252 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 3253 3254 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3255 if (ret) 3256 goto out; 3257 3258 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3259 out_set_summed: 3260 skb->ip_summed = CHECKSUM_NONE; 3261 out: 3262 return ret; 3263 } 3264 EXPORT_SYMBOL(skb_checksum_help); 3265 3266 int skb_crc32c_csum_help(struct sk_buff *skb) 3267 { 3268 __le32 crc32c_csum; 3269 int ret = 0, offset, start; 3270 3271 if (skb->ip_summed != CHECKSUM_PARTIAL) 3272 goto out; 3273 3274 if (unlikely(skb_is_gso(skb))) 3275 goto out; 3276 3277 /* Before computing a checksum, we should make sure no frag could 3278 * be modified by an external entity : checksum could be wrong. 3279 */ 3280 if (unlikely(skb_has_shared_frag(skb))) { 3281 ret = __skb_linearize(skb); 3282 if (ret) 3283 goto out; 3284 } 3285 start = skb_checksum_start_offset(skb); 3286 offset = start + offsetof(struct sctphdr, checksum); 3287 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3288 ret = -EINVAL; 3289 goto out; 3290 } 3291 3292 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3293 if (ret) 3294 goto out; 3295 3296 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3297 skb->len - start, ~(__u32)0, 3298 crc32c_csum_stub)); 3299 *(__le32 *)(skb->data + offset) = crc32c_csum; 3300 skb->ip_summed = CHECKSUM_NONE; 3301 skb->csum_not_inet = 0; 3302 out: 3303 return ret; 3304 } 3305 3306 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3307 { 3308 __be16 type = skb->protocol; 3309 3310 /* Tunnel gso handlers can set protocol to ethernet. */ 3311 if (type == htons(ETH_P_TEB)) { 3312 struct ethhdr *eth; 3313 3314 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3315 return 0; 3316 3317 eth = (struct ethhdr *)skb->data; 3318 type = eth->h_proto; 3319 } 3320 3321 return __vlan_get_protocol(skb, type, depth); 3322 } 3323 3324 /** 3325 * skb_mac_gso_segment - mac layer segmentation handler. 3326 * @skb: buffer to segment 3327 * @features: features for the output path (see dev->features) 3328 */ 3329 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3330 netdev_features_t features) 3331 { 3332 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3333 struct packet_offload *ptype; 3334 int vlan_depth = skb->mac_len; 3335 __be16 type = skb_network_protocol(skb, &vlan_depth); 3336 3337 if (unlikely(!type)) 3338 return ERR_PTR(-EINVAL); 3339 3340 __skb_pull(skb, vlan_depth); 3341 3342 rcu_read_lock(); 3343 list_for_each_entry_rcu(ptype, &offload_base, list) { 3344 if (ptype->type == type && ptype->callbacks.gso_segment) { 3345 segs = ptype->callbacks.gso_segment(skb, features); 3346 break; 3347 } 3348 } 3349 rcu_read_unlock(); 3350 3351 __skb_push(skb, skb->data - skb_mac_header(skb)); 3352 3353 return segs; 3354 } 3355 EXPORT_SYMBOL(skb_mac_gso_segment); 3356 3357 3358 /* openvswitch calls this on rx path, so we need a different check. 
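 * On the tx path the check fires unless the skb is already
 * CHECKSUM_PARTIAL or CHECKSUM_UNNECESSARY; on the rx path only
 * CHECKSUM_NONE triggers it.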
3359 */ 3360 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3361 { 3362 if (tx_path) 3363 return skb->ip_summed != CHECKSUM_PARTIAL && 3364 skb->ip_summed != CHECKSUM_UNNECESSARY; 3365 3366 return skb->ip_summed == CHECKSUM_NONE; 3367 } 3368 3369 /** 3370 * __skb_gso_segment - Perform segmentation on skb. 3371 * @skb: buffer to segment 3372 * @features: features for the output path (see dev->features) 3373 * @tx_path: whether it is called in TX path 3374 * 3375 * This function segments the given skb and returns a list of segments. 3376 * 3377 * It may return NULL if the skb requires no segmentation. This is 3378 * only possible when GSO is used for verifying header integrity. 3379 * 3380 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 3381 */ 3382 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3383 netdev_features_t features, bool tx_path) 3384 { 3385 struct sk_buff *segs; 3386 3387 if (unlikely(skb_needs_check(skb, tx_path))) { 3388 int err; 3389 3390 /* We're going to init ->check field in TCP or UDP header */ 3391 err = skb_cow_head(skb, 0); 3392 if (err < 0) 3393 return ERR_PTR(err); 3394 } 3395 3396 /* Only report GSO partial support if it will enable us to 3397 * support segmentation on this frame without needing additional 3398 * work. 3399 */ 3400 if (features & NETIF_F_GSO_PARTIAL) { 3401 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3402 struct net_device *dev = skb->dev; 3403 3404 partial_features |= dev->features & dev->gso_partial_features; 3405 if (!skb_gso_ok(skb, features | partial_features)) 3406 features &= ~NETIF_F_GSO_PARTIAL; 3407 } 3408 3409 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3410 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3411 3412 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3413 SKB_GSO_CB(skb)->encap_level = 0; 3414 3415 skb_reset_mac_header(skb); 3416 skb_reset_mac_len(skb); 3417 3418 segs = skb_mac_gso_segment(skb, features); 3419 3420 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3421 skb_warn_bad_offload(skb); 3422 3423 return segs; 3424 } 3425 EXPORT_SYMBOL(__skb_gso_segment); 3426 3427 /* Take action when hardware reception checksum errors are detected. */ 3428 #ifdef CONFIG_BUG 3429 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3430 { 3431 if (net_ratelimit()) { 3432 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3433 skb_dump(KERN_ERR, skb, true); 3434 dump_stack(); 3435 } 3436 } 3437 EXPORT_SYMBOL(netdev_rx_csum_fault); 3438 #endif 3439 3440 /* XXX: check that highmem exists at all on the given machine. */ 3441 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3442 { 3443 #ifdef CONFIG_HIGHMEM 3444 int i; 3445 3446 if (!(dev->features & NETIF_F_HIGHDMA)) { 3447 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3448 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3449 3450 if (PageHighMem(skb_frag_page(frag))) 3451 return 1; 3452 } 3453 } 3454 #endif 3455 return 0; 3456 } 3457 3458 /* If MPLS offload request, verify we are testing hardware MPLS features 3459 * instead of standard features for the netdev. 
3460 */ 3461 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3462 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3463 netdev_features_t features, 3464 __be16 type) 3465 { 3466 if (eth_p_mpls(type)) 3467 features &= skb->dev->mpls_features; 3468 3469 return features; 3470 } 3471 #else 3472 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3473 netdev_features_t features, 3474 __be16 type) 3475 { 3476 return features; 3477 } 3478 #endif 3479 3480 static netdev_features_t harmonize_features(struct sk_buff *skb, 3481 netdev_features_t features) 3482 { 3483 __be16 type; 3484 3485 type = skb_network_protocol(skb, NULL); 3486 features = net_mpls_features(skb, features, type); 3487 3488 if (skb->ip_summed != CHECKSUM_NONE && 3489 !can_checksum_protocol(features, type)) { 3490 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3491 } 3492 if (illegal_highdma(skb->dev, skb)) 3493 features &= ~NETIF_F_SG; 3494 3495 return features; 3496 } 3497 3498 netdev_features_t passthru_features_check(struct sk_buff *skb, 3499 struct net_device *dev, 3500 netdev_features_t features) 3501 { 3502 return features; 3503 } 3504 EXPORT_SYMBOL(passthru_features_check); 3505 3506 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3507 struct net_device *dev, 3508 netdev_features_t features) 3509 { 3510 return vlan_features_check(skb, features); 3511 } 3512 3513 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3514 struct net_device *dev, 3515 netdev_features_t features) 3516 { 3517 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3518 3519 if (gso_segs > dev->gso_max_segs) 3520 return features & ~NETIF_F_GSO_MASK; 3521 3522 if (!skb_shinfo(skb)->gso_type) { 3523 skb_warn_bad_offload(skb); 3524 return features & ~NETIF_F_GSO_MASK; 3525 } 3526 3527 /* Support for GSO partial features requires software 3528 * intervention before we can actually process the packets 3529 * so we need to strip support for any partial features now 3530 * and we can pull them back in after we have partially 3531 * segmented the frame. 3532 */ 3533 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3534 features &= ~dev->gso_partial_features; 3535 3536 /* Make sure to clear the IPv4 ID mangling feature if the 3537 * IPv4 header has the potential to be fragmented. 3538 */ 3539 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3540 struct iphdr *iph = skb->encapsulation ? 
3541 inner_ip_hdr(skb) : ip_hdr(skb); 3542 3543 if (!(iph->frag_off & htons(IP_DF))) 3544 features &= ~NETIF_F_TSO_MANGLEID; 3545 } 3546 3547 return features; 3548 } 3549 3550 netdev_features_t netif_skb_features(struct sk_buff *skb) 3551 { 3552 struct net_device *dev = skb->dev; 3553 netdev_features_t features = dev->features; 3554 3555 if (skb_is_gso(skb)) 3556 features = gso_features_check(skb, dev, features); 3557 3558 /* If encapsulation offload request, verify we are testing 3559 * hardware encapsulation features instead of standard 3560 * features for the netdev 3561 */ 3562 if (skb->encapsulation) 3563 features &= dev->hw_enc_features; 3564 3565 if (skb_vlan_tagged(skb)) 3566 features = netdev_intersect_features(features, 3567 dev->vlan_features | 3568 NETIF_F_HW_VLAN_CTAG_TX | 3569 NETIF_F_HW_VLAN_STAG_TX); 3570 3571 if (dev->netdev_ops->ndo_features_check) 3572 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3573 features); 3574 else 3575 features &= dflt_features_check(skb, dev, features); 3576 3577 return harmonize_features(skb, features); 3578 } 3579 EXPORT_SYMBOL(netif_skb_features); 3580 3581 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3582 struct netdev_queue *txq, bool more) 3583 { 3584 unsigned int len; 3585 int rc; 3586 3587 if (dev_nit_active(dev)) 3588 dev_queue_xmit_nit(skb, dev); 3589 3590 len = skb->len; 3591 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies); 3592 trace_net_dev_start_xmit(skb, dev); 3593 rc = netdev_start_xmit(skb, dev, txq, more); 3594 trace_net_dev_xmit(skb, rc, dev, len); 3595 3596 return rc; 3597 } 3598 3599 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3600 struct netdev_queue *txq, int *ret) 3601 { 3602 struct sk_buff *skb = first; 3603 int rc = NETDEV_TX_OK; 3604 3605 while (skb) { 3606 struct sk_buff *next = skb->next; 3607 3608 skb_mark_not_on_list(skb); 3609 rc = xmit_one(skb, dev, txq, next != NULL); 3610 if (unlikely(!dev_xmit_complete(rc))) { 3611 skb->next = next; 3612 goto out; 3613 } 3614 3615 skb = next; 3616 if (netif_tx_queue_stopped(txq) && skb) { 3617 rc = NETDEV_TX_BUSY; 3618 break; 3619 } 3620 } 3621 3622 out: 3623 *ret = rc; 3624 return skb; 3625 } 3626 3627 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3628 netdev_features_t features) 3629 { 3630 if (skb_vlan_tag_present(skb) && 3631 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3632 skb = __vlan_hwaccel_push_inside(skb); 3633 return skb; 3634 } 3635 3636 int skb_csum_hwoffload_help(struct sk_buff *skb, 3637 const netdev_features_t features) 3638 { 3639 if (unlikely(skb_csum_is_sctp(skb))) 3640 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3641 skb_crc32c_csum_help(skb); 3642 3643 if (features & NETIF_F_HW_CSUM) 3644 return 0; 3645 3646 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3647 switch (skb->csum_offset) { 3648 case offsetof(struct tcphdr, check): 3649 case offsetof(struct udphdr, check): 3650 return 0; 3651 } 3652 } 3653 3654 return skb_checksum_help(skb); 3655 } 3656 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3657 3658 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3659 { 3660 netdev_features_t features; 3661 3662 features = netif_skb_features(skb); 3663 skb = validate_xmit_vlan(skb, features); 3664 if (unlikely(!skb)) 3665 goto out_null; 3666 3667 skb = sk_validate_xmit_skb(skb, dev); 3668 if (unlikely(!skb)) 3669 goto out_null; 3670 3671 if (netif_needs_gso(skb, features)) { 3672 struct sk_buff *segs; 3673 3674 segs = skb_gso_segment(skb, features); 3675 if (IS_ERR(segs)) { 3676 goto out_kfree_skb; 3677 } else if (segs) { 3678 consume_skb(skb); 3679 skb = segs; 3680 } 3681 } else { 3682 if (skb_needs_linearize(skb, features) && 3683 __skb_linearize(skb)) 3684 goto out_kfree_skb; 3685 3686 /* If packet is not checksummed and device does not 3687 * support checksumming for this protocol, complete 3688 * checksumming here. 3689 */ 3690 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3691 if (skb->encapsulation) 3692 skb_set_inner_transport_header(skb, 3693 skb_checksum_start_offset(skb)); 3694 else 3695 skb_set_transport_header(skb, 3696 skb_checksum_start_offset(skb)); 3697 if (skb_csum_hwoffload_help(skb, features)) 3698 goto out_kfree_skb; 3699 } 3700 } 3701 3702 skb = validate_xmit_xfrm(skb, features, again); 3703 3704 return skb; 3705 3706 out_kfree_skb: 3707 kfree_skb(skb); 3708 out_null: 3709 atomic_long_inc(&dev->tx_dropped); 3710 return NULL; 3711 } 3712 3713 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3714 { 3715 struct sk_buff *next, *head = NULL, *tail; 3716 3717 for (; skb != NULL; skb = next) { 3718 next = skb->next; 3719 skb_mark_not_on_list(skb); 3720 3721 /* in case skb wont be segmented, point to itself */ 3722 skb->prev = skb; 3723 3724 skb = validate_xmit_skb(skb, dev, again); 3725 if (!skb) 3726 continue; 3727 3728 if (!head) 3729 head = skb; 3730 else 3731 tail->next = skb; 3732 /* If skb was segmented, skb->prev points to 3733 * the last segment. If not, it still contains skb. 
3734 */ 3735 tail = skb->prev; 3736 } 3737 return head; 3738 } 3739 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3740 3741 static void qdisc_pkt_len_init(struct sk_buff *skb) 3742 { 3743 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3744 3745 qdisc_skb_cb(skb)->pkt_len = skb->len; 3746 3747 /* To get more precise estimation of bytes sent on wire, 3748 * we add to pkt_len the headers size of all segments 3749 */ 3750 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3751 unsigned int hdr_len; 3752 u16 gso_segs = shinfo->gso_segs; 3753 3754 /* mac layer + network layer */ 3755 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3756 3757 /* + transport layer */ 3758 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3759 const struct tcphdr *th; 3760 struct tcphdr _tcphdr; 3761 3762 th = skb_header_pointer(skb, skb_transport_offset(skb), 3763 sizeof(_tcphdr), &_tcphdr); 3764 if (likely(th)) 3765 hdr_len += __tcp_hdrlen(th); 3766 } else { 3767 struct udphdr _udphdr; 3768 3769 if (skb_header_pointer(skb, skb_transport_offset(skb), 3770 sizeof(_udphdr), &_udphdr)) 3771 hdr_len += sizeof(struct udphdr); 3772 } 3773 3774 if (shinfo->gso_type & SKB_GSO_DODGY) 3775 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3776 shinfo->gso_size); 3777 3778 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3779 } 3780 } 3781 3782 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3783 struct net_device *dev, 3784 struct netdev_queue *txq) 3785 { 3786 spinlock_t *root_lock = qdisc_lock(q); 3787 struct sk_buff *to_free = NULL; 3788 bool contended; 3789 int rc; 3790 3791 qdisc_calculate_pkt_len(skb, q); 3792 3793 if (q->flags & TCQ_F_NOLOCK) { 3794 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3795 qdisc_run(q); 3796 3797 if (unlikely(to_free)) 3798 kfree_skb_list(to_free); 3799 return rc; 3800 } 3801 3802 /* 3803 * Heuristic to force contended enqueues to serialize on a 3804 * separate lock before trying to get qdisc main lock. 3805 * This permits qdisc->running owner to get the lock more 3806 * often and dequeue packets faster. 3807 */ 3808 contended = qdisc_is_running(q); 3809 if (unlikely(contended)) 3810 spin_lock(&q->busylock); 3811 3812 spin_lock(root_lock); 3813 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3814 __qdisc_drop(skb, &to_free); 3815 rc = NET_XMIT_DROP; 3816 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3817 qdisc_run_begin(q)) { 3818 /* 3819 * This is a work-conserving queue; there are no old skbs 3820 * waiting to be sent out; and the qdisc is not running - 3821 * xmit the skb directly. 
3822 */ 3823 3824 qdisc_bstats_update(q, skb); 3825 3826 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3827 if (unlikely(contended)) { 3828 spin_unlock(&q->busylock); 3829 contended = false; 3830 } 3831 __qdisc_run(q); 3832 } 3833 3834 qdisc_run_end(q); 3835 rc = NET_XMIT_SUCCESS; 3836 } else { 3837 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3838 if (qdisc_run_begin(q)) { 3839 if (unlikely(contended)) { 3840 spin_unlock(&q->busylock); 3841 contended = false; 3842 } 3843 __qdisc_run(q); 3844 qdisc_run_end(q); 3845 } 3846 } 3847 spin_unlock(root_lock); 3848 if (unlikely(to_free)) 3849 kfree_skb_list(to_free); 3850 if (unlikely(contended)) 3851 spin_unlock(&q->busylock); 3852 return rc; 3853 } 3854 3855 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3856 static void skb_update_prio(struct sk_buff *skb) 3857 { 3858 const struct netprio_map *map; 3859 const struct sock *sk; 3860 unsigned int prioidx; 3861 3862 if (skb->priority) 3863 return; 3864 map = rcu_dereference_bh(skb->dev->priomap); 3865 if (!map) 3866 return; 3867 sk = skb_to_full_sk(skb); 3868 if (!sk) 3869 return; 3870 3871 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3872 3873 if (prioidx < map->priomap_len) 3874 skb->priority = map->priomap[prioidx]; 3875 } 3876 #else 3877 #define skb_update_prio(skb) 3878 #endif 3879 3880 /** 3881 * dev_loopback_xmit - loop back @skb 3882 * @net: network namespace this loopback is happening in 3883 * @sk: sk needed to be a netfilter okfn 3884 * @skb: buffer to transmit 3885 */ 3886 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3887 { 3888 skb_reset_mac_header(skb); 3889 __skb_pull(skb, skb_network_offset(skb)); 3890 skb->pkt_type = PACKET_LOOPBACK; 3891 skb->ip_summed = CHECKSUM_UNNECESSARY; 3892 WARN_ON(!skb_dst(skb)); 3893 skb_dst_force(skb); 3894 netif_rx_ni(skb); 3895 return 0; 3896 } 3897 EXPORT_SYMBOL(dev_loopback_xmit); 3898 3899 #ifdef CONFIG_NET_EGRESS 3900 static struct sk_buff * 3901 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3902 { 3903 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3904 struct tcf_result cl_res; 3905 3906 if (!miniq) 3907 return skb; 3908 3909 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3910 qdisc_skb_cb(skb)->mru = 0; 3911 qdisc_skb_cb(skb)->post_ct = false; 3912 mini_qdisc_bstats_cpu_update(miniq, skb); 3913 3914 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 3915 case TC_ACT_OK: 3916 case TC_ACT_RECLASSIFY: 3917 skb->tc_index = TC_H_MIN(cl_res.classid); 3918 break; 3919 case TC_ACT_SHOT: 3920 mini_qdisc_qstats_cpu_drop(miniq); 3921 *ret = NET_XMIT_DROP; 3922 kfree_skb(skb); 3923 return NULL; 3924 case TC_ACT_STOLEN: 3925 case TC_ACT_QUEUED: 3926 case TC_ACT_TRAP: 3927 *ret = NET_XMIT_SUCCESS; 3928 consume_skb(skb); 3929 return NULL; 3930 case TC_ACT_REDIRECT: 3931 /* No need to push/pop skb's mac_header here on egress! 
*/ 3932 skb_do_redirect(skb); 3933 *ret = NET_XMIT_SUCCESS; 3934 return NULL; 3935 default: 3936 break; 3937 } 3938 3939 return skb; 3940 } 3941 #endif /* CONFIG_NET_EGRESS */ 3942 3943 #ifdef CONFIG_XPS 3944 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3945 struct xps_dev_maps *dev_maps, unsigned int tci) 3946 { 3947 struct xps_map *map; 3948 int queue_index = -1; 3949 3950 if (dev->num_tc) { 3951 tci *= dev->num_tc; 3952 tci += netdev_get_prio_tc_map(dev, skb->priority); 3953 } 3954 3955 map = rcu_dereference(dev_maps->attr_map[tci]); 3956 if (map) { 3957 if (map->len == 1) 3958 queue_index = map->queues[0]; 3959 else 3960 queue_index = map->queues[reciprocal_scale( 3961 skb_get_hash(skb), map->len)]; 3962 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3963 queue_index = -1; 3964 } 3965 return queue_index; 3966 } 3967 #endif 3968 3969 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3970 struct sk_buff *skb) 3971 { 3972 #ifdef CONFIG_XPS 3973 struct xps_dev_maps *dev_maps; 3974 struct sock *sk = skb->sk; 3975 int queue_index = -1; 3976 3977 if (!static_key_false(&xps_needed)) 3978 return -1; 3979 3980 rcu_read_lock(); 3981 if (!static_key_false(&xps_rxqs_needed)) 3982 goto get_cpus_map; 3983 3984 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); 3985 if (dev_maps) { 3986 int tci = sk_rx_queue_get(sk); 3987 3988 if (tci >= 0 && tci < dev->num_rx_queues) 3989 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3990 tci); 3991 } 3992 3993 get_cpus_map: 3994 if (queue_index < 0) { 3995 dev_maps = rcu_dereference(sb_dev->xps_cpus_map); 3996 if (dev_maps) { 3997 unsigned int tci = skb->sender_cpu - 1; 3998 3999 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4000 tci); 4001 } 4002 } 4003 rcu_read_unlock(); 4004 4005 return queue_index; 4006 #else 4007 return -1; 4008 #endif 4009 } 4010 4011 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4012 struct net_device *sb_dev) 4013 { 4014 return 0; 4015 } 4016 EXPORT_SYMBOL(dev_pick_tx_zero); 4017 4018 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4019 struct net_device *sb_dev) 4020 { 4021 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4022 } 4023 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4024 4025 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4026 struct net_device *sb_dev) 4027 { 4028 struct sock *sk = skb->sk; 4029 int queue_index = sk_tx_queue_get(sk); 4030 4031 sb_dev = sb_dev ? 
: dev;
4032
4033 if (queue_index < 0 || skb->ooo_okay ||
4034 queue_index >= dev->real_num_tx_queues) {
4035 int new_index = get_xps_queue(dev, sb_dev, skb);
4036
4037 if (new_index < 0)
4038 new_index = skb_tx_hash(dev, sb_dev, skb);
4039
4040 if (queue_index != new_index && sk &&
4041 sk_fullsock(sk) &&
4042 rcu_access_pointer(sk->sk_dst_cache))
4043 sk_tx_queue_set(sk, new_index);
4044
4045 queue_index = new_index;
4046 }
4047
4048 return queue_index;
4049 }
4050 EXPORT_SYMBOL(netdev_pick_tx);
4051
4052 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4053 struct sk_buff *skb,
4054 struct net_device *sb_dev)
4055 {
4056 int queue_index = 0;
4057
4058 #ifdef CONFIG_XPS
4059 u32 sender_cpu = skb->sender_cpu - 1;
4060
4061 if (sender_cpu >= (u32)NR_CPUS)
4062 skb->sender_cpu = raw_smp_processor_id() + 1;
4063 #endif
4064
4065 if (dev->real_num_tx_queues != 1) {
4066 const struct net_device_ops *ops = dev->netdev_ops;
4067
4068 if (ops->ndo_select_queue)
4069 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4070 else
4071 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4072
4073 queue_index = netdev_cap_txqueue(dev, queue_index);
4074 }
4075
4076 skb_set_queue_mapping(skb, queue_index);
4077 return netdev_get_tx_queue(dev, queue_index);
4078 }
4079
4080 /**
4081 * __dev_queue_xmit - transmit a buffer
4082 * @skb: buffer to transmit
4083 * @sb_dev: subordinate device used for L2 forwarding offload
4084 *
4085 * Queue a buffer for transmission to a network device. The caller must
4086 * have set the device and priority and built the buffer before calling
4087 * this function. The function can be called from an interrupt.
4088 *
4089 * A negative errno code is returned on a failure. A success does not
4090 * guarantee the frame will be transmitted as it may be dropped due
4091 * to congestion or traffic shaping.
4092 *
4093 * -----------------------------------------------------------------------------------
4094 * Note that this method can also return errors from the queue disciplines,
4095 * including NET_XMIT_DROP, which is a positive value, so errors can also
4096 * be positive.
4097 *
4098 * Regardless of the return value, the skb is consumed, so it is currently
4099 * difficult to retry a send to this method. (You can bump the ref count
4100 * before sending to hold a reference for retry if you are careful.)
4101 *
4102 * When calling this method, interrupts MUST be enabled. This is because
4103 * the BH enable code must have IRQs enabled so that it will not deadlock.
4104 * --BLG
4105 */
4106 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4107 {
4108 struct net_device *dev = skb->dev;
4109 struct netdev_queue *txq;
4110 struct Qdisc *q;
4111 int rc = -ENOMEM;
4112 bool again = false;
4113
4114 skb_reset_mac_header(skb);
4115
4116 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4117 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4118
4119 /* Disable soft irqs for various locks below. Also
4120 * stops preemption for RCU.
4121 */
4122 rcu_read_lock_bh();
4123
4124 skb_update_prio(skb);
4125
4126 qdisc_pkt_len_init(skb);
4127 #ifdef CONFIG_NET_CLS_ACT
4128 skb->tc_at_ingress = 0;
4129 # ifdef CONFIG_NET_EGRESS
4130 if (static_branch_unlikely(&egress_needed_key)) {
4131 skb = sch_handle_egress(skb, &rc, dev);
4132 if (!skb)
4133 goto out;
4134 }
4135 # endif
4136 #endif
4137 /* If device/qdisc don't need skb->dst, release it right now while
4138 * it's hot in this CPU cache.
4139 */ 4140 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4141 skb_dst_drop(skb); 4142 else 4143 skb_dst_force(skb); 4144 4145 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4146 q = rcu_dereference_bh(txq->qdisc); 4147 4148 trace_net_dev_queue(skb); 4149 if (q->enqueue) { 4150 rc = __dev_xmit_skb(skb, q, dev, txq); 4151 goto out; 4152 } 4153 4154 /* The device has no queue. Common case for software devices: 4155 * loopback, all the sorts of tunnels... 4156 4157 * Really, it is unlikely that netif_tx_lock protection is necessary 4158 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 4159 * counters.) 4160 * However, it is possible, that they rely on protection 4161 * made by us here. 4162 4163 * Check this and shot the lock. It is not prone from deadlocks. 4164 *Either shot noqueue qdisc, it is even simpler 8) 4165 */ 4166 if (dev->flags & IFF_UP) { 4167 int cpu = smp_processor_id(); /* ok because BHs are off */ 4168 4169 if (txq->xmit_lock_owner != cpu) { 4170 if (dev_xmit_recursion()) 4171 goto recursion_alert; 4172 4173 skb = validate_xmit_skb(skb, dev, &again); 4174 if (!skb) 4175 goto out; 4176 4177 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4178 HARD_TX_LOCK(dev, txq, cpu); 4179 4180 if (!netif_xmit_stopped(txq)) { 4181 dev_xmit_recursion_inc(); 4182 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4183 dev_xmit_recursion_dec(); 4184 if (dev_xmit_complete(rc)) { 4185 HARD_TX_UNLOCK(dev, txq); 4186 goto out; 4187 } 4188 } 4189 HARD_TX_UNLOCK(dev, txq); 4190 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4191 dev->name); 4192 } else { 4193 /* Recursion is detected! It is possible, 4194 * unfortunately 4195 */ 4196 recursion_alert: 4197 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4198 dev->name); 4199 } 4200 } 4201 4202 rc = -ENETDOWN; 4203 rcu_read_unlock_bh(); 4204 4205 atomic_long_inc(&dev->tx_dropped); 4206 kfree_skb_list(skb); 4207 return rc; 4208 out: 4209 rcu_read_unlock_bh(); 4210 return rc; 4211 } 4212 4213 int dev_queue_xmit(struct sk_buff *skb) 4214 { 4215 return __dev_queue_xmit(skb, NULL); 4216 } 4217 EXPORT_SYMBOL(dev_queue_xmit); 4218 4219 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 4220 { 4221 return __dev_queue_xmit(skb, sb_dev); 4222 } 4223 EXPORT_SYMBOL(dev_queue_xmit_accel); 4224 4225 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4226 { 4227 struct net_device *dev = skb->dev; 4228 struct sk_buff *orig_skb = skb; 4229 struct netdev_queue *txq; 4230 int ret = NETDEV_TX_BUSY; 4231 bool again = false; 4232 4233 if (unlikely(!netif_running(dev) || 4234 !netif_carrier_ok(dev))) 4235 goto drop; 4236 4237 skb = validate_xmit_skb_list(skb, dev, &again); 4238 if (skb != orig_skb) 4239 goto drop; 4240 4241 skb_set_queue_mapping(skb, queue_id); 4242 txq = skb_get_tx_queue(dev, skb); 4243 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4244 4245 local_bh_disable(); 4246 4247 dev_xmit_recursion_inc(); 4248 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4249 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4250 ret = netdev_start_xmit(skb, dev, txq, false); 4251 HARD_TX_UNLOCK(dev, txq); 4252 dev_xmit_recursion_dec(); 4253 4254 local_bh_enable(); 4255 return ret; 4256 drop: 4257 atomic_long_inc(&dev->tx_dropped); 4258 kfree_skb_list(skb); 4259 return NET_XMIT_DROP; 4260 } 4261 EXPORT_SYMBOL(__dev_direct_xmit); 4262 4263 /************************************************************************* 4264 * Receiver routines 4265 
*************************************************************************/ 4266 4267 int netdev_max_backlog __read_mostly = 1000; 4268 EXPORT_SYMBOL(netdev_max_backlog); 4269 4270 int netdev_tstamp_prequeue __read_mostly = 1; 4271 int netdev_budget __read_mostly = 300; 4272 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4273 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4274 int weight_p __read_mostly = 64; /* old backlog weight */ 4275 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4276 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4277 int dev_rx_weight __read_mostly = 64; 4278 int dev_tx_weight __read_mostly = 64; 4279 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ 4280 int gro_normal_batch __read_mostly = 8; 4281 4282 /* Called with irq disabled */ 4283 static inline void ____napi_schedule(struct softnet_data *sd, 4284 struct napi_struct *napi) 4285 { 4286 struct task_struct *thread; 4287 4288 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4289 /* Paired with smp_mb__before_atomic() in 4290 * napi_enable()/dev_set_threaded(). 4291 * Use READ_ONCE() to guarantee a complete 4292 * read on napi->thread. Only call 4293 * wake_up_process() when it's not NULL. 4294 */ 4295 thread = READ_ONCE(napi->thread); 4296 if (thread) { 4297 wake_up_process(thread); 4298 return; 4299 } 4300 } 4301 4302 list_add_tail(&napi->poll_list, &sd->poll_list); 4303 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4304 } 4305 4306 #ifdef CONFIG_RPS 4307 4308 /* One global table that all flow-based protocols share. */ 4309 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4310 EXPORT_SYMBOL(rps_sock_flow_table); 4311 u32 rps_cpu_mask __read_mostly; 4312 EXPORT_SYMBOL(rps_cpu_mask); 4313 4314 struct static_key_false rps_needed __read_mostly; 4315 EXPORT_SYMBOL(rps_needed); 4316 struct static_key_false rfs_needed __read_mostly; 4317 EXPORT_SYMBOL(rfs_needed); 4318 4319 static struct rps_dev_flow * 4320 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4321 struct rps_dev_flow *rflow, u16 next_cpu) 4322 { 4323 if (next_cpu < nr_cpu_ids) { 4324 #ifdef CONFIG_RFS_ACCEL 4325 struct netdev_rx_queue *rxqueue; 4326 struct rps_dev_flow_table *flow_table; 4327 struct rps_dev_flow *old_rflow; 4328 u32 flow_id; 4329 u16 rxq_index; 4330 int rc; 4331 4332 /* Should we steer this flow to a different hardware queue? */ 4333 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4334 !(dev->features & NETIF_F_NTUPLE)) 4335 goto out; 4336 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4337 if (rxq_index == skb_get_rx_queue(skb)) 4338 goto out; 4339 4340 rxqueue = dev->_rx + rxq_index; 4341 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4342 if (!flow_table) 4343 goto out; 4344 flow_id = skb_get_hash(skb) & flow_table->mask; 4345 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4346 rxq_index, flow_id); 4347 if (rc < 0) 4348 goto out; 4349 old_rflow = rflow; 4350 rflow = &flow_table->flows[flow_id]; 4351 rflow->filter = rc; 4352 if (old_rflow->filter == rflow->filter) 4353 old_rflow->filter = RPS_NO_FILTER; 4354 out: 4355 #endif 4356 rflow->last_qtail = 4357 per_cpu(softnet_data, next_cpu).input_queue_head; 4358 } 4359 4360 rflow->cpu = next_cpu; 4361 return rflow; 4362 } 4363 4364 /* 4365 * get_rps_cpu is called from netif_receive_skb and returns the target 4366 * CPU from the RPS map of the receiving queue for a given skb. 4367 * rcu_read_lock must be held on entry. 
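 * Returns the target CPU number, or -1 if no RPS/RFS steering decision
 * could be made; callers such as netif_rx_internal() then fall back to
 * the local CPU.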
4368 */ 4369 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4370 struct rps_dev_flow **rflowp) 4371 { 4372 const struct rps_sock_flow_table *sock_flow_table; 4373 struct netdev_rx_queue *rxqueue = dev->_rx; 4374 struct rps_dev_flow_table *flow_table; 4375 struct rps_map *map; 4376 int cpu = -1; 4377 u32 tcpu; 4378 u32 hash; 4379 4380 if (skb_rx_queue_recorded(skb)) { 4381 u16 index = skb_get_rx_queue(skb); 4382 4383 if (unlikely(index >= dev->real_num_rx_queues)) { 4384 WARN_ONCE(dev->real_num_rx_queues > 1, 4385 "%s received packet on queue %u, but number " 4386 "of RX queues is %u\n", 4387 dev->name, index, dev->real_num_rx_queues); 4388 goto done; 4389 } 4390 rxqueue += index; 4391 } 4392 4393 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4394 4395 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4396 map = rcu_dereference(rxqueue->rps_map); 4397 if (!flow_table && !map) 4398 goto done; 4399 4400 skb_reset_network_header(skb); 4401 hash = skb_get_hash(skb); 4402 if (!hash) 4403 goto done; 4404 4405 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4406 if (flow_table && sock_flow_table) { 4407 struct rps_dev_flow *rflow; 4408 u32 next_cpu; 4409 u32 ident; 4410 4411 /* First check into global flow table if there is a match */ 4412 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4413 if ((ident ^ hash) & ~rps_cpu_mask) 4414 goto try_rps; 4415 4416 next_cpu = ident & rps_cpu_mask; 4417 4418 /* OK, now we know there is a match, 4419 * we can look at the local (per receive queue) flow table 4420 */ 4421 rflow = &flow_table->flows[hash & flow_table->mask]; 4422 tcpu = rflow->cpu; 4423 4424 /* 4425 * If the desired CPU (where last recvmsg was done) is 4426 * different from current CPU (one in the rx-queue flow 4427 * table entry), switch if one of the following holds: 4428 * - Current CPU is unset (>= nr_cpu_ids). 4429 * - Current CPU is offline. 4430 * - The current CPU's queue tail has advanced beyond the 4431 * last packet that was enqueued using this table entry. 4432 * This guarantees that all previous packets for the flow 4433 * have been dequeued, thus preserving in order delivery. 4434 */ 4435 if (unlikely(tcpu != next_cpu) && 4436 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4437 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4438 rflow->last_qtail)) >= 0)) { 4439 tcpu = next_cpu; 4440 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4441 } 4442 4443 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4444 *rflowp = rflow; 4445 cpu = tcpu; 4446 goto done; 4447 } 4448 } 4449 4450 try_rps: 4451 4452 if (map) { 4453 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4454 if (cpu_online(tcpu)) { 4455 cpu = tcpu; 4456 goto done; 4457 } 4458 } 4459 4460 done: 4461 return cpu; 4462 } 4463 4464 #ifdef CONFIG_RFS_ACCEL 4465 4466 /** 4467 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4468 * @dev: Device on which the filter was set 4469 * @rxq_index: RX queue index 4470 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4471 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4472 * 4473 * Drivers that implement ndo_rx_flow_steer() should periodically call 4474 * this function for each installed filter and remove the filters for 4475 * which it returns %true. 
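 *
 * A minimal sketch of such a periodic scan (the priv/foo_* names and the
 * filter bookkeeping are hypothetical, not part of this file):
 *
 *	for (i = 0; i < priv->n_filters; i++)
 *		if (rps_may_expire_flow(priv->netdev, priv->filters[i].rxq_index,
 *					priv->filters[i].flow_id, i))
 *			foo_remove_ntuple_filter(priv, i);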
4476 */ 4477 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4478 u32 flow_id, u16 filter_id) 4479 { 4480 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4481 struct rps_dev_flow_table *flow_table; 4482 struct rps_dev_flow *rflow; 4483 bool expire = true; 4484 unsigned int cpu; 4485 4486 rcu_read_lock(); 4487 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4488 if (flow_table && flow_id <= flow_table->mask) { 4489 rflow = &flow_table->flows[flow_id]; 4490 cpu = READ_ONCE(rflow->cpu); 4491 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4492 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4493 rflow->last_qtail) < 4494 (int)(10 * flow_table->mask))) 4495 expire = false; 4496 } 4497 rcu_read_unlock(); 4498 return expire; 4499 } 4500 EXPORT_SYMBOL(rps_may_expire_flow); 4501 4502 #endif /* CONFIG_RFS_ACCEL */ 4503 4504 /* Called from hardirq (IPI) context */ 4505 static void rps_trigger_softirq(void *data) 4506 { 4507 struct softnet_data *sd = data; 4508 4509 ____napi_schedule(sd, &sd->backlog); 4510 sd->received_rps++; 4511 } 4512 4513 #endif /* CONFIG_RPS */ 4514 4515 /* 4516 * Check if this softnet_data structure is another cpu one 4517 * If yes, queue it to our IPI list and return 1 4518 * If no, return 0 4519 */ 4520 static int rps_ipi_queued(struct softnet_data *sd) 4521 { 4522 #ifdef CONFIG_RPS 4523 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4524 4525 if (sd != mysd) { 4526 sd->rps_ipi_next = mysd->rps_ipi_list; 4527 mysd->rps_ipi_list = sd; 4528 4529 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4530 return 1; 4531 } 4532 #endif /* CONFIG_RPS */ 4533 return 0; 4534 } 4535 4536 #ifdef CONFIG_NET_FLOW_LIMIT 4537 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4538 #endif 4539 4540 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4541 { 4542 #ifdef CONFIG_NET_FLOW_LIMIT 4543 struct sd_flow_limit *fl; 4544 struct softnet_data *sd; 4545 unsigned int old_flow, new_flow; 4546 4547 if (qlen < (netdev_max_backlog >> 1)) 4548 return false; 4549 4550 sd = this_cpu_ptr(&softnet_data); 4551 4552 rcu_read_lock(); 4553 fl = rcu_dereference(sd->flow_limit); 4554 if (fl) { 4555 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4556 old_flow = fl->history[fl->history_head]; 4557 fl->history[fl->history_head] = new_flow; 4558 4559 fl->history_head++; 4560 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4561 4562 if (likely(fl->buckets[old_flow])) 4563 fl->buckets[old_flow]--; 4564 4565 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4566 fl->count++; 4567 rcu_read_unlock(); 4568 return true; 4569 } 4570 } 4571 rcu_read_unlock(); 4572 #endif 4573 return false; 4574 } 4575 4576 /* 4577 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4578 * queue (may be a remote CPU queue). 
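 *
 * For example, netif_rx_internal() below resolves the target CPU first
 * (via RPS when enabled, otherwise the local CPU) and then does:
 *
 *	cpu = get_rps_cpu(skb->dev, skb, &rflow);
 *	if (cpu < 0)
 *		cpu = smp_processor_id();
 *	ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);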
4579 */ 4580 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4581 unsigned int *qtail) 4582 { 4583 struct softnet_data *sd; 4584 unsigned long flags; 4585 unsigned int qlen; 4586 4587 sd = &per_cpu(softnet_data, cpu); 4588 4589 local_irq_save(flags); 4590 4591 rps_lock(sd); 4592 if (!netif_running(skb->dev)) 4593 goto drop; 4594 qlen = skb_queue_len(&sd->input_pkt_queue); 4595 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4596 if (qlen) { 4597 enqueue: 4598 __skb_queue_tail(&sd->input_pkt_queue, skb); 4599 input_queue_tail_incr_save(sd, qtail); 4600 rps_unlock(sd); 4601 local_irq_restore(flags); 4602 return NET_RX_SUCCESS; 4603 } 4604 4605 /* Schedule NAPI for backlog device 4606 * We can use non atomic operation since we own the queue lock 4607 */ 4608 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4609 if (!rps_ipi_queued(sd)) 4610 ____napi_schedule(sd, &sd->backlog); 4611 } 4612 goto enqueue; 4613 } 4614 4615 drop: 4616 sd->dropped++; 4617 rps_unlock(sd); 4618 4619 local_irq_restore(flags); 4620 4621 atomic_long_inc(&skb->dev->rx_dropped); 4622 kfree_skb(skb); 4623 return NET_RX_DROP; 4624 } 4625 4626 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4627 { 4628 struct net_device *dev = skb->dev; 4629 struct netdev_rx_queue *rxqueue; 4630 4631 rxqueue = dev->_rx; 4632 4633 if (skb_rx_queue_recorded(skb)) { 4634 u16 index = skb_get_rx_queue(skb); 4635 4636 if (unlikely(index >= dev->real_num_rx_queues)) { 4637 WARN_ONCE(dev->real_num_rx_queues > 1, 4638 "%s received packet on queue %u, but number " 4639 "of RX queues is %u\n", 4640 dev->name, index, dev->real_num_rx_queues); 4641 4642 return rxqueue; /* Return first rxqueue */ 4643 } 4644 rxqueue += index; 4645 } 4646 return rxqueue; 4647 } 4648 4649 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4650 struct xdp_buff *xdp, 4651 struct bpf_prog *xdp_prog) 4652 { 4653 void *orig_data, *orig_data_end, *hard_start; 4654 struct netdev_rx_queue *rxqueue; 4655 u32 metalen, act = XDP_DROP; 4656 u32 mac_len, frame_sz; 4657 __be16 orig_eth_type; 4658 struct ethhdr *eth; 4659 bool orig_bcast; 4660 int off; 4661 4662 /* Reinjected packets coming from act_mirred or similar should 4663 * not get XDP generic processing. 4664 */ 4665 if (skb_is_redirected(skb)) 4666 return XDP_PASS; 4667 4668 /* XDP packets must be linear and must have sufficient headroom 4669 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4670 * native XDP provides, thus we need to do it here as well. 4671 */ 4672 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4673 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4674 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4675 int troom = skb->tail + skb->data_len - skb->end; 4676 4677 /* In case we have to go down the path and also linearize, 4678 * then lets do the pskb_expand_head() work just once here. 4679 */ 4680 if (pskb_expand_head(skb, 4681 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4682 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4683 goto do_drop; 4684 if (skb_linearize(skb)) 4685 goto do_drop; 4686 } 4687 4688 /* The XDP program wants to see the packet starting at the MAC 4689 * header. 
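 * xdp->data is therefore pointed at skb_mac_header(skb) below, and
 * xdp->data_end at the end of the linear data.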
4690 */ 4691 mac_len = skb->data - skb_mac_header(skb); 4692 hard_start = skb->data - skb_headroom(skb); 4693 4694 /* SKB "head" area always have tailroom for skb_shared_info */ 4695 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4696 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4697 4698 rxqueue = netif_get_rxqueue(skb); 4699 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4700 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4701 skb_headlen(skb) + mac_len, true); 4702 4703 orig_data_end = xdp->data_end; 4704 orig_data = xdp->data; 4705 eth = (struct ethhdr *)xdp->data; 4706 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4707 orig_eth_type = eth->h_proto; 4708 4709 act = bpf_prog_run_xdp(xdp_prog, xdp); 4710 4711 /* check if bpf_xdp_adjust_head was used */ 4712 off = xdp->data - orig_data; 4713 if (off) { 4714 if (off > 0) 4715 __skb_pull(skb, off); 4716 else if (off < 0) 4717 __skb_push(skb, -off); 4718 4719 skb->mac_header += off; 4720 skb_reset_network_header(skb); 4721 } 4722 4723 /* check if bpf_xdp_adjust_tail was used */ 4724 off = xdp->data_end - orig_data_end; 4725 if (off != 0) { 4726 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4727 skb->len += off; /* positive on grow, negative on shrink */ 4728 } 4729 4730 /* check if XDP changed eth hdr such SKB needs update */ 4731 eth = (struct ethhdr *)xdp->data; 4732 if ((orig_eth_type != eth->h_proto) || 4733 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4734 __skb_push(skb, ETH_HLEN); 4735 skb->protocol = eth_type_trans(skb, skb->dev); 4736 } 4737 4738 switch (act) { 4739 case XDP_REDIRECT: 4740 case XDP_TX: 4741 __skb_push(skb, mac_len); 4742 break; 4743 case XDP_PASS: 4744 metalen = xdp->data - xdp->data_meta; 4745 if (metalen) 4746 skb_metadata_set(skb, metalen); 4747 break; 4748 default: 4749 bpf_warn_invalid_xdp_action(act); 4750 fallthrough; 4751 case XDP_ABORTED: 4752 trace_xdp_exception(skb->dev, xdp_prog, act); 4753 fallthrough; 4754 case XDP_DROP: 4755 do_drop: 4756 kfree_skb(skb); 4757 break; 4758 } 4759 4760 return act; 4761 } 4762 4763 /* When doing generic XDP we have to bypass the qdisc layer and the 4764 * network taps in order to match in-driver-XDP behavior. 
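 * generic_xdp_tx() is reached when a generic XDP program returned XDP_TX
 * (see do_xdp_generic() below); by then the skb data has already been
 * pushed back to the MAC header.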
4765 */ 4766 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4767 { 4768 struct net_device *dev = skb->dev; 4769 struct netdev_queue *txq; 4770 bool free_skb = true; 4771 int cpu, rc; 4772 4773 txq = netdev_core_pick_tx(dev, skb, NULL); 4774 cpu = smp_processor_id(); 4775 HARD_TX_LOCK(dev, txq, cpu); 4776 if (!netif_xmit_stopped(txq)) { 4777 rc = netdev_start_xmit(skb, dev, txq, 0); 4778 if (dev_xmit_complete(rc)) 4779 free_skb = false; 4780 } 4781 HARD_TX_UNLOCK(dev, txq); 4782 if (free_skb) { 4783 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4784 kfree_skb(skb); 4785 } 4786 } 4787 4788 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4789 4790 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4791 { 4792 if (xdp_prog) { 4793 struct xdp_buff xdp; 4794 u32 act; 4795 int err; 4796 4797 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4798 if (act != XDP_PASS) { 4799 switch (act) { 4800 case XDP_REDIRECT: 4801 err = xdp_do_generic_redirect(skb->dev, skb, 4802 &xdp, xdp_prog); 4803 if (err) 4804 goto out_redir; 4805 break; 4806 case XDP_TX: 4807 generic_xdp_tx(skb, xdp_prog); 4808 break; 4809 } 4810 return XDP_DROP; 4811 } 4812 } 4813 return XDP_PASS; 4814 out_redir: 4815 kfree_skb(skb); 4816 return XDP_DROP; 4817 } 4818 EXPORT_SYMBOL_GPL(do_xdp_generic); 4819 4820 static int netif_rx_internal(struct sk_buff *skb) 4821 { 4822 int ret; 4823 4824 net_timestamp_check(netdev_tstamp_prequeue, skb); 4825 4826 trace_netif_rx(skb); 4827 4828 #ifdef CONFIG_RPS 4829 if (static_branch_unlikely(&rps_needed)) { 4830 struct rps_dev_flow voidflow, *rflow = &voidflow; 4831 int cpu; 4832 4833 preempt_disable(); 4834 rcu_read_lock(); 4835 4836 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4837 if (cpu < 0) 4838 cpu = smp_processor_id(); 4839 4840 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4841 4842 rcu_read_unlock(); 4843 preempt_enable(); 4844 } else 4845 #endif 4846 { 4847 unsigned int qtail; 4848 4849 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4850 put_cpu(); 4851 } 4852 return ret; 4853 } 4854 4855 /** 4856 * netif_rx - post buffer to the network code 4857 * @skb: buffer to post 4858 * 4859 * This function receives a packet from a device driver and queues it for 4860 * the upper (protocol) levels to process. It always succeeds. The buffer 4861 * may be dropped during processing for congestion control or by the 4862 * protocol layers. 4863 * 4864 * return values: 4865 * NET_RX_SUCCESS (no congestion) 4866 * NET_RX_DROP (packet was dropped) 4867 * 4868 */ 4869 4870 int netif_rx(struct sk_buff *skb) 4871 { 4872 int ret; 4873 4874 trace_netif_rx_entry(skb); 4875 4876 ret = netif_rx_internal(skb); 4877 trace_netif_rx_exit(ret); 4878 4879 return ret; 4880 } 4881 EXPORT_SYMBOL(netif_rx); 4882 4883 int netif_rx_ni(struct sk_buff *skb) 4884 { 4885 int err; 4886 4887 trace_netif_rx_ni_entry(skb); 4888 4889 preempt_disable(); 4890 err = netif_rx_internal(skb); 4891 if (local_softirq_pending()) 4892 do_softirq(); 4893 preempt_enable(); 4894 trace_netif_rx_ni_exit(err); 4895 4896 return err; 4897 } 4898 EXPORT_SYMBOL(netif_rx_ni); 4899 4900 int netif_rx_any_context(struct sk_buff *skb) 4901 { 4902 /* 4903 * If invoked from contexts which do not invoke bottom half 4904 * processing either at return from interrupt or when softrqs are 4905 * reenabled, use netif_rx_ni() which invokes bottomhalf processing 4906 * directly. 
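 * A driver receive path that may run in either context can thus hand the
 * packet over with (sketch, assuming a fully built skb):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx_any_context(skb);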
4907 */ 4908 if (in_interrupt()) 4909 return netif_rx(skb); 4910 else 4911 return netif_rx_ni(skb); 4912 } 4913 EXPORT_SYMBOL(netif_rx_any_context); 4914 4915 static __latent_entropy void net_tx_action(struct softirq_action *h) 4916 { 4917 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4918 4919 if (sd->completion_queue) { 4920 struct sk_buff *clist; 4921 4922 local_irq_disable(); 4923 clist = sd->completion_queue; 4924 sd->completion_queue = NULL; 4925 local_irq_enable(); 4926 4927 while (clist) { 4928 struct sk_buff *skb = clist; 4929 4930 clist = clist->next; 4931 4932 WARN_ON(refcount_read(&skb->users)); 4933 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4934 trace_consume_skb(skb); 4935 else 4936 trace_kfree_skb(skb, net_tx_action); 4937 4938 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4939 __kfree_skb(skb); 4940 else 4941 __kfree_skb_defer(skb); 4942 } 4943 } 4944 4945 if (sd->output_queue) { 4946 struct Qdisc *head; 4947 4948 local_irq_disable(); 4949 head = sd->output_queue; 4950 sd->output_queue = NULL; 4951 sd->output_queue_tailp = &sd->output_queue; 4952 local_irq_enable(); 4953 4954 while (head) { 4955 struct Qdisc *q = head; 4956 spinlock_t *root_lock = NULL; 4957 4958 head = head->next_sched; 4959 4960 if (!(q->flags & TCQ_F_NOLOCK)) { 4961 root_lock = qdisc_lock(q); 4962 spin_lock(root_lock); 4963 } 4964 /* We need to make sure head->next_sched is read 4965 * before clearing __QDISC_STATE_SCHED 4966 */ 4967 smp_mb__before_atomic(); 4968 clear_bit(__QDISC_STATE_SCHED, &q->state); 4969 qdisc_run(q); 4970 if (root_lock) 4971 spin_unlock(root_lock); 4972 } 4973 } 4974 4975 xfrm_dev_backlog(sd); 4976 } 4977 4978 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4979 /* This hook is defined here for ATM LANE */ 4980 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4981 unsigned char *addr) __read_mostly; 4982 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4983 #endif 4984 4985 static inline struct sk_buff * 4986 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4987 struct net_device *orig_dev, bool *another) 4988 { 4989 #ifdef CONFIG_NET_CLS_ACT 4990 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4991 struct tcf_result cl_res; 4992 4993 /* If there's at least one ingress present somewhere (so 4994 * we get here via enabled static key), remaining devices 4995 * that are not configured with an ingress qdisc will bail 4996 * out here. 
4997 */ 4998 if (!miniq) 4999 return skb; 5000 5001 if (*pt_prev) { 5002 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5003 *pt_prev = NULL; 5004 } 5005 5006 qdisc_skb_cb(skb)->pkt_len = skb->len; 5007 qdisc_skb_cb(skb)->mru = 0; 5008 qdisc_skb_cb(skb)->post_ct = false; 5009 skb->tc_at_ingress = 1; 5010 mini_qdisc_bstats_cpu_update(miniq, skb); 5011 5012 switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list, 5013 &cl_res, false)) { 5014 case TC_ACT_OK: 5015 case TC_ACT_RECLASSIFY: 5016 skb->tc_index = TC_H_MIN(cl_res.classid); 5017 break; 5018 case TC_ACT_SHOT: 5019 mini_qdisc_qstats_cpu_drop(miniq); 5020 kfree_skb(skb); 5021 return NULL; 5022 case TC_ACT_STOLEN: 5023 case TC_ACT_QUEUED: 5024 case TC_ACT_TRAP: 5025 consume_skb(skb); 5026 return NULL; 5027 case TC_ACT_REDIRECT: 5028 /* skb_mac_header check was done by cls/act_bpf, so 5029 * we can safely push the L2 header back before 5030 * redirecting to another netdev 5031 */ 5032 __skb_push(skb, skb->mac_len); 5033 if (skb_do_redirect(skb) == -EAGAIN) { 5034 __skb_pull(skb, skb->mac_len); 5035 *another = true; 5036 break; 5037 } 5038 return NULL; 5039 case TC_ACT_CONSUMED: 5040 return NULL; 5041 default: 5042 break; 5043 } 5044 #endif /* CONFIG_NET_CLS_ACT */ 5045 return skb; 5046 } 5047 5048 /** 5049 * netdev_is_rx_handler_busy - check if receive handler is registered 5050 * @dev: device to check 5051 * 5052 * Check if a receive handler is already registered for a given device. 5053 * Return true if there one. 5054 * 5055 * The caller must hold the rtnl_mutex. 5056 */ 5057 bool netdev_is_rx_handler_busy(struct net_device *dev) 5058 { 5059 ASSERT_RTNL(); 5060 return dev && rtnl_dereference(dev->rx_handler); 5061 } 5062 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5063 5064 /** 5065 * netdev_rx_handler_register - register receive handler 5066 * @dev: device to register a handler for 5067 * @rx_handler: receive handler to register 5068 * @rx_handler_data: data pointer that is used by rx handler 5069 * 5070 * Register a receive handler for a device. This handler will then be 5071 * called from __netif_receive_skb. A negative errno code is returned 5072 * on a failure. 5073 * 5074 * The caller must hold the rtnl_mutex. 5075 * 5076 * For a general description of rx_handler, see enum rx_handler_result. 5077 */ 5078 int netdev_rx_handler_register(struct net_device *dev, 5079 rx_handler_func_t *rx_handler, 5080 void *rx_handler_data) 5081 { 5082 if (netdev_is_rx_handler_busy(dev)) 5083 return -EBUSY; 5084 5085 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5086 return -EINVAL; 5087 5088 /* Note: rx_handler_data must be set before rx_handler */ 5089 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5090 rcu_assign_pointer(dev->rx_handler, rx_handler); 5091 5092 return 0; 5093 } 5094 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5095 5096 /** 5097 * netdev_rx_handler_unregister - unregister receive handler 5098 * @dev: device to unregister a handler from 5099 * 5100 * Unregister a receive handler from a device. 5101 * 5102 * The caller must hold the rtnl_mutex. 5103 */ 5104 void netdev_rx_handler_unregister(struct net_device *dev) 5105 { 5106 5107 ASSERT_RTNL(); 5108 RCU_INIT_POINTER(dev->rx_handler, NULL); 5109 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5110 * section has a guarantee to see a non NULL rx_handler_data 5111 * as well. 
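 * That guarantee is kept here by waiting for in-flight readers with
 * synchronize_net() before rx_handler_data is cleared below.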
5112 */ 5113 synchronize_net(); 5114 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5115 } 5116 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5117 5118 /* 5119 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5120 * the special handling of PFMEMALLOC skbs. 5121 */ 5122 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5123 { 5124 switch (skb->protocol) { 5125 case htons(ETH_P_ARP): 5126 case htons(ETH_P_IP): 5127 case htons(ETH_P_IPV6): 5128 case htons(ETH_P_8021Q): 5129 case htons(ETH_P_8021AD): 5130 return true; 5131 default: 5132 return false; 5133 } 5134 } 5135 5136 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5137 int *ret, struct net_device *orig_dev) 5138 { 5139 if (nf_hook_ingress_active(skb)) { 5140 int ingress_retval; 5141 5142 if (*pt_prev) { 5143 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5144 *pt_prev = NULL; 5145 } 5146 5147 rcu_read_lock(); 5148 ingress_retval = nf_hook_ingress(skb); 5149 rcu_read_unlock(); 5150 return ingress_retval; 5151 } 5152 return 0; 5153 } 5154 5155 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5156 struct packet_type **ppt_prev) 5157 { 5158 struct packet_type *ptype, *pt_prev; 5159 rx_handler_func_t *rx_handler; 5160 struct sk_buff *skb = *pskb; 5161 struct net_device *orig_dev; 5162 bool deliver_exact = false; 5163 int ret = NET_RX_DROP; 5164 __be16 type; 5165 5166 net_timestamp_check(!netdev_tstamp_prequeue, skb); 5167 5168 trace_netif_receive_skb(skb); 5169 5170 orig_dev = skb->dev; 5171 5172 skb_reset_network_header(skb); 5173 if (!skb_transport_header_was_set(skb)) 5174 skb_reset_transport_header(skb); 5175 skb_reset_mac_len(skb); 5176 5177 pt_prev = NULL; 5178 5179 another_round: 5180 skb->skb_iif = skb->dev->ifindex; 5181 5182 __this_cpu_inc(softnet_data.processed); 5183 5184 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5185 int ret2; 5186 5187 preempt_disable(); 5188 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5189 preempt_enable(); 5190 5191 if (ret2 != XDP_PASS) { 5192 ret = NET_RX_DROP; 5193 goto out; 5194 } 5195 skb_reset_mac_len(skb); 5196 } 5197 5198 if (eth_type_vlan(skb->protocol)) { 5199 skb = skb_vlan_untag(skb); 5200 if (unlikely(!skb)) 5201 goto out; 5202 } 5203 5204 if (skb_skip_tc_classify(skb)) 5205 goto skip_classify; 5206 5207 if (pfmemalloc) 5208 goto skip_taps; 5209 5210 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5211 if (pt_prev) 5212 ret = deliver_skb(skb, pt_prev, orig_dev); 5213 pt_prev = ptype; 5214 } 5215 5216 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5217 if (pt_prev) 5218 ret = deliver_skb(skb, pt_prev, orig_dev); 5219 pt_prev = ptype; 5220 } 5221 5222 skip_taps: 5223 #ifdef CONFIG_NET_INGRESS 5224 if (static_branch_unlikely(&ingress_needed_key)) { 5225 bool another = false; 5226 5227 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5228 &another); 5229 if (another) 5230 goto another_round; 5231 if (!skb) 5232 goto out; 5233 5234 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5235 goto out; 5236 } 5237 #endif 5238 skb_reset_redirect(skb); 5239 skip_classify: 5240 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5241 goto drop; 5242 5243 if (skb_vlan_tag_present(skb)) { 5244 if (pt_prev) { 5245 ret = deliver_skb(skb, pt_prev, orig_dev); 5246 pt_prev = NULL; 5247 } 5248 if (vlan_do_receive(&skb)) 5249 goto another_round; 5250 else if (unlikely(!skb)) 5251 goto out; 5252 } 5253 5254 rx_handler = rcu_dereference(skb->dev->rx_handler); 5255 if 
(rx_handler) { 5256 if (pt_prev) { 5257 ret = deliver_skb(skb, pt_prev, orig_dev); 5258 pt_prev = NULL; 5259 } 5260 switch (rx_handler(&skb)) { 5261 case RX_HANDLER_CONSUMED: 5262 ret = NET_RX_SUCCESS; 5263 goto out; 5264 case RX_HANDLER_ANOTHER: 5265 goto another_round; 5266 case RX_HANDLER_EXACT: 5267 deliver_exact = true; 5268 break; 5269 case RX_HANDLER_PASS: 5270 break; 5271 default: 5272 BUG(); 5273 } 5274 } 5275 5276 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5277 check_vlan_id: 5278 if (skb_vlan_tag_get_id(skb)) { 5279 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5280 * find vlan device. 5281 */ 5282 skb->pkt_type = PACKET_OTHERHOST; 5283 } else if (eth_type_vlan(skb->protocol)) { 5284 /* Outer header is 802.1P with vlan 0, inner header is 5285 * 802.1Q or 802.1AD and vlan_do_receive() above could 5286 * not find vlan dev for vlan id 0. 5287 */ 5288 __vlan_hwaccel_clear_tag(skb); 5289 skb = skb_vlan_untag(skb); 5290 if (unlikely(!skb)) 5291 goto out; 5292 if (vlan_do_receive(&skb)) 5293 /* After stripping off 802.1P header with vlan 0 5294 * vlan dev is found for inner header. 5295 */ 5296 goto another_round; 5297 else if (unlikely(!skb)) 5298 goto out; 5299 else 5300 /* We have stripped outer 802.1P vlan 0 header. 5301 * But could not find vlan dev. 5302 * check again for vlan id to set OTHERHOST. 5303 */ 5304 goto check_vlan_id; 5305 } 5306 /* Note: we might in the future use prio bits 5307 * and set skb->priority like in vlan_do_receive() 5308 * For the time being, just ignore Priority Code Point 5309 */ 5310 __vlan_hwaccel_clear_tag(skb); 5311 } 5312 5313 type = skb->protocol; 5314 5315 /* deliver only exact match when indicated */ 5316 if (likely(!deliver_exact)) { 5317 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5318 &ptype_base[ntohs(type) & 5319 PTYPE_HASH_MASK]); 5320 } 5321 5322 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5323 &orig_dev->ptype_specific); 5324 5325 if (unlikely(skb->dev != orig_dev)) { 5326 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5327 &skb->dev->ptype_specific); 5328 } 5329 5330 if (pt_prev) { 5331 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5332 goto drop; 5333 *ppt_prev = pt_prev; 5334 } else { 5335 drop: 5336 if (!deliver_exact) 5337 atomic_long_inc(&skb->dev->rx_dropped); 5338 else 5339 atomic_long_inc(&skb->dev->rx_nohandler); 5340 kfree_skb(skb); 5341 /* Jamal, now you will not able to escape explaining 5342 * me how you were going to use this. :-) 5343 */ 5344 ret = NET_RX_DROP; 5345 } 5346 5347 out: 5348 /* The invariant here is that if *ppt_prev is not NULL 5349 * then skb should also be non-NULL. 5350 * 5351 * Apparently *ppt_prev assignment above holds this invariant due to 5352 * skb dereferencing near it. 5353 */ 5354 *pskb = skb; 5355 return ret; 5356 } 5357 5358 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5359 { 5360 struct net_device *orig_dev = skb->dev; 5361 struct packet_type *pt_prev = NULL; 5362 int ret; 5363 5364 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5365 if (pt_prev) 5366 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5367 skb->dev, pt_prev, orig_dev); 5368 return ret; 5369 } 5370 5371 /** 5372 * netif_receive_skb_core - special purpose version of netif_receive_skb 5373 * @skb: buffer to process 5374 * 5375 * More direct receive version of netif_receive_skb(). It should 5376 * only be used by callers that have a need to skip RPS and Generic XDP. 
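 * A minimal sketch of such a caller (hypothetical, running in softirq
 * context with a fully built skb):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	ret = netif_receive_skb_core(skb);
 *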
5377 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5378 * 5379 * This function may only be called from softirq context and interrupts 5380 * should be enabled. 5381 * 5382 * Return values (usually ignored): 5383 * NET_RX_SUCCESS: no congestion 5384 * NET_RX_DROP: packet was dropped 5385 */ 5386 int netif_receive_skb_core(struct sk_buff *skb) 5387 { 5388 int ret; 5389 5390 rcu_read_lock(); 5391 ret = __netif_receive_skb_one_core(skb, false); 5392 rcu_read_unlock(); 5393 5394 return ret; 5395 } 5396 EXPORT_SYMBOL(netif_receive_skb_core); 5397 5398 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5399 struct packet_type *pt_prev, 5400 struct net_device *orig_dev) 5401 { 5402 struct sk_buff *skb, *next; 5403 5404 if (!pt_prev) 5405 return; 5406 if (list_empty(head)) 5407 return; 5408 if (pt_prev->list_func != NULL) 5409 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5410 ip_list_rcv, head, pt_prev, orig_dev); 5411 else 5412 list_for_each_entry_safe(skb, next, head, list) { 5413 skb_list_del_init(skb); 5414 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5415 } 5416 } 5417 5418 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5419 { 5420 /* Fast-path assumptions: 5421 * - There is no RX handler. 5422 * - Only one packet_type matches. 5423 * If either of these fails, we will end up doing some per-packet 5424 * processing in-line, then handling the 'last ptype' for the whole 5425 * sublist. This can't cause out-of-order delivery to any single ptype, 5426 * because the 'last ptype' must be constant across the sublist, and all 5427 * other ptypes are handled per-packet. 5428 */ 5429 /* Current (common) ptype of sublist */ 5430 struct packet_type *pt_curr = NULL; 5431 /* Current (common) orig_dev of sublist */ 5432 struct net_device *od_curr = NULL; 5433 struct list_head sublist; 5434 struct sk_buff *skb, *next; 5435 5436 INIT_LIST_HEAD(&sublist); 5437 list_for_each_entry_safe(skb, next, head, list) { 5438 struct net_device *orig_dev = skb->dev; 5439 struct packet_type *pt_prev = NULL; 5440 5441 skb_list_del_init(skb); 5442 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5443 if (!pt_prev) 5444 continue; 5445 if (pt_curr != pt_prev || od_curr != orig_dev) { 5446 /* dispatch old sublist */ 5447 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5448 /* start new sublist */ 5449 INIT_LIST_HEAD(&sublist); 5450 pt_curr = pt_prev; 5451 od_curr = orig_dev; 5452 } 5453 list_add_tail(&skb->list, &sublist); 5454 } 5455 5456 /* dispatch final sublist */ 5457 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5458 } 5459 5460 static int __netif_receive_skb(struct sk_buff *skb) 5461 { 5462 int ret; 5463 5464 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5465 unsigned int noreclaim_flag; 5466 5467 /* 5468 * PFMEMALLOC skbs are special, they should 5469 * - be delivered to SOCK_MEMALLOC sockets only 5470 * - stay away from userspace 5471 * - have bounded memory usage 5472 * 5473 * Use PF_MEMALLOC as this saves us from propagating the allocation 5474 * context down to all allocation sites. 
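 * memalloc_noreclaim_save()/restore() below set and clear PF_MEMALLOC on
 * the current task around the receive path for exactly that purpose.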
5475 */ 5476 noreclaim_flag = memalloc_noreclaim_save(); 5477 ret = __netif_receive_skb_one_core(skb, true); 5478 memalloc_noreclaim_restore(noreclaim_flag); 5479 } else 5480 ret = __netif_receive_skb_one_core(skb, false); 5481 5482 return ret; 5483 } 5484 5485 static void __netif_receive_skb_list(struct list_head *head) 5486 { 5487 unsigned long noreclaim_flag = 0; 5488 struct sk_buff *skb, *next; 5489 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */ 5490 5491 list_for_each_entry_safe(skb, next, head, list) { 5492 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5493 struct list_head sublist; 5494 5495 /* Handle the previous sublist */ 5496 list_cut_before(&sublist, head, &skb->list); 5497 if (!list_empty(&sublist)) 5498 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5499 pfmemalloc = !pfmemalloc; 5500 /* See comments in __netif_receive_skb */ 5501 if (pfmemalloc) 5502 noreclaim_flag = memalloc_noreclaim_save(); 5503 else 5504 memalloc_noreclaim_restore(noreclaim_flag); 5505 } 5506 } 5507 /* Handle the remaining sublist */ 5508 if (!list_empty(head)) 5509 __netif_receive_skb_list_core(head, pfmemalloc); 5510 /* Restore pflags */ 5511 if (pfmemalloc) 5512 memalloc_noreclaim_restore(noreclaim_flag); 5513 } 5514 5515 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5516 { 5517 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5518 struct bpf_prog *new = xdp->prog; 5519 int ret = 0; 5520 5521 if (new) { 5522 u32 i; 5523 5524 mutex_lock(&new->aux->used_maps_mutex); 5525 5526 /* generic XDP does not work with DEVMAPs that can 5527 * have a bpf_prog installed on an entry 5528 */ 5529 for (i = 0; i < new->aux->used_map_cnt; i++) { 5530 if (dev_map_can_have_prog(new->aux->used_maps[i]) || 5531 cpu_map_prog_allowed(new->aux->used_maps[i])) { 5532 mutex_unlock(&new->aux->used_maps_mutex); 5533 return -EINVAL; 5534 } 5535 } 5536 5537 mutex_unlock(&new->aux->used_maps_mutex); 5538 } 5539 5540 switch (xdp->command) { 5541 case XDP_SETUP_PROG: 5542 rcu_assign_pointer(dev->xdp_prog, new); 5543 if (old) 5544 bpf_prog_put(old); 5545 5546 if (old && !new) { 5547 static_branch_dec(&generic_xdp_needed_key); 5548 } else if (new && !old) { 5549 static_branch_inc(&generic_xdp_needed_key); 5550 dev_disable_lro(dev); 5551 dev_disable_gro_hw(dev); 5552 } 5553 break; 5554 5555 default: 5556 ret = -EINVAL; 5557 break; 5558 } 5559 5560 return ret; 5561 } 5562 5563 static int netif_receive_skb_internal(struct sk_buff *skb) 5564 { 5565 int ret; 5566 5567 net_timestamp_check(netdev_tstamp_prequeue, skb); 5568 5569 if (skb_defer_rx_timestamp(skb)) 5570 return NET_RX_SUCCESS; 5571 5572 rcu_read_lock(); 5573 #ifdef CONFIG_RPS 5574 if (static_branch_unlikely(&rps_needed)) { 5575 struct rps_dev_flow voidflow, *rflow = &voidflow; 5576 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5577 5578 if (cpu >= 0) { 5579 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5580 rcu_read_unlock(); 5581 return ret; 5582 } 5583 } 5584 #endif 5585 ret = __netif_receive_skb(skb); 5586 rcu_read_unlock(); 5587 return ret; 5588 } 5589 5590 static void netif_receive_skb_list_internal(struct list_head *head) 5591 { 5592 struct sk_buff *skb, *next; 5593 struct list_head sublist; 5594 5595 INIT_LIST_HEAD(&sublist); 5596 list_for_each_entry_safe(skb, next, head, list) { 5597 net_timestamp_check(netdev_tstamp_prequeue, skb); 5598 skb_list_del_init(skb); 5599 if (!skb_defer_rx_timestamp(skb)) 5600 list_add_tail(&skb->list, &sublist); 5601 } 5602 list_splice_init(&sublist, 
head); 5603 5604 rcu_read_lock(); 5605 #ifdef CONFIG_RPS 5606 if (static_branch_unlikely(&rps_needed)) { 5607 list_for_each_entry_safe(skb, next, head, list) { 5608 struct rps_dev_flow voidflow, *rflow = &voidflow; 5609 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5610 5611 if (cpu >= 0) { 5612 /* Will be handled, remove from list */ 5613 skb_list_del_init(skb); 5614 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5615 } 5616 } 5617 } 5618 #endif 5619 __netif_receive_skb_list(head); 5620 rcu_read_unlock(); 5621 } 5622 5623 /** 5624 * netif_receive_skb - process receive buffer from network 5625 * @skb: buffer to process 5626 * 5627 * netif_receive_skb() is the main receive data processing function. 5628 * It always succeeds. The buffer may be dropped during processing 5629 * for congestion control or by the protocol layers. 5630 * 5631 * This function may only be called from softirq context and interrupts 5632 * should be enabled. 5633 * 5634 * Return values (usually ignored): 5635 * NET_RX_SUCCESS: no congestion 5636 * NET_RX_DROP: packet was dropped 5637 */ 5638 int netif_receive_skb(struct sk_buff *skb) 5639 { 5640 int ret; 5641 5642 trace_netif_receive_skb_entry(skb); 5643 5644 ret = netif_receive_skb_internal(skb); 5645 trace_netif_receive_skb_exit(ret); 5646 5647 return ret; 5648 } 5649 EXPORT_SYMBOL(netif_receive_skb); 5650 5651 /** 5652 * netif_receive_skb_list - process many receive buffers from network 5653 * @head: list of skbs to process. 5654 * 5655 * Since return value of netif_receive_skb() is normally ignored, and 5656 * wouldn't be meaningful for a list, this function returns void. 5657 * 5658 * This function may only be called from softirq context and interrupts 5659 * should be enabled. 5660 */ 5661 void netif_receive_skb_list(struct list_head *head) 5662 { 5663 struct sk_buff *skb; 5664 5665 if (list_empty(head)) 5666 return; 5667 if (trace_netif_receive_skb_list_entry_enabled()) { 5668 list_for_each_entry(skb, head, list) 5669 trace_netif_receive_skb_list_entry(skb); 5670 } 5671 netif_receive_skb_list_internal(head); 5672 trace_netif_receive_skb_list_exit(0); 5673 } 5674 EXPORT_SYMBOL(netif_receive_skb_list); 5675 5676 static DEFINE_PER_CPU(struct work_struct, flush_works); 5677 5678 /* Network device is going away, flush any packets still pending */ 5679 static void flush_backlog(struct work_struct *work) 5680 { 5681 struct sk_buff *skb, *tmp; 5682 struct softnet_data *sd; 5683 5684 local_bh_disable(); 5685 sd = this_cpu_ptr(&softnet_data); 5686 5687 local_irq_disable(); 5688 rps_lock(sd); 5689 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5690 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5691 __skb_unlink(skb, &sd->input_pkt_queue); 5692 dev_kfree_skb_irq(skb); 5693 input_queue_head_incr(sd); 5694 } 5695 } 5696 rps_unlock(sd); 5697 local_irq_enable(); 5698 5699 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5700 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5701 __skb_unlink(skb, &sd->process_queue); 5702 kfree_skb(skb); 5703 input_queue_head_incr(sd); 5704 } 5705 } 5706 local_bh_enable(); 5707 } 5708 5709 static bool flush_required(int cpu) 5710 { 5711 #if IS_ENABLED(CONFIG_RPS) 5712 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5713 bool do_flush; 5714 5715 local_irq_disable(); 5716 rps_lock(sd); 5717 5718 /* as insertion into process_queue happens with the rps lock held, 5719 * process_queue access may race only with dequeue 5720 */ 5721 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5722 
!skb_queue_empty_lockless(&sd->process_queue); 5723 rps_unlock(sd); 5724 local_irq_enable(); 5725 5726 return do_flush; 5727 #endif 5728 /* without RPS we can't safely check input_pkt_queue: during a 5729 * concurrent remote skb_queue_splice() we can detect as empty both 5730 * input_pkt_queue and process_queue even if the latter could end-up 5731 * containing a lot of packets. 5732 */ 5733 return true; 5734 } 5735 5736 static void flush_all_backlogs(void) 5737 { 5738 static cpumask_t flush_cpus; 5739 unsigned int cpu; 5740 5741 /* since we are under rtnl lock protection we can use static data 5742 * for the cpumask and avoid allocating on stack the possibly 5743 * large mask 5744 */ 5745 ASSERT_RTNL(); 5746 5747 get_online_cpus(); 5748 5749 cpumask_clear(&flush_cpus); 5750 for_each_online_cpu(cpu) { 5751 if (flush_required(cpu)) { 5752 queue_work_on(cpu, system_highpri_wq, 5753 per_cpu_ptr(&flush_works, cpu)); 5754 cpumask_set_cpu(cpu, &flush_cpus); 5755 } 5756 } 5757 5758 /* we can have in flight packet[s] on the cpus we are not flushing, 5759 * synchronize_net() in unregister_netdevice_many() will take care of 5760 * them 5761 */ 5762 for_each_cpu(cpu, &flush_cpus) 5763 flush_work(per_cpu_ptr(&flush_works, cpu)); 5764 5765 put_online_cpus(); 5766 } 5767 5768 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ 5769 static void gro_normal_list(struct napi_struct *napi) 5770 { 5771 if (!napi->rx_count) 5772 return; 5773 netif_receive_skb_list_internal(&napi->rx_list); 5774 INIT_LIST_HEAD(&napi->rx_list); 5775 napi->rx_count = 0; 5776 } 5777 5778 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, 5779 * pass the whole batch up to the stack. 5780 */ 5781 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) 5782 { 5783 list_add_tail(&skb->list, &napi->rx_list); 5784 napi->rx_count += segs; 5785 if (napi->rx_count >= gro_normal_batch) 5786 gro_normal_list(napi); 5787 } 5788 5789 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) 5790 { 5791 struct packet_offload *ptype; 5792 __be16 type = skb->protocol; 5793 struct list_head *head = &offload_base; 5794 int err = -ENOENT; 5795 5796 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5797 5798 if (NAPI_GRO_CB(skb)->count == 1) { 5799 skb_shinfo(skb)->gso_size = 0; 5800 goto out; 5801 } 5802 5803 rcu_read_lock(); 5804 list_for_each_entry_rcu(ptype, head, list) { 5805 if (ptype->type != type || !ptype->callbacks.gro_complete) 5806 continue; 5807 5808 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 5809 ipv6_gro_complete, inet_gro_complete, 5810 skb, 0); 5811 break; 5812 } 5813 rcu_read_unlock(); 5814 5815 if (err) { 5816 WARN_ON(&ptype->list == head); 5817 kfree_skb(skb); 5818 return NET_RX_SUCCESS; 5819 } 5820 5821 out: 5822 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); 5823 return NET_RX_SUCCESS; 5824 } 5825 5826 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5827 bool flush_old) 5828 { 5829 struct list_head *head = &napi->gro_hash[index].list; 5830 struct sk_buff *skb, *p; 5831 5832 list_for_each_entry_safe_reverse(skb, p, head, list) { 5833 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5834 return; 5835 skb_list_del_init(skb); 5836 napi_gro_complete(napi, skb); 5837 napi->gro_hash[index].count--; 5838 } 5839 5840 if (!napi->gro_hash[index].count) 5841 __clear_bit(index, &napi->gro_bitmask); 5842 } 5843 5844 /* napi->gro_hash[].list contains packets ordered by age. 
5845 * youngest packets at the head of it. 5846 * Complete skbs in reverse order to reduce latencies. 5847 */ 5848 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5849 { 5850 unsigned long bitmask = napi->gro_bitmask; 5851 unsigned int i, base = ~0U; 5852 5853 while ((i = ffs(bitmask)) != 0) { 5854 bitmask >>= i; 5855 base += i; 5856 __napi_gro_flush_chain(napi, base, flush_old); 5857 } 5858 } 5859 EXPORT_SYMBOL(napi_gro_flush); 5860 5861 static void gro_list_prepare(const struct list_head *head, 5862 const struct sk_buff *skb) 5863 { 5864 unsigned int maclen = skb->dev->hard_header_len; 5865 u32 hash = skb_get_hash_raw(skb); 5866 struct sk_buff *p; 5867 5868 list_for_each_entry(p, head, list) { 5869 unsigned long diffs; 5870 5871 NAPI_GRO_CB(p)->flush = 0; 5872 5873 if (hash != skb_get_hash_raw(p)) { 5874 NAPI_GRO_CB(p)->same_flow = 0; 5875 continue; 5876 } 5877 5878 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5879 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); 5880 if (skb_vlan_tag_present(p)) 5881 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); 5882 diffs |= skb_metadata_dst_cmp(p, skb); 5883 diffs |= skb_metadata_differs(p, skb); 5884 if (maclen == ETH_HLEN) 5885 diffs |= compare_ether_header(skb_mac_header(p), 5886 skb_mac_header(skb)); 5887 else if (!diffs) 5888 diffs = memcmp(skb_mac_header(p), 5889 skb_mac_header(skb), 5890 maclen); 5891 NAPI_GRO_CB(p)->same_flow = !diffs; 5892 } 5893 } 5894 5895 static void skb_gro_reset_offset(struct sk_buff *skb) 5896 { 5897 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5898 const skb_frag_t *frag0 = &pinfo->frags[0]; 5899 5900 NAPI_GRO_CB(skb)->data_offset = 0; 5901 NAPI_GRO_CB(skb)->frag0 = NULL; 5902 NAPI_GRO_CB(skb)->frag0_len = 0; 5903 5904 if (!skb_headlen(skb) && pinfo->nr_frags && 5905 !PageHighMem(skb_frag_page(frag0))) { 5906 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5907 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5908 skb_frag_size(frag0), 5909 skb->end - skb->tail); 5910 } 5911 } 5912 5913 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5914 { 5915 struct skb_shared_info *pinfo = skb_shinfo(skb); 5916 5917 BUG_ON(skb->end - skb->tail < grow); 5918 5919 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5920 5921 skb->data_len -= grow; 5922 skb->tail += grow; 5923 5924 skb_frag_off_add(&pinfo->frags[0], grow); 5925 skb_frag_size_sub(&pinfo->frags[0], grow); 5926 5927 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 5928 skb_frag_unref(skb, 0); 5929 memmove(pinfo->frags, pinfo->frags + 1, 5930 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 5931 } 5932 } 5933 5934 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) 5935 { 5936 struct sk_buff *oldest; 5937 5938 oldest = list_last_entry(head, struct sk_buff, list); 5939 5940 /* We are called with head length >= MAX_GRO_SKBS, so this is 5941 * impossible. 5942 */ 5943 if (WARN_ON_ONCE(!oldest)) 5944 return; 5945 5946 /* Do not adjust napi->gro_hash[].count, caller is adding a new 5947 * SKB to the chain. 
5948 */ 5949 skb_list_del_init(oldest); 5950 napi_gro_complete(napi, oldest); 5951 } 5952 5953 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5954 { 5955 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 5956 struct gro_list *gro_list = &napi->gro_hash[bucket]; 5957 struct list_head *head = &offload_base; 5958 struct packet_offload *ptype; 5959 __be16 type = skb->protocol; 5960 struct sk_buff *pp = NULL; 5961 enum gro_result ret; 5962 int same_flow; 5963 int grow; 5964 5965 if (netif_elide_gro(skb->dev)) 5966 goto normal; 5967 5968 gro_list_prepare(&gro_list->list, skb); 5969 5970 rcu_read_lock(); 5971 list_for_each_entry_rcu(ptype, head, list) { 5972 if (ptype->type != type || !ptype->callbacks.gro_receive) 5973 continue; 5974 5975 skb_set_network_header(skb, skb_gro_offset(skb)); 5976 skb_reset_mac_len(skb); 5977 NAPI_GRO_CB(skb)->same_flow = 0; 5978 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 5979 NAPI_GRO_CB(skb)->free = 0; 5980 NAPI_GRO_CB(skb)->encap_mark = 0; 5981 NAPI_GRO_CB(skb)->recursion_counter = 0; 5982 NAPI_GRO_CB(skb)->is_fou = 0; 5983 NAPI_GRO_CB(skb)->is_atomic = 1; 5984 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 5985 5986 /* Setup for GRO checksum validation */ 5987 switch (skb->ip_summed) { 5988 case CHECKSUM_COMPLETE: 5989 NAPI_GRO_CB(skb)->csum = skb->csum; 5990 NAPI_GRO_CB(skb)->csum_valid = 1; 5991 NAPI_GRO_CB(skb)->csum_cnt = 0; 5992 break; 5993 case CHECKSUM_UNNECESSARY: 5994 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 5995 NAPI_GRO_CB(skb)->csum_valid = 0; 5996 break; 5997 default: 5998 NAPI_GRO_CB(skb)->csum_cnt = 0; 5999 NAPI_GRO_CB(skb)->csum_valid = 0; 6000 } 6001 6002 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, 6003 ipv6_gro_receive, inet_gro_receive, 6004 &gro_list->list, skb); 6005 break; 6006 } 6007 rcu_read_unlock(); 6008 6009 if (&ptype->list == head) 6010 goto normal; 6011 6012 if (PTR_ERR(pp) == -EINPROGRESS) { 6013 ret = GRO_CONSUMED; 6014 goto ok; 6015 } 6016 6017 same_flow = NAPI_GRO_CB(skb)->same_flow; 6018 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 6019 6020 if (pp) { 6021 skb_list_del_init(pp); 6022 napi_gro_complete(napi, pp); 6023 gro_list->count--; 6024 } 6025 6026 if (same_flow) 6027 goto ok; 6028 6029 if (NAPI_GRO_CB(skb)->flush) 6030 goto normal; 6031 6032 if (unlikely(gro_list->count >= MAX_GRO_SKBS)) 6033 gro_flush_oldest(napi, &gro_list->list); 6034 else 6035 gro_list->count++; 6036 6037 NAPI_GRO_CB(skb)->count = 1; 6038 NAPI_GRO_CB(skb)->age = jiffies; 6039 NAPI_GRO_CB(skb)->last = skb; 6040 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 6041 list_add(&skb->list, &gro_list->list); 6042 ret = GRO_HELD; 6043 6044 pull: 6045 grow = skb_gro_offset(skb) - skb_headlen(skb); 6046 if (grow > 0) 6047 gro_pull_from_frag0(skb, grow); 6048 ok: 6049 if (gro_list->count) { 6050 if (!test_bit(bucket, &napi->gro_bitmask)) 6051 __set_bit(bucket, &napi->gro_bitmask); 6052 } else if (test_bit(bucket, &napi->gro_bitmask)) { 6053 __clear_bit(bucket, &napi->gro_bitmask); 6054 } 6055 6056 return ret; 6057 6058 normal: 6059 ret = GRO_NORMAL; 6060 goto pull; 6061 } 6062 6063 struct packet_offload *gro_find_receive_by_type(__be16 type) 6064 { 6065 struct list_head *offload_head = &offload_base; 6066 struct packet_offload *ptype; 6067 6068 list_for_each_entry_rcu(ptype, offload_head, list) { 6069 if (ptype->type != type || !ptype->callbacks.gro_receive) 6070 continue; 6071 return ptype; 6072 } 6073 return NULL; 6074 } 6075 EXPORT_SYMBOL(gro_find_receive_by_type); 6076 6077 struct packet_offload *gro_find_complete_by_type(__be16 type) 6078 { 6079 struct list_head *offload_head = &offload_base; 6080 struct packet_offload *ptype; 6081 6082 list_for_each_entry_rcu(ptype, offload_head, list) { 6083 if (ptype->type != type || !ptype->callbacks.gro_complete) 6084 continue; 6085 return ptype; 6086 } 6087 return NULL; 6088 } 6089 EXPORT_SYMBOL(gro_find_complete_by_type); 6090 6091 static gro_result_t napi_skb_finish(struct napi_struct *napi, 6092 struct sk_buff *skb, 6093 gro_result_t ret) 6094 { 6095 switch (ret) { 6096 case GRO_NORMAL: 6097 gro_normal_one(napi, skb, 1); 6098 break; 6099 6100 case GRO_MERGED_FREE: 6101 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 6102 napi_skb_free_stolen_head(skb); 6103 else 6104 __kfree_skb_defer(skb); 6105 break; 6106 6107 case GRO_HELD: 6108 case GRO_MERGED: 6109 case GRO_CONSUMED: 6110 break; 6111 } 6112 6113 return ret; 6114 } 6115 6116 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 6117 { 6118 gro_result_t ret; 6119 6120 skb_mark_napi_id(skb, napi); 6121 trace_napi_gro_receive_entry(skb); 6122 6123 skb_gro_reset_offset(skb); 6124 6125 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); 6126 trace_napi_gro_receive_exit(ret); 6127 6128 return ret; 6129 } 6130 EXPORT_SYMBOL(napi_gro_receive); 6131 6132 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 6133 { 6134 if (unlikely(skb->pfmemalloc)) { 6135 consume_skb(skb); 6136 return; 6137 } 6138 __skb_pull(skb, skb_headlen(skb)); 6139 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 6140 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 6141 __vlan_hwaccel_clear_tag(skb); 6142 skb->dev = napi->dev; 6143 skb->skb_iif = 0; 6144 6145 /* eth_type_trans() assumes pkt_type is PACKET_HOST */ 6146 skb->pkt_type = PACKET_HOST; 6147 6148 skb->encapsulation = 0; 6149 skb_shinfo(skb)->gso_type = 0; 6150 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6151 skb_ext_reset(skb); 6152 6153 napi->skb = skb; 6154 } 6155 6156 struct sk_buff 
*napi_get_frags(struct napi_struct *napi) 6157 { 6158 struct sk_buff *skb = napi->skb; 6159 6160 if (!skb) { 6161 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 6162 if (skb) { 6163 napi->skb = skb; 6164 skb_mark_napi_id(skb, napi); 6165 } 6166 } 6167 return skb; 6168 } 6169 EXPORT_SYMBOL(napi_get_frags); 6170 6171 static gro_result_t napi_frags_finish(struct napi_struct *napi, 6172 struct sk_buff *skb, 6173 gro_result_t ret) 6174 { 6175 switch (ret) { 6176 case GRO_NORMAL: 6177 case GRO_HELD: 6178 __skb_push(skb, ETH_HLEN); 6179 skb->protocol = eth_type_trans(skb, skb->dev); 6180 if (ret == GRO_NORMAL) 6181 gro_normal_one(napi, skb, 1); 6182 break; 6183 6184 case GRO_MERGED_FREE: 6185 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 6186 napi_skb_free_stolen_head(skb); 6187 else 6188 napi_reuse_skb(napi, skb); 6189 break; 6190 6191 case GRO_MERGED: 6192 case GRO_CONSUMED: 6193 break; 6194 } 6195 6196 return ret; 6197 } 6198 6199 /* Upper GRO stack assumes network header starts at gro_offset=0 6200 * Drivers could call both napi_gro_frags() and napi_gro_receive() 6201 * We copy ethernet header into skb->data to have a common layout. 6202 */ 6203 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 6204 { 6205 struct sk_buff *skb = napi->skb; 6206 const struct ethhdr *eth; 6207 unsigned int hlen = sizeof(*eth); 6208 6209 napi->skb = NULL; 6210 6211 skb_reset_mac_header(skb); 6212 skb_gro_reset_offset(skb); 6213 6214 if (unlikely(skb_gro_header_hard(skb, hlen))) { 6215 eth = skb_gro_header_slow(skb, hlen, 0); 6216 if (unlikely(!eth)) { 6217 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 6218 __func__, napi->dev->name); 6219 napi_reuse_skb(napi, skb); 6220 return NULL; 6221 } 6222 } else { 6223 eth = (const struct ethhdr *)skb->data; 6224 gro_pull_from_frag0(skb, hlen); 6225 NAPI_GRO_CB(skb)->frag0 += hlen; 6226 NAPI_GRO_CB(skb)->frag0_len -= hlen; 6227 } 6228 __skb_pull(skb, hlen); 6229 6230 /* 6231 * This works because the only protocols we care about don't require 6232 * special handling. 6233 * We'll fix it up properly in napi_frags_finish() 6234 */ 6235 skb->protocol = eth->h_proto; 6236 6237 return skb; 6238 } 6239 6240 gro_result_t napi_gro_frags(struct napi_struct *napi) 6241 { 6242 gro_result_t ret; 6243 struct sk_buff *skb = napi_frags_skb(napi); 6244 6245 trace_napi_gro_frags_entry(skb); 6246 6247 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 6248 trace_napi_gro_frags_exit(ret); 6249 6250 return ret; 6251 } 6252 EXPORT_SYMBOL(napi_gro_frags); 6253 6254 /* Compute the checksum from gro_offset and return the folded value 6255 * after adding in any pseudo checksum. 6256 */ 6257 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 6258 { 6259 __wsum wsum; 6260 __sum16 sum; 6261 6262 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 6263 6264 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 6265 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 6266 /* See comments in __skb_checksum_complete(). 
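 * A zero folded result means the data from the current GRO offset is
 * consistent with the pseudo checksum cached above; the cached value is
 * then replaced by the freshly computed sum and marked valid so later
 * consumers can reuse it.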
*/ 6267 if (likely(!sum)) { 6268 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 6269 !skb->csum_complete_sw) 6270 netdev_rx_csum_fault(skb->dev, skb); 6271 } 6272 6273 NAPI_GRO_CB(skb)->csum = wsum; 6274 NAPI_GRO_CB(skb)->csum_valid = 1; 6275 6276 return sum; 6277 } 6278 EXPORT_SYMBOL(__skb_gro_checksum_complete); 6279 6280 static void net_rps_send_ipi(struct softnet_data *remsd) 6281 { 6282 #ifdef CONFIG_RPS 6283 while (remsd) { 6284 struct softnet_data *next = remsd->rps_ipi_next; 6285 6286 if (cpu_online(remsd->cpu)) 6287 smp_call_function_single_async(remsd->cpu, &remsd->csd); 6288 remsd = next; 6289 } 6290 #endif 6291 } 6292 6293 /* 6294 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 6295 * Note: called with local irq disabled, but exits with local irq enabled. 6296 */ 6297 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 6298 { 6299 #ifdef CONFIG_RPS 6300 struct softnet_data *remsd = sd->rps_ipi_list; 6301 6302 if (remsd) { 6303 sd->rps_ipi_list = NULL; 6304 6305 local_irq_enable(); 6306 6307 /* Send pending IPI's to kick RPS processing on remote cpus. */ 6308 net_rps_send_ipi(remsd); 6309 } else 6310 #endif 6311 local_irq_enable(); 6312 } 6313 6314 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 6315 { 6316 #ifdef CONFIG_RPS 6317 return sd->rps_ipi_list != NULL; 6318 #else 6319 return false; 6320 #endif 6321 } 6322 6323 static int process_backlog(struct napi_struct *napi, int quota) 6324 { 6325 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 6326 bool again = true; 6327 int work = 0; 6328 6329 /* Check if we have pending ipi, its better to send them now, 6330 * not waiting net_rx_action() end. 6331 */ 6332 if (sd_has_rps_ipi_waiting(sd)) { 6333 local_irq_disable(); 6334 net_rps_action_and_irq_enable(sd); 6335 } 6336 6337 napi->weight = dev_rx_weight; 6338 while (again) { 6339 struct sk_buff *skb; 6340 6341 while ((skb = __skb_dequeue(&sd->process_queue))) { 6342 rcu_read_lock(); 6343 __netif_receive_skb(skb); 6344 rcu_read_unlock(); 6345 input_queue_head_incr(sd); 6346 if (++work >= quota) 6347 return work; 6348 6349 } 6350 6351 local_irq_disable(); 6352 rps_lock(sd); 6353 if (skb_queue_empty(&sd->input_pkt_queue)) { 6354 /* 6355 * Inline a custom version of __napi_complete(). 6356 * only current cpu owns and manipulates this napi, 6357 * and NAPI_STATE_SCHED is the only possible flag set 6358 * on backlog. 6359 * We can use a plain write instead of clear_bit(), 6360 * and we dont need an smp_mb() memory barrier. 6361 */ 6362 napi->state = 0; 6363 again = false; 6364 } else { 6365 skb_queue_splice_tail_init(&sd->input_pkt_queue, 6366 &sd->process_queue); 6367 } 6368 rps_unlock(sd); 6369 local_irq_enable(); 6370 } 6371 6372 return work; 6373 } 6374 6375 /** 6376 * __napi_schedule - schedule for receive 6377 * @n: entry to schedule 6378 * 6379 * The entry's receive function will be scheduled to run. 6380 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 6381 */ 6382 void __napi_schedule(struct napi_struct *n) 6383 { 6384 unsigned long flags; 6385 6386 local_irq_save(flags); 6387 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6388 local_irq_restore(flags); 6389 } 6390 EXPORT_SYMBOL(__napi_schedule); 6391 6392 /** 6393 * napi_schedule_prep - check if napi can be scheduled 6394 * @n: napi context 6395 * 6396 * Test if NAPI routine is already running, and if not mark 6397 * it as running. This is used as a condition variable to 6398 * insure only one NAPI poll instance runs. 
We also make 6399 * sure there is no pending NAPI disable. 6400 */ 6401 bool napi_schedule_prep(struct napi_struct *n) 6402 { 6403 unsigned long val, new; 6404 6405 do { 6406 val = READ_ONCE(n->state); 6407 if (unlikely(val & NAPIF_STATE_DISABLE)) 6408 return false; 6409 new = val | NAPIF_STATE_SCHED; 6410 6411 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6412 * This was suggested by Alexander Duyck, as compiler 6413 * emits better code than : 6414 * if (val & NAPIF_STATE_SCHED) 6415 * new |= NAPIF_STATE_MISSED; 6416 */ 6417 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6418 NAPIF_STATE_MISSED; 6419 } while (cmpxchg(&n->state, val, new) != val); 6420 6421 return !(val & NAPIF_STATE_SCHED); 6422 } 6423 EXPORT_SYMBOL(napi_schedule_prep); 6424 6425 /** 6426 * __napi_schedule_irqoff - schedule for receive 6427 * @n: entry to schedule 6428 * 6429 * Variant of __napi_schedule() assuming hard irqs are masked 6430 */ 6431 void __napi_schedule_irqoff(struct napi_struct *n) 6432 { 6433 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6434 } 6435 EXPORT_SYMBOL(__napi_schedule_irqoff); 6436 6437 bool napi_complete_done(struct napi_struct *n, int work_done) 6438 { 6439 unsigned long flags, val, new, timeout = 0; 6440 bool ret = true; 6441 6442 /* 6443 * 1) Don't let napi dequeue from the cpu poll list 6444 * just in case its running on a different cpu. 6445 * 2) If we are busy polling, do nothing here, we have 6446 * the guarantee we will be called later. 6447 */ 6448 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6449 NAPIF_STATE_IN_BUSY_POLL))) 6450 return false; 6451 6452 if (work_done) { 6453 if (n->gro_bitmask) 6454 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6455 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6456 } 6457 if (n->defer_hard_irqs_count > 0) { 6458 n->defer_hard_irqs_count--; 6459 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6460 if (timeout) 6461 ret = false; 6462 } 6463 if (n->gro_bitmask) { 6464 /* When the NAPI instance uses a timeout and keeps postponing 6465 * it, we need to bound somehow the time packets are kept in 6466 * the GRO layer 6467 */ 6468 napi_gro_flush(n, !!timeout); 6469 } 6470 6471 gro_normal_list(n); 6472 6473 if (unlikely(!list_empty(&n->poll_list))) { 6474 /* If n->poll_list is not empty, we need to mask irqs */ 6475 local_irq_save(flags); 6476 list_del_init(&n->poll_list); 6477 local_irq_restore(flags); 6478 } 6479 6480 do { 6481 val = READ_ONCE(n->state); 6482 6483 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6484 6485 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6486 NAPIF_STATE_PREFER_BUSY_POLL); 6487 6488 /* If STATE_MISSED was set, leave STATE_SCHED set, 6489 * because we will call napi->poll() one more time. 6490 * This C code was suggested by Alexander Duyck to help gcc. 
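 * The expression below evaluates to NAPIF_STATE_SCHED when
 * NAPIF_STATE_MISSED is set in val, and to 0 otherwise, without a
 * conditional branch.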
6491 */ 6492 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6493 NAPIF_STATE_SCHED; 6494 } while (cmpxchg(&n->state, val, new) != val); 6495 6496 if (unlikely(val & NAPIF_STATE_MISSED)) { 6497 __napi_schedule(n); 6498 return false; 6499 } 6500 6501 if (timeout) 6502 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6503 HRTIMER_MODE_REL_PINNED); 6504 return ret; 6505 } 6506 EXPORT_SYMBOL(napi_complete_done); 6507 6508 /* must be called under rcu_read_lock(), as we dont take a reference */ 6509 static struct napi_struct *napi_by_id(unsigned int napi_id) 6510 { 6511 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6512 struct napi_struct *napi; 6513 6514 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6515 if (napi->napi_id == napi_id) 6516 return napi; 6517 6518 return NULL; 6519 } 6520 6521 #if defined(CONFIG_NET_RX_BUSY_POLL) 6522 6523 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6524 { 6525 if (!skip_schedule) { 6526 gro_normal_list(napi); 6527 __napi_schedule(napi); 6528 return; 6529 } 6530 6531 if (napi->gro_bitmask) { 6532 /* flush too old packets 6533 * If HZ < 1000, flush all packets. 6534 */ 6535 napi_gro_flush(napi, HZ >= 1000); 6536 } 6537 6538 gro_normal_list(napi); 6539 clear_bit(NAPI_STATE_SCHED, &napi->state); 6540 } 6541 6542 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6543 u16 budget) 6544 { 6545 bool skip_schedule = false; 6546 unsigned long timeout; 6547 int rc; 6548 6549 /* Busy polling means there is a high chance device driver hard irq 6550 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6551 * set in napi_schedule_prep(). 6552 * Since we are about to call napi->poll() once more, we can safely 6553 * clear NAPI_STATE_MISSED. 6554 * 6555 * Note: x86 could use a single "lock and ..." instruction 6556 * to perform these two clear_bit() 6557 */ 6558 clear_bit(NAPI_STATE_MISSED, &napi->state); 6559 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6560 6561 local_bh_disable(); 6562 6563 if (prefer_busy_poll) { 6564 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6565 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6566 if (napi->defer_hard_irqs_count && timeout) { 6567 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6568 skip_schedule = true; 6569 } 6570 } 6571 6572 /* All we really want here is to re-enable device interrupts. 6573 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6574 */ 6575 rc = napi->poll(napi, budget); 6576 /* We can't gro_normal_list() here, because napi->poll() might have 6577 * rearmed the napi (napi_complete_done()) in which case it could 6578 * already be running on another CPU. 6579 */ 6580 trace_napi_poll(napi, rc, budget); 6581 netpoll_poll_unlock(have_poll_lock); 6582 if (rc == budget) 6583 __busy_poll_stop(napi, skip_schedule); 6584 local_bh_enable(); 6585 } 6586 6587 void napi_busy_loop(unsigned int napi_id, 6588 bool (*loop_end)(void *, unsigned long), 6589 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6590 { 6591 unsigned long start_time = loop_end ? 
busy_loop_current_time() : 0; 6592 int (*napi_poll)(struct napi_struct *napi, int budget); 6593 void *have_poll_lock = NULL; 6594 struct napi_struct *napi; 6595 6596 restart: 6597 napi_poll = NULL; 6598 6599 rcu_read_lock(); 6600 6601 napi = napi_by_id(napi_id); 6602 if (!napi) 6603 goto out; 6604 6605 preempt_disable(); 6606 for (;;) { 6607 int work = 0; 6608 6609 local_bh_disable(); 6610 if (!napi_poll) { 6611 unsigned long val = READ_ONCE(napi->state); 6612 6613 /* If multiple threads are competing for this napi, 6614 * we avoid dirtying napi->state as much as we can. 6615 */ 6616 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6617 NAPIF_STATE_IN_BUSY_POLL)) { 6618 if (prefer_busy_poll) 6619 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6620 goto count; 6621 } 6622 if (cmpxchg(&napi->state, val, 6623 val | NAPIF_STATE_IN_BUSY_POLL | 6624 NAPIF_STATE_SCHED) != val) { 6625 if (prefer_busy_poll) 6626 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6627 goto count; 6628 } 6629 have_poll_lock = netpoll_poll_lock(napi); 6630 napi_poll = napi->poll; 6631 } 6632 work = napi_poll(napi, budget); 6633 trace_napi_poll(napi, work, budget); 6634 gro_normal_list(napi); 6635 count: 6636 if (work > 0) 6637 __NET_ADD_STATS(dev_net(napi->dev), 6638 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6639 local_bh_enable(); 6640 6641 if (!loop_end || loop_end(loop_end_arg, start_time)) 6642 break; 6643 6644 if (unlikely(need_resched())) { 6645 if (napi_poll) 6646 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6647 preempt_enable(); 6648 rcu_read_unlock(); 6649 cond_resched(); 6650 if (loop_end(loop_end_arg, start_time)) 6651 return; 6652 goto restart; 6653 } 6654 cpu_relax(); 6655 } 6656 if (napi_poll) 6657 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6658 preempt_enable(); 6659 out: 6660 rcu_read_unlock(); 6661 } 6662 EXPORT_SYMBOL(napi_busy_loop); 6663 6664 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6665 6666 static void napi_hash_add(struct napi_struct *napi) 6667 { 6668 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6669 return; 6670 6671 spin_lock(&napi_hash_lock); 6672 6673 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6674 do { 6675 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6676 napi_gen_id = MIN_NAPI_ID; 6677 } while (napi_by_id(napi_gen_id)); 6678 napi->napi_id = napi_gen_id; 6679 6680 hlist_add_head_rcu(&napi->napi_hash_node, 6681 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6682 6683 spin_unlock(&napi_hash_lock); 6684 } 6685 6686 /* Warning : caller is responsible to make sure rcu grace period 6687 * is respected before freeing memory containing @napi 6688 */ 6689 static void napi_hash_del(struct napi_struct *napi) 6690 { 6691 spin_lock(&napi_hash_lock); 6692 6693 hlist_del_init_rcu(&napi->napi_hash_node); 6694 6695 spin_unlock(&napi_hash_lock); 6696 } 6697 6698 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6699 { 6700 struct napi_struct *napi; 6701 6702 napi = container_of(timer, struct napi_struct, timer); 6703 6704 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6705 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
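 * If NAPI_STATE_SCHED is already set, a poll is still in flight and the
 * watchdog leaves it alone; napi_complete_done() re-arms the timer while
 * a flush timeout remains in effect.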
6706 */ 6707 if (!napi_disable_pending(napi) && 6708 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6709 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6710 __napi_schedule_irqoff(napi); 6711 } 6712 6713 return HRTIMER_NORESTART; 6714 } 6715 6716 static void init_gro_hash(struct napi_struct *napi) 6717 { 6718 int i; 6719 6720 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6721 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6722 napi->gro_hash[i].count = 0; 6723 } 6724 napi->gro_bitmask = 0; 6725 } 6726 6727 int dev_set_threaded(struct net_device *dev, bool threaded) 6728 { 6729 struct napi_struct *napi; 6730 int err = 0; 6731 6732 if (dev->threaded == threaded) 6733 return 0; 6734 6735 if (threaded) { 6736 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6737 if (!napi->thread) { 6738 err = napi_kthread_create(napi); 6739 if (err) { 6740 threaded = false; 6741 break; 6742 } 6743 } 6744 } 6745 } 6746 6747 dev->threaded = threaded; 6748 6749 /* Make sure kthread is created before THREADED bit 6750 * is set. 6751 */ 6752 smp_mb__before_atomic(); 6753 6754 /* Setting/unsetting threaded mode on a napi might not immediately 6755 * take effect, if the current napi instance is actively being 6756 * polled. In this case, the switch between threaded mode and 6757 * softirq mode will happen in the next round of napi_schedule(). 6758 * This should not cause hiccups/stalls to the live traffic. 6759 */ 6760 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6761 if (threaded) 6762 set_bit(NAPI_STATE_THREADED, &napi->state); 6763 else 6764 clear_bit(NAPI_STATE_THREADED, &napi->state); 6765 } 6766 6767 return err; 6768 } 6769 EXPORT_SYMBOL(dev_set_threaded); 6770 6771 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6772 int (*poll)(struct napi_struct *, int), int weight) 6773 { 6774 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6775 return; 6776 6777 INIT_LIST_HEAD(&napi->poll_list); 6778 INIT_HLIST_NODE(&napi->napi_hash_node); 6779 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6780 napi->timer.function = napi_watchdog; 6781 init_gro_hash(napi); 6782 napi->skb = NULL; 6783 INIT_LIST_HEAD(&napi->rx_list); 6784 napi->rx_count = 0; 6785 napi->poll = poll; 6786 if (weight > NAPI_POLL_WEIGHT) 6787 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6788 weight); 6789 napi->weight = weight; 6790 napi->dev = dev; 6791 #ifdef CONFIG_NETPOLL 6792 napi->poll_owner = -1; 6793 #endif 6794 set_bit(NAPI_STATE_SCHED, &napi->state); 6795 set_bit(NAPI_STATE_NPSVC, &napi->state); 6796 list_add_rcu(&napi->dev_list, &dev->napi_list); 6797 napi_hash_add(napi); 6798 /* Create kthread for this napi if dev->threaded is set. 6799 * Clear dev->threaded if kthread creation failed so that 6800 * threaded mode will not be enabled in napi_enable(). 
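 * napi_enable() only sets NAPI_STATE_THREADED when dev->threaded is set
 * and napi->thread exists, so clearing dev->threaded here keeps this
 * instance in softirq mode.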
6801 */ 6802 if (dev->threaded && napi_kthread_create(napi)) 6803 dev->threaded = 0; 6804 } 6805 EXPORT_SYMBOL(netif_napi_add); 6806 6807 void napi_disable(struct napi_struct *n) 6808 { 6809 might_sleep(); 6810 set_bit(NAPI_STATE_DISABLE, &n->state); 6811 6812 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6813 msleep(1); 6814 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6815 msleep(1); 6816 6817 hrtimer_cancel(&n->timer); 6818 6819 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); 6820 clear_bit(NAPI_STATE_DISABLE, &n->state); 6821 clear_bit(NAPI_STATE_THREADED, &n->state); 6822 } 6823 EXPORT_SYMBOL(napi_disable); 6824 6825 /** 6826 * napi_enable - enable NAPI scheduling 6827 * @n: NAPI context 6828 * 6829 * Resume NAPI from being scheduled on this context. 6830 * Must be paired with napi_disable. 6831 */ 6832 void napi_enable(struct napi_struct *n) 6833 { 6834 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 6835 smp_mb__before_atomic(); 6836 clear_bit(NAPI_STATE_SCHED, &n->state); 6837 clear_bit(NAPI_STATE_NPSVC, &n->state); 6838 if (n->dev->threaded && n->thread) 6839 set_bit(NAPI_STATE_THREADED, &n->state); 6840 } 6841 EXPORT_SYMBOL(napi_enable); 6842 6843 static void flush_gro_hash(struct napi_struct *napi) 6844 { 6845 int i; 6846 6847 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6848 struct sk_buff *skb, *n; 6849 6850 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6851 kfree_skb(skb); 6852 napi->gro_hash[i].count = 0; 6853 } 6854 } 6855 6856 /* Must be called in process context */ 6857 void __netif_napi_del(struct napi_struct *napi) 6858 { 6859 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6860 return; 6861 6862 napi_hash_del(napi); 6863 list_del_rcu(&napi->dev_list); 6864 napi_free_frags(napi); 6865 6866 flush_gro_hash(napi); 6867 napi->gro_bitmask = 0; 6868 6869 if (napi->thread) { 6870 kthread_stop(napi->thread); 6871 napi->thread = NULL; 6872 } 6873 } 6874 EXPORT_SYMBOL(__netif_napi_del); 6875 6876 static int __napi_poll(struct napi_struct *n, bool *repoll) 6877 { 6878 int work, weight; 6879 6880 weight = n->weight; 6881 6882 /* This NAPI_STATE_SCHED test is for avoiding a race 6883 * with netpoll's poll_napi(). Only the entity which 6884 * obtains the lock and sees NAPI_STATE_SCHED set will 6885 * actually make the ->poll() call. Therefore we avoid 6886 * accidentally calling ->poll() when NAPI is not scheduled. 6887 */ 6888 work = 0; 6889 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6890 work = n->poll(n, weight); 6891 trace_napi_poll(n, work, weight); 6892 } 6893 6894 if (unlikely(work > weight)) 6895 pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6896 n->poll, work, weight); 6897 6898 if (likely(work < weight)) 6899 return work; 6900 6901 /* Drivers must not modify the NAPI state if they 6902 * consume the entire weight. In such cases this code 6903 * still "owns" the NAPI instance and therefore can 6904 * move the instance around on the list at-will. 6905 */ 6906 if (unlikely(napi_disable_pending(n))) { 6907 napi_complete(n); 6908 return work; 6909 } 6910 6911 /* The NAPI context has more processing work, but busy-polling 6912 * is preferred. Exit early. 6913 */ 6914 if (napi_prefer_busy_poll(n)) { 6915 if (napi_complete_done(n, work)) { 6916 /* If timeout is not set, we need to make sure 6917 * that the NAPI is re-scheduled. 6918 */ 6919 napi_schedule(n); 6920 } 6921 return work; 6922 } 6923 6924 if (n->gro_bitmask) { 6925 /* flush too old packets 6926 * If HZ < 1000, flush all packets. 
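 * (when HZ is below 1000 a jiffy exceeds one millisecond, so the
 * one-jiffy age test would be too coarse to bound GRO latency)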
6927 */ 6928 napi_gro_flush(n, HZ >= 1000); 6929 } 6930 6931 gro_normal_list(n); 6932 6933 /* Some drivers may have called napi_schedule 6934 * prior to exhausting their budget. 6935 */ 6936 if (unlikely(!list_empty(&n->poll_list))) { 6937 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6938 n->dev ? n->dev->name : "backlog"); 6939 return work; 6940 } 6941 6942 *repoll = true; 6943 6944 return work; 6945 } 6946 6947 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6948 { 6949 bool do_repoll = false; 6950 void *have; 6951 int work; 6952 6953 list_del_init(&n->poll_list); 6954 6955 have = netpoll_poll_lock(n); 6956 6957 work = __napi_poll(n, &do_repoll); 6958 6959 if (do_repoll) 6960 list_add_tail(&n->poll_list, repoll); 6961 6962 netpoll_poll_unlock(have); 6963 6964 return work; 6965 } 6966 6967 static int napi_thread_wait(struct napi_struct *napi) 6968 { 6969 set_current_state(TASK_INTERRUPTIBLE); 6970 6971 while (!kthread_should_stop() && !napi_disable_pending(napi)) { 6972 if (test_bit(NAPI_STATE_SCHED, &napi->state)) { 6973 WARN_ON(!list_empty(&napi->poll_list)); 6974 __set_current_state(TASK_RUNNING); 6975 return 0; 6976 } 6977 6978 schedule(); 6979 set_current_state(TASK_INTERRUPTIBLE); 6980 } 6981 __set_current_state(TASK_RUNNING); 6982 return -1; 6983 } 6984 6985 static int napi_threaded_poll(void *data) 6986 { 6987 struct napi_struct *napi = data; 6988 void *have; 6989 6990 while (!napi_thread_wait(napi)) { 6991 for (;;) { 6992 bool repoll = false; 6993 6994 local_bh_disable(); 6995 6996 have = netpoll_poll_lock(napi); 6997 __napi_poll(napi, &repoll); 6998 netpoll_poll_unlock(have); 6999 7000 local_bh_enable(); 7001 7002 if (!repoll) 7003 break; 7004 7005 cond_resched(); 7006 } 7007 } 7008 return 0; 7009 } 7010 7011 static __latent_entropy void net_rx_action(struct softirq_action *h) 7012 { 7013 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 7014 unsigned long time_limit = jiffies + 7015 usecs_to_jiffies(netdev_budget_usecs); 7016 int budget = netdev_budget; 7017 LIST_HEAD(list); 7018 LIST_HEAD(repoll); 7019 7020 local_irq_disable(); 7021 list_splice_init(&sd->poll_list, &list); 7022 local_irq_enable(); 7023 7024 for (;;) { 7025 struct napi_struct *n; 7026 7027 if (list_empty(&list)) { 7028 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 7029 return; 7030 break; 7031 } 7032 7033 n = list_first_entry(&list, struct napi_struct, poll_list); 7034 budget -= napi_poll(n, &repoll); 7035 7036 /* If softirq window is exhausted then punt. 7037 * Allow this to run for 2 jiffies since which will allow 7038 * an average latency of 1.5/HZ. 
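 * time_limit above derives from netdev_budget_usecs, while budget counts
 * packets handled across all NAPI instances polled in this run.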
7039 */ 7040 if (unlikely(budget <= 0 || 7041 time_after_eq(jiffies, time_limit))) { 7042 sd->time_squeeze++; 7043 break; 7044 } 7045 } 7046 7047 local_irq_disable(); 7048 7049 list_splice_tail_init(&sd->poll_list, &list); 7050 list_splice_tail(&repoll, &list); 7051 list_splice(&list, &sd->poll_list); 7052 if (!list_empty(&sd->poll_list)) 7053 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 7054 7055 net_rps_action_and_irq_enable(sd); 7056 } 7057 7058 struct netdev_adjacent { 7059 struct net_device *dev; 7060 7061 /* upper master flag, there can only be one master device per list */ 7062 bool master; 7063 7064 /* lookup ignore flag */ 7065 bool ignore; 7066 7067 /* counter for the number of times this device was added to us */ 7068 u16 ref_nr; 7069 7070 /* private field for the users */ 7071 void *private; 7072 7073 struct list_head list; 7074 struct rcu_head rcu; 7075 }; 7076 7077 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 7078 struct list_head *adj_list) 7079 { 7080 struct netdev_adjacent *adj; 7081 7082 list_for_each_entry(adj, adj_list, list) { 7083 if (adj->dev == adj_dev) 7084 return adj; 7085 } 7086 return NULL; 7087 } 7088 7089 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 7090 struct netdev_nested_priv *priv) 7091 { 7092 struct net_device *dev = (struct net_device *)priv->data; 7093 7094 return upper_dev == dev; 7095 } 7096 7097 /** 7098 * netdev_has_upper_dev - Check if device is linked to an upper device 7099 * @dev: device 7100 * @upper_dev: upper device to check 7101 * 7102 * Find out if a device is linked to specified upper device and return true 7103 * in case it is. Note that this checks only immediate upper device, 7104 * not through a complete stack of devices. The caller must hold the RTNL lock. 7105 */ 7106 bool netdev_has_upper_dev(struct net_device *dev, 7107 struct net_device *upper_dev) 7108 { 7109 struct netdev_nested_priv priv = { 7110 .data = (void *)upper_dev, 7111 }; 7112 7113 ASSERT_RTNL(); 7114 7115 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7116 &priv); 7117 } 7118 EXPORT_SYMBOL(netdev_has_upper_dev); 7119 7120 /** 7121 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 7122 * @dev: device 7123 * @upper_dev: upper device to check 7124 * 7125 * Find out if a device is linked to specified upper device and return true 7126 * in case it is. Note that this checks the entire upper device chain. 7127 * The caller must hold rcu lock. 7128 */ 7129 7130 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 7131 struct net_device *upper_dev) 7132 { 7133 struct netdev_nested_priv priv = { 7134 .data = (void *)upper_dev, 7135 }; 7136 7137 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7138 &priv); 7139 } 7140 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 7141 7142 /** 7143 * netdev_has_any_upper_dev - Check if device is linked to some device 7144 * @dev: device 7145 * 7146 * Find out if a device is linked to an upper device and return true in case 7147 * it is. The caller must hold the RTNL lock. 7148 */ 7149 bool netdev_has_any_upper_dev(struct net_device *dev) 7150 { 7151 ASSERT_RTNL(); 7152 7153 return !list_empty(&dev->adj_list.upper); 7154 } 7155 EXPORT_SYMBOL(netdev_has_any_upper_dev); 7156 7157 /** 7158 * netdev_master_upper_dev_get - Get master upper device 7159 * @dev: device 7160 * 7161 * Find a master upper device and return pointer to it or NULL in case 7162 * it's not there. The caller must hold the RTNL lock. 
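 * The master link, when present, is kept at the head of adj_list.upper
 * (see __netdev_adjacent_dev_insert()), which is why inspecting the first
 * entry is sufficient.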
7163 */ 7164 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 7165 { 7166 struct netdev_adjacent *upper; 7167 7168 ASSERT_RTNL(); 7169 7170 if (list_empty(&dev->adj_list.upper)) 7171 return NULL; 7172 7173 upper = list_first_entry(&dev->adj_list.upper, 7174 struct netdev_adjacent, list); 7175 if (likely(upper->master)) 7176 return upper->dev; 7177 return NULL; 7178 } 7179 EXPORT_SYMBOL(netdev_master_upper_dev_get); 7180 7181 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 7182 { 7183 struct netdev_adjacent *upper; 7184 7185 ASSERT_RTNL(); 7186 7187 if (list_empty(&dev->adj_list.upper)) 7188 return NULL; 7189 7190 upper = list_first_entry(&dev->adj_list.upper, 7191 struct netdev_adjacent, list); 7192 if (likely(upper->master) && !upper->ignore) 7193 return upper->dev; 7194 return NULL; 7195 } 7196 7197 /** 7198 * netdev_has_any_lower_dev - Check if device is linked to some device 7199 * @dev: device 7200 * 7201 * Find out if a device is linked to a lower device and return true in case 7202 * it is. The caller must hold the RTNL lock. 7203 */ 7204 static bool netdev_has_any_lower_dev(struct net_device *dev) 7205 { 7206 ASSERT_RTNL(); 7207 7208 return !list_empty(&dev->adj_list.lower); 7209 } 7210 7211 void *netdev_adjacent_get_private(struct list_head *adj_list) 7212 { 7213 struct netdev_adjacent *adj; 7214 7215 adj = list_entry(adj_list, struct netdev_adjacent, list); 7216 7217 return adj->private; 7218 } 7219 EXPORT_SYMBOL(netdev_adjacent_get_private); 7220 7221 /** 7222 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 7223 * @dev: device 7224 * @iter: list_head ** of the current position 7225 * 7226 * Gets the next device from the dev's upper list, starting from iter 7227 * position. The caller must hold RCU read lock. 
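 *
 * A minimal usage sketch (hypothetical caller; assumes rcu_read_lock()
 * is already held and "dev" is a valid net_device):
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_info("upper of %s: %s\n", dev->name, upper->name);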
7228 */ 7229 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 7230 struct list_head **iter) 7231 { 7232 struct netdev_adjacent *upper; 7233 7234 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7235 7236 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7237 7238 if (&upper->list == &dev->adj_list.upper) 7239 return NULL; 7240 7241 *iter = &upper->list; 7242 7243 return upper->dev; 7244 } 7245 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 7246 7247 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 7248 struct list_head **iter, 7249 bool *ignore) 7250 { 7251 struct netdev_adjacent *upper; 7252 7253 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 7254 7255 if (&upper->list == &dev->adj_list.upper) 7256 return NULL; 7257 7258 *iter = &upper->list; 7259 *ignore = upper->ignore; 7260 7261 return upper->dev; 7262 } 7263 7264 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 7265 struct list_head **iter) 7266 { 7267 struct netdev_adjacent *upper; 7268 7269 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7270 7271 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7272 7273 if (&upper->list == &dev->adj_list.upper) 7274 return NULL; 7275 7276 *iter = &upper->list; 7277 7278 return upper->dev; 7279 } 7280 7281 static int __netdev_walk_all_upper_dev(struct net_device *dev, 7282 int (*fn)(struct net_device *dev, 7283 struct netdev_nested_priv *priv), 7284 struct netdev_nested_priv *priv) 7285 { 7286 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7287 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7288 int ret, cur = 0; 7289 bool ignore; 7290 7291 now = dev; 7292 iter = &dev->adj_list.upper; 7293 7294 while (1) { 7295 if (now != dev) { 7296 ret = fn(now, priv); 7297 if (ret) 7298 return ret; 7299 } 7300 7301 next = NULL; 7302 while (1) { 7303 udev = __netdev_next_upper_dev(now, &iter, &ignore); 7304 if (!udev) 7305 break; 7306 if (ignore) 7307 continue; 7308 7309 next = udev; 7310 niter = &udev->adj_list.upper; 7311 dev_stack[cur] = now; 7312 iter_stack[cur++] = iter; 7313 break; 7314 } 7315 7316 if (!next) { 7317 if (!cur) 7318 return 0; 7319 next = dev_stack[--cur]; 7320 niter = iter_stack[cur]; 7321 } 7322 7323 now = next; 7324 iter = niter; 7325 } 7326 7327 return 0; 7328 } 7329 7330 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 7331 int (*fn)(struct net_device *dev, 7332 struct netdev_nested_priv *priv), 7333 struct netdev_nested_priv *priv) 7334 { 7335 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7336 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7337 int ret, cur = 0; 7338 7339 now = dev; 7340 iter = &dev->adj_list.upper; 7341 7342 while (1) { 7343 if (now != dev) { 7344 ret = fn(now, priv); 7345 if (ret) 7346 return ret; 7347 } 7348 7349 next = NULL; 7350 while (1) { 7351 udev = netdev_next_upper_dev_rcu(now, &iter); 7352 if (!udev) 7353 break; 7354 7355 next = udev; 7356 niter = &udev->adj_list.upper; 7357 dev_stack[cur] = now; 7358 iter_stack[cur++] = iter; 7359 break; 7360 } 7361 7362 if (!next) { 7363 if (!cur) 7364 return 0; 7365 next = dev_stack[--cur]; 7366 niter = iter_stack[cur]; 7367 } 7368 7369 now = next; 7370 iter = niter; 7371 } 7372 7373 return 0; 7374 } 7375 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7376 7377 static bool __netdev_has_upper_dev(struct net_device *dev, 7378 struct net_device *upper_dev) 7379 { 7380 struct 
netdev_nested_priv priv = { 7381 .flags = 0, 7382 .data = (void *)upper_dev, 7383 }; 7384 7385 ASSERT_RTNL(); 7386 7387 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7388 &priv); 7389 } 7390 7391 /** 7392 * netdev_lower_get_next_private - Get the next ->private from the 7393 * lower neighbour list 7394 * @dev: device 7395 * @iter: list_head ** of the current position 7396 * 7397 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7398 * list, starting from iter position. The caller must hold either the 7399 * RTNL lock or its own locking that guarantees that the neighbour lower 7400 * list will remain unchanged. 7401 */ 7402 void *netdev_lower_get_next_private(struct net_device *dev, 7403 struct list_head **iter) 7404 { 7405 struct netdev_adjacent *lower; 7406 7407 lower = list_entry(*iter, struct netdev_adjacent, list); 7408 7409 if (&lower->list == &dev->adj_list.lower) 7410 return NULL; 7411 7412 *iter = lower->list.next; 7413 7414 return lower->private; 7415 } 7416 EXPORT_SYMBOL(netdev_lower_get_next_private); 7417 7418 /** 7419 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7420 * lower neighbour list, RCU 7421 * variant 7422 * @dev: device 7423 * @iter: list_head ** of the current position 7424 * 7425 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7426 * list, starting from iter position. The caller must hold the RCU read lock. 7427 */ 7428 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7429 struct list_head **iter) 7430 { 7431 struct netdev_adjacent *lower; 7432 7433 WARN_ON_ONCE(!rcu_read_lock_held()); 7434 7435 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7436 7437 if (&lower->list == &dev->adj_list.lower) 7438 return NULL; 7439 7440 *iter = &lower->list; 7441 7442 return lower->private; 7443 } 7444 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7445 7446 /** 7447 * netdev_lower_get_next - Get the next device from the lower neighbour 7448 * list 7449 * @dev: device 7450 * @iter: list_head ** of the current position 7451 * 7452 * Gets the next netdev_adjacent from the dev's lower neighbour 7453 * list, starting from iter position. The caller must hold the RTNL lock or 7454 * its own locking that guarantees that the neighbour lower 7455 * list will remain unchanged.
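 *
 * A minimal usage sketch (hypothetical caller; assumes the RTNL lock is
 * held). Iteration starts from the first entry rather than the list head,
 * mirroring how netdev_for_each_lower_dev() initializes its iterator:
 *
 *	struct list_head *iter = dev->adj_list.lower.next;
 *	struct net_device *ldev;
 *
 *	while ((ldev = netdev_lower_get_next(dev, &iter)))
 *		pr_info("lower of %s: %s\n", dev->name, ldev->name);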
7456 */ 7457 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7458 { 7459 struct netdev_adjacent *lower; 7460 7461 lower = list_entry(*iter, struct netdev_adjacent, list); 7462 7463 if (&lower->list == &dev->adj_list.lower) 7464 return NULL; 7465 7466 *iter = lower->list.next; 7467 7468 return lower->dev; 7469 } 7470 EXPORT_SYMBOL(netdev_lower_get_next); 7471 7472 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7473 struct list_head **iter) 7474 { 7475 struct netdev_adjacent *lower; 7476 7477 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7478 7479 if (&lower->list == &dev->adj_list.lower) 7480 return NULL; 7481 7482 *iter = &lower->list; 7483 7484 return lower->dev; 7485 } 7486 7487 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7488 struct list_head **iter, 7489 bool *ignore) 7490 { 7491 struct netdev_adjacent *lower; 7492 7493 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7494 7495 if (&lower->list == &dev->adj_list.lower) 7496 return NULL; 7497 7498 *iter = &lower->list; 7499 *ignore = lower->ignore; 7500 7501 return lower->dev; 7502 } 7503 7504 int netdev_walk_all_lower_dev(struct net_device *dev, 7505 int (*fn)(struct net_device *dev, 7506 struct netdev_nested_priv *priv), 7507 struct netdev_nested_priv *priv) 7508 { 7509 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7510 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7511 int ret, cur = 0; 7512 7513 now = dev; 7514 iter = &dev->adj_list.lower; 7515 7516 while (1) { 7517 if (now != dev) { 7518 ret = fn(now, priv); 7519 if (ret) 7520 return ret; 7521 } 7522 7523 next = NULL; 7524 while (1) { 7525 ldev = netdev_next_lower_dev(now, &iter); 7526 if (!ldev) 7527 break; 7528 7529 next = ldev; 7530 niter = &ldev->adj_list.lower; 7531 dev_stack[cur] = now; 7532 iter_stack[cur++] = iter; 7533 break; 7534 } 7535 7536 if (!next) { 7537 if (!cur) 7538 return 0; 7539 next = dev_stack[--cur]; 7540 niter = iter_stack[cur]; 7541 } 7542 7543 now = next; 7544 iter = niter; 7545 } 7546 7547 return 0; 7548 } 7549 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7550 7551 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7552 int (*fn)(struct net_device *dev, 7553 struct netdev_nested_priv *priv), 7554 struct netdev_nested_priv *priv) 7555 { 7556 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7557 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7558 int ret, cur = 0; 7559 bool ignore; 7560 7561 now = dev; 7562 iter = &dev->adj_list.lower; 7563 7564 while (1) { 7565 if (now != dev) { 7566 ret = fn(now, priv); 7567 if (ret) 7568 return ret; 7569 } 7570 7571 next = NULL; 7572 while (1) { 7573 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7574 if (!ldev) 7575 break; 7576 if (ignore) 7577 continue; 7578 7579 next = ldev; 7580 niter = &ldev->adj_list.lower; 7581 dev_stack[cur] = now; 7582 iter_stack[cur++] = iter; 7583 break; 7584 } 7585 7586 if (!next) { 7587 if (!cur) 7588 return 0; 7589 next = dev_stack[--cur]; 7590 niter = iter_stack[cur]; 7591 } 7592 7593 now = next; 7594 iter = niter; 7595 } 7596 7597 return 0; 7598 } 7599 7600 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7601 struct list_head **iter) 7602 { 7603 struct netdev_adjacent *lower; 7604 7605 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7606 if (&lower->list == &dev->adj_list.lower) 7607 return NULL; 7608 7609 *iter = &lower->list; 7610 7611 
return lower->dev; 7612 } 7613 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7614 7615 static u8 __netdev_upper_depth(struct net_device *dev) 7616 { 7617 struct net_device *udev; 7618 struct list_head *iter; 7619 u8 max_depth = 0; 7620 bool ignore; 7621 7622 for (iter = &dev->adj_list.upper, 7623 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7624 udev; 7625 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7626 if (ignore) 7627 continue; 7628 if (max_depth < udev->upper_level) 7629 max_depth = udev->upper_level; 7630 } 7631 7632 return max_depth; 7633 } 7634 7635 static u8 __netdev_lower_depth(struct net_device *dev) 7636 { 7637 struct net_device *ldev; 7638 struct list_head *iter; 7639 u8 max_depth = 0; 7640 bool ignore; 7641 7642 for (iter = &dev->adj_list.lower, 7643 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7644 ldev; 7645 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7646 if (ignore) 7647 continue; 7648 if (max_depth < ldev->lower_level) 7649 max_depth = ldev->lower_level; 7650 } 7651 7652 return max_depth; 7653 } 7654 7655 static int __netdev_update_upper_level(struct net_device *dev, 7656 struct netdev_nested_priv *__unused) 7657 { 7658 dev->upper_level = __netdev_upper_depth(dev) + 1; 7659 return 0; 7660 } 7661 7662 static int __netdev_update_lower_level(struct net_device *dev, 7663 struct netdev_nested_priv *priv) 7664 { 7665 dev->lower_level = __netdev_lower_depth(dev) + 1; 7666 7667 #ifdef CONFIG_LOCKDEP 7668 if (!priv) 7669 return 0; 7670 7671 if (priv->flags & NESTED_SYNC_IMM) 7672 dev->nested_level = dev->lower_level - 1; 7673 if (priv->flags & NESTED_SYNC_TODO) 7674 net_unlink_todo(dev); 7675 #endif 7676 return 0; 7677 } 7678 7679 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7680 int (*fn)(struct net_device *dev, 7681 struct netdev_nested_priv *priv), 7682 struct netdev_nested_priv *priv) 7683 { 7684 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7685 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7686 int ret, cur = 0; 7687 7688 now = dev; 7689 iter = &dev->adj_list.lower; 7690 7691 while (1) { 7692 if (now != dev) { 7693 ret = fn(now, priv); 7694 if (ret) 7695 return ret; 7696 } 7697 7698 next = NULL; 7699 while (1) { 7700 ldev = netdev_next_lower_dev_rcu(now, &iter); 7701 if (!ldev) 7702 break; 7703 7704 next = ldev; 7705 niter = &ldev->adj_list.lower; 7706 dev_stack[cur] = now; 7707 iter_stack[cur++] = iter; 7708 break; 7709 } 7710 7711 if (!next) { 7712 if (!cur) 7713 return 0; 7714 next = dev_stack[--cur]; 7715 niter = iter_stack[cur]; 7716 } 7717 7718 now = next; 7719 iter = niter; 7720 } 7721 7722 return 0; 7723 } 7724 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7725 7726 /** 7727 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7728 * lower neighbour list, RCU 7729 * variant 7730 * @dev: device 7731 * 7732 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7733 * list. The caller must hold RCU read lock. 
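 * Returns NULL if the lower adjacency list is empty.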
7734 */ 7735 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7736 { 7737 struct netdev_adjacent *lower; 7738 7739 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7740 struct netdev_adjacent, list); 7741 if (lower) 7742 return lower->private; 7743 return NULL; 7744 } 7745 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7746 7747 /** 7748 * netdev_master_upper_dev_get_rcu - Get master upper device 7749 * @dev: device 7750 * 7751 * Find a master upper device and return pointer to it or NULL in case 7752 * it's not there. The caller must hold the RCU read lock. 7753 */ 7754 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7755 { 7756 struct netdev_adjacent *upper; 7757 7758 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7759 struct netdev_adjacent, list); 7760 if (upper && likely(upper->master)) 7761 return upper->dev; 7762 return NULL; 7763 } 7764 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7765 7766 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7767 struct net_device *adj_dev, 7768 struct list_head *dev_list) 7769 { 7770 char linkname[IFNAMSIZ+7]; 7771 7772 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7773 "upper_%s" : "lower_%s", adj_dev->name); 7774 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7775 linkname); 7776 } 7777 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7778 char *name, 7779 struct list_head *dev_list) 7780 { 7781 char linkname[IFNAMSIZ+7]; 7782 7783 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7784 "upper_%s" : "lower_%s", name); 7785 sysfs_remove_link(&(dev->dev.kobj), linkname); 7786 } 7787 7788 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7789 struct net_device *adj_dev, 7790 struct list_head *dev_list) 7791 { 7792 return (dev_list == &dev->adj_list.upper || 7793 dev_list == &dev->adj_list.lower) && 7794 net_eq(dev_net(dev), dev_net(adj_dev)); 7795 } 7796 7797 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7798 struct net_device *adj_dev, 7799 struct list_head *dev_list, 7800 void *private, bool master) 7801 { 7802 struct netdev_adjacent *adj; 7803 int ret; 7804 7805 adj = __netdev_find_adj(adj_dev, dev_list); 7806 7807 if (adj) { 7808 adj->ref_nr += 1; 7809 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7810 dev->name, adj_dev->name, adj->ref_nr); 7811 7812 return 0; 7813 } 7814 7815 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7816 if (!adj) 7817 return -ENOMEM; 7818 7819 adj->dev = adj_dev; 7820 adj->master = master; 7821 adj->ref_nr = 1; 7822 adj->private = private; 7823 adj->ignore = false; 7824 dev_hold(adj_dev); 7825 7826 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7827 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7828 7829 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7830 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7831 if (ret) 7832 goto free_adj; 7833 } 7834 7835 /* Ensure that master link is always the first item in list. 
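 * netdev_master_upper_dev_get() and netdev_master_upper_dev_get_rcu()
 * rely on this ordering: they only look at the first entry of
 * adj_list.upper.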
*/ 7836 if (master) { 7837 ret = sysfs_create_link(&(dev->dev.kobj), 7838 &(adj_dev->dev.kobj), "master"); 7839 if (ret) 7840 goto remove_symlinks; 7841 7842 list_add_rcu(&adj->list, dev_list); 7843 } else { 7844 list_add_tail_rcu(&adj->list, dev_list); 7845 } 7846 7847 return 0; 7848 7849 remove_symlinks: 7850 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7851 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7852 free_adj: 7853 kfree(adj); 7854 dev_put(adj_dev); 7855 7856 return ret; 7857 } 7858 7859 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7860 struct net_device *adj_dev, 7861 u16 ref_nr, 7862 struct list_head *dev_list) 7863 { 7864 struct netdev_adjacent *adj; 7865 7866 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7867 dev->name, adj_dev->name, ref_nr); 7868 7869 adj = __netdev_find_adj(adj_dev, dev_list); 7870 7871 if (!adj) { 7872 pr_err("Adjacency does not exist for device %s from %s\n", 7873 dev->name, adj_dev->name); 7874 WARN_ON(1); 7875 return; 7876 } 7877 7878 if (adj->ref_nr > ref_nr) { 7879 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7880 dev->name, adj_dev->name, ref_nr, 7881 adj->ref_nr - ref_nr); 7882 adj->ref_nr -= ref_nr; 7883 return; 7884 } 7885 7886 if (adj->master) 7887 sysfs_remove_link(&(dev->dev.kobj), "master"); 7888 7889 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7890 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7891 7892 list_del_rcu(&adj->list); 7893 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7894 adj_dev->name, dev->name, adj_dev->name); 7895 dev_put(adj_dev); 7896 kfree_rcu(adj, rcu); 7897 } 7898 7899 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7900 struct net_device *upper_dev, 7901 struct list_head *up_list, 7902 struct list_head *down_list, 7903 void *private, bool master) 7904 { 7905 int ret; 7906 7907 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7908 private, master); 7909 if (ret) 7910 return ret; 7911 7912 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7913 private, false); 7914 if (ret) { 7915 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7916 return ret; 7917 } 7918 7919 return 0; 7920 } 7921 7922 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7923 struct net_device *upper_dev, 7924 u16 ref_nr, 7925 struct list_head *up_list, 7926 struct list_head *down_list) 7927 { 7928 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7929 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7930 } 7931 7932 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7933 struct net_device *upper_dev, 7934 void *private, bool master) 7935 { 7936 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7937 &dev->adj_list.upper, 7938 &upper_dev->adj_list.lower, 7939 private, master); 7940 } 7941 7942 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7943 struct net_device *upper_dev) 7944 { 7945 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7946 &dev->adj_list.upper, 7947 &upper_dev->adj_list.lower); 7948 } 7949 7950 static int __netdev_upper_dev_link(struct net_device *dev, 7951 struct net_device *upper_dev, bool master, 7952 void *upper_priv, void *upper_info, 7953 struct netdev_nested_priv *priv, 7954 struct netlink_ext_ack *extack) 7955 { 7956 struct netdev_notifier_changeupper_info changeupper_info = { 7957 .info = { 7958 .dev = dev, 7959 .extack = extack, 7960 }, 7961 .upper_dev 
= upper_dev, 7962 .master = master, 7963 .linking = true, 7964 .upper_info = upper_info, 7965 }; 7966 struct net_device *master_dev; 7967 int ret = 0; 7968 7969 ASSERT_RTNL(); 7970 7971 if (dev == upper_dev) 7972 return -EBUSY; 7973 7974 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7975 if (__netdev_has_upper_dev(upper_dev, dev)) 7976 return -EBUSY; 7977 7978 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7979 return -EMLINK; 7980 7981 if (!master) { 7982 if (__netdev_has_upper_dev(dev, upper_dev)) 7983 return -EEXIST; 7984 } else { 7985 master_dev = __netdev_master_upper_dev_get(dev); 7986 if (master_dev) 7987 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7988 } 7989 7990 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7991 &changeupper_info.info); 7992 ret = notifier_to_errno(ret); 7993 if (ret) 7994 return ret; 7995 7996 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7997 master); 7998 if (ret) 7999 return ret; 8000 8001 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8002 &changeupper_info.info); 8003 ret = notifier_to_errno(ret); 8004 if (ret) 8005 goto rollback; 8006 8007 __netdev_update_upper_level(dev, NULL); 8008 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8009 8010 __netdev_update_lower_level(upper_dev, priv); 8011 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8012 priv); 8013 8014 return 0; 8015 8016 rollback: 8017 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8018 8019 return ret; 8020 } 8021 8022 /** 8023 * netdev_upper_dev_link - Add a link to the upper device 8024 * @dev: device 8025 * @upper_dev: new upper device 8026 * @extack: netlink extended ack 8027 * 8028 * Adds a link to device which is upper to this one. The caller must hold 8029 * the RTNL lock. On a failure a negative errno code is returned. 8030 * On success the reference counts are adjusted and the function 8031 * returns zero. 8032 */ 8033 int netdev_upper_dev_link(struct net_device *dev, 8034 struct net_device *upper_dev, 8035 struct netlink_ext_ack *extack) 8036 { 8037 struct netdev_nested_priv priv = { 8038 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8039 .data = NULL, 8040 }; 8041 8042 return __netdev_upper_dev_link(dev, upper_dev, false, 8043 NULL, NULL, &priv, extack); 8044 } 8045 EXPORT_SYMBOL(netdev_upper_dev_link); 8046 8047 /** 8048 * netdev_master_upper_dev_link - Add a master link to the upper device 8049 * @dev: device 8050 * @upper_dev: new upper device 8051 * @upper_priv: upper device private 8052 * @upper_info: upper info to be passed down via notifier 8053 * @extack: netlink extended ack 8054 * 8055 * Adds a link to device which is upper to this one. In this case, only 8056 * one master upper device can be linked, although other non-master devices 8057 * might be linked as well. The caller must hold the RTNL lock. 8058 * On a failure a negative errno code is returned. On success the reference 8059 * counts are adjusted and the function returns zero. 
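 *
 * A minimal sketch of a hypothetical aggregating driver enslaving a port
 * (names are illustrative, error handling elided, RTNL assumed held):
 *
 *	err = netdev_master_upper_dev_link(port_dev, agg_dev,
 *					   port_priv, NULL, extack);
 *	if (err)
 *		return err;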
8060 */ 8061 int netdev_master_upper_dev_link(struct net_device *dev, 8062 struct net_device *upper_dev, 8063 void *upper_priv, void *upper_info, 8064 struct netlink_ext_ack *extack) 8065 { 8066 struct netdev_nested_priv priv = { 8067 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8068 .data = NULL, 8069 }; 8070 8071 return __netdev_upper_dev_link(dev, upper_dev, true, 8072 upper_priv, upper_info, &priv, extack); 8073 } 8074 EXPORT_SYMBOL(netdev_master_upper_dev_link); 8075 8076 static void __netdev_upper_dev_unlink(struct net_device *dev, 8077 struct net_device *upper_dev, 8078 struct netdev_nested_priv *priv) 8079 { 8080 struct netdev_notifier_changeupper_info changeupper_info = { 8081 .info = { 8082 .dev = dev, 8083 }, 8084 .upper_dev = upper_dev, 8085 .linking = false, 8086 }; 8087 8088 ASSERT_RTNL(); 8089 8090 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 8091 8092 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8093 &changeupper_info.info); 8094 8095 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8096 8097 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8098 &changeupper_info.info); 8099 8100 __netdev_update_upper_level(dev, NULL); 8101 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8102 8103 __netdev_update_lower_level(upper_dev, priv); 8104 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8105 priv); 8106 } 8107 8108 /** 8109 * netdev_upper_dev_unlink - Removes a link to upper device 8110 * @dev: device 8111 * @upper_dev: new upper device 8112 * 8113 * Removes a link to device which is upper to this one. The caller must hold 8114 * the RTNL lock. 8115 */ 8116 void netdev_upper_dev_unlink(struct net_device *dev, 8117 struct net_device *upper_dev) 8118 { 8119 struct netdev_nested_priv priv = { 8120 .flags = NESTED_SYNC_TODO, 8121 .data = NULL, 8122 }; 8123 8124 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 8125 } 8126 EXPORT_SYMBOL(netdev_upper_dev_unlink); 8127 8128 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 8129 struct net_device *lower_dev, 8130 bool val) 8131 { 8132 struct netdev_adjacent *adj; 8133 8134 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 8135 if (adj) 8136 adj->ignore = val; 8137 8138 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 8139 if (adj) 8140 adj->ignore = val; 8141 } 8142 8143 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 8144 struct net_device *lower_dev) 8145 { 8146 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 8147 } 8148 8149 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 8150 struct net_device *lower_dev) 8151 { 8152 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 8153 } 8154 8155 int netdev_adjacent_change_prepare(struct net_device *old_dev, 8156 struct net_device *new_dev, 8157 struct net_device *dev, 8158 struct netlink_ext_ack *extack) 8159 { 8160 struct netdev_nested_priv priv = { 8161 .flags = 0, 8162 .data = NULL, 8163 }; 8164 int err; 8165 8166 if (!new_dev) 8167 return 0; 8168 8169 if (old_dev && new_dev != old_dev) 8170 netdev_adjacent_dev_disable(dev, old_dev); 8171 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 8172 extack); 8173 if (err) { 8174 if (old_dev && new_dev != old_dev) 8175 netdev_adjacent_dev_enable(dev, old_dev); 8176 return err; 8177 } 8178 8179 return 0; 8180 } 8181 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 8182 8183 void netdev_adjacent_change_commit(struct net_device *old_dev, 8184 struct 
net_device *new_dev, 8185 struct net_device *dev) 8186 { 8187 struct netdev_nested_priv priv = { 8188 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8189 .data = NULL, 8190 }; 8191 8192 if (!new_dev || !old_dev) 8193 return; 8194 8195 if (new_dev == old_dev) 8196 return; 8197 8198 netdev_adjacent_dev_enable(dev, old_dev); 8199 __netdev_upper_dev_unlink(old_dev, dev, &priv); 8200 } 8201 EXPORT_SYMBOL(netdev_adjacent_change_commit); 8202 8203 void netdev_adjacent_change_abort(struct net_device *old_dev, 8204 struct net_device *new_dev, 8205 struct net_device *dev) 8206 { 8207 struct netdev_nested_priv priv = { 8208 .flags = 0, 8209 .data = NULL, 8210 }; 8211 8212 if (!new_dev) 8213 return; 8214 8215 if (old_dev && new_dev != old_dev) 8216 netdev_adjacent_dev_enable(dev, old_dev); 8217 8218 __netdev_upper_dev_unlink(new_dev, dev, &priv); 8219 } 8220 EXPORT_SYMBOL(netdev_adjacent_change_abort); 8221 8222 /** 8223 * netdev_bonding_info_change - Dispatch event about slave change 8224 * @dev: device 8225 * @bonding_info: info to dispatch 8226 * 8227 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 8228 * The caller must hold the RTNL lock. 8229 */ 8230 void netdev_bonding_info_change(struct net_device *dev, 8231 struct netdev_bonding_info *bonding_info) 8232 { 8233 struct netdev_notifier_bonding_info info = { 8234 .info.dev = dev, 8235 }; 8236 8237 memcpy(&info.bonding_info, bonding_info, 8238 sizeof(struct netdev_bonding_info)); 8239 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 8240 &info.info); 8241 } 8242 EXPORT_SYMBOL(netdev_bonding_info_change); 8243 8244 /** 8245 * netdev_get_xmit_slave - Get the xmit slave of master device 8246 * @dev: device 8247 * @skb: The packet 8248 * @all_slaves: assume all the slaves are active 8249 * 8250 * The reference counters are not incremented so the caller must be 8251 * careful with locks. The caller must hold RCU lock. 8252 * %NULL is returned if no slave is found. 8253 */ 8254 8255 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8256 struct sk_buff *skb, 8257 bool all_slaves) 8258 { 8259 const struct net_device_ops *ops = dev->netdev_ops; 8260 8261 if (!ops->ndo_get_xmit_slave) 8262 return NULL; 8263 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8264 } 8265 EXPORT_SYMBOL(netdev_get_xmit_slave); 8266 8267 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8268 struct sock *sk) 8269 { 8270 const struct net_device_ops *ops = dev->netdev_ops; 8271 8272 if (!ops->ndo_sk_get_lower_dev) 8273 return NULL; 8274 return ops->ndo_sk_get_lower_dev(dev, sk); 8275 } 8276 8277 /** 8278 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8279 * @dev: device 8280 * @sk: the socket 8281 * 8282 * %NULL is returned if no lower device is found. 
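 *
 * The walk simply follows ndo_sk_get_lower_dev() from @dev downwards.
 * It is used, for example, by TLS device offload to resolve the physical
 * device below a bonding master for a given socket.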
8283 */ 8284 8285 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8286 struct sock *sk) 8287 { 8288 struct net_device *lower; 8289 8290 lower = netdev_sk_get_lower_dev(dev, sk); 8291 while (lower) { 8292 dev = lower; 8293 lower = netdev_sk_get_lower_dev(dev, sk); 8294 } 8295 8296 return dev; 8297 } 8298 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8299 8300 static void netdev_adjacent_add_links(struct net_device *dev) 8301 { 8302 struct netdev_adjacent *iter; 8303 8304 struct net *net = dev_net(dev); 8305 8306 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8307 if (!net_eq(net, dev_net(iter->dev))) 8308 continue; 8309 netdev_adjacent_sysfs_add(iter->dev, dev, 8310 &iter->dev->adj_list.lower); 8311 netdev_adjacent_sysfs_add(dev, iter->dev, 8312 &dev->adj_list.upper); 8313 } 8314 8315 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8316 if (!net_eq(net, dev_net(iter->dev))) 8317 continue; 8318 netdev_adjacent_sysfs_add(iter->dev, dev, 8319 &iter->dev->adj_list.upper); 8320 netdev_adjacent_sysfs_add(dev, iter->dev, 8321 &dev->adj_list.lower); 8322 } 8323 } 8324 8325 static void netdev_adjacent_del_links(struct net_device *dev) 8326 { 8327 struct netdev_adjacent *iter; 8328 8329 struct net *net = dev_net(dev); 8330 8331 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8332 if (!net_eq(net, dev_net(iter->dev))) 8333 continue; 8334 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8335 &iter->dev->adj_list.lower); 8336 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8337 &dev->adj_list.upper); 8338 } 8339 8340 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8341 if (!net_eq(net, dev_net(iter->dev))) 8342 continue; 8343 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8344 &iter->dev->adj_list.upper); 8345 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8346 &dev->adj_list.lower); 8347 } 8348 } 8349 8350 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8351 { 8352 struct netdev_adjacent *iter; 8353 8354 struct net *net = dev_net(dev); 8355 8356 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8357 if (!net_eq(net, dev_net(iter->dev))) 8358 continue; 8359 netdev_adjacent_sysfs_del(iter->dev, oldname, 8360 &iter->dev->adj_list.lower); 8361 netdev_adjacent_sysfs_add(iter->dev, dev, 8362 &iter->dev->adj_list.lower); 8363 } 8364 8365 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8366 if (!net_eq(net, dev_net(iter->dev))) 8367 continue; 8368 netdev_adjacent_sysfs_del(iter->dev, oldname, 8369 &iter->dev->adj_list.upper); 8370 netdev_adjacent_sysfs_add(iter->dev, dev, 8371 &iter->dev->adj_list.upper); 8372 } 8373 } 8374 8375 void *netdev_lower_dev_get_private(struct net_device *dev, 8376 struct net_device *lower_dev) 8377 { 8378 struct netdev_adjacent *lower; 8379 8380 if (!lower_dev) 8381 return NULL; 8382 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8383 if (!lower) 8384 return NULL; 8385 8386 return lower->private; 8387 } 8388 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8389 8390 8391 /** 8392 * netdev_lower_state_changed - Dispatch event about lower device state change 8393 * @lower_dev: device 8394 * @lower_state_info: state to dispatch 8395 * 8396 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8397 * The caller must hold the RTNL lock. 
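 *
 * Illustrative sketch (struct my_lower_state_info is a placeholder type,
 * not an existing one): a LAG-style master tracking per-port state might
 * call
 *
 *	struct my_lower_state_info info = { .link_up = true };
 *
 *	netdev_lower_state_changed(port_dev, &info);
 *
 * and interested listeners receive the pointer through
 * netdev_notifier_changelowerstate_info::lower_state_info.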
8398 */ 8399 void netdev_lower_state_changed(struct net_device *lower_dev, 8400 void *lower_state_info) 8401 { 8402 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8403 .info.dev = lower_dev, 8404 }; 8405 8406 ASSERT_RTNL(); 8407 changelowerstate_info.lower_state_info = lower_state_info; 8408 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8409 &changelowerstate_info.info); 8410 } 8411 EXPORT_SYMBOL(netdev_lower_state_changed); 8412 8413 static void dev_change_rx_flags(struct net_device *dev, int flags) 8414 { 8415 const struct net_device_ops *ops = dev->netdev_ops; 8416 8417 if (ops->ndo_change_rx_flags) 8418 ops->ndo_change_rx_flags(dev, flags); 8419 } 8420 8421 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8422 { 8423 unsigned int old_flags = dev->flags; 8424 kuid_t uid; 8425 kgid_t gid; 8426 8427 ASSERT_RTNL(); 8428 8429 dev->flags |= IFF_PROMISC; 8430 dev->promiscuity += inc; 8431 if (dev->promiscuity == 0) { 8432 /* 8433 * Avoid overflow. 8434 * If inc causes overflow, untouch promisc and return error. 8435 */ 8436 if (inc < 0) 8437 dev->flags &= ~IFF_PROMISC; 8438 else { 8439 dev->promiscuity -= inc; 8440 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 8441 dev->name); 8442 return -EOVERFLOW; 8443 } 8444 } 8445 if (dev->flags != old_flags) { 8446 pr_info("device %s %s promiscuous mode\n", 8447 dev->name, 8448 dev->flags & IFF_PROMISC ? "entered" : "left"); 8449 if (audit_enabled) { 8450 current_uid_gid(&uid, &gid); 8451 audit_log(audit_context(), GFP_ATOMIC, 8452 AUDIT_ANOM_PROMISCUOUS, 8453 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8454 dev->name, (dev->flags & IFF_PROMISC), 8455 (old_flags & IFF_PROMISC), 8456 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8457 from_kuid(&init_user_ns, uid), 8458 from_kgid(&init_user_ns, gid), 8459 audit_get_sessionid(current)); 8460 } 8461 8462 dev_change_rx_flags(dev, IFF_PROMISC); 8463 } 8464 if (notify) 8465 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 8466 return 0; 8467 } 8468 8469 /** 8470 * dev_set_promiscuity - update promiscuity count on a device 8471 * @dev: device 8472 * @inc: modifier 8473 * 8474 * Add or remove promiscuity from a device. While the count in the device 8475 * remains above zero the interface remains promiscuous. Once it hits zero 8476 * the device reverts back to normal filtering operation. A negative inc 8477 * value is used to drop promiscuity on the device. 8478 * Return 0 if successful or a negative errno code on error. 8479 */ 8480 int dev_set_promiscuity(struct net_device *dev, int inc) 8481 { 8482 unsigned int old_flags = dev->flags; 8483 int err; 8484 8485 err = __dev_set_promiscuity(dev, inc, true); 8486 if (err < 0) 8487 return err; 8488 if (dev->flags != old_flags) 8489 dev_set_rx_mode(dev); 8490 return err; 8491 } 8492 EXPORT_SYMBOL(dev_set_promiscuity); 8493 8494 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8495 { 8496 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8497 8498 ASSERT_RTNL(); 8499 8500 dev->flags |= IFF_ALLMULTI; 8501 dev->allmulti += inc; 8502 if (dev->allmulti == 0) { 8503 /* 8504 * Avoid overflow. 8505 * If inc causes overflow, untouch allmulti and return error. 8506 */ 8507 if (inc < 0) 8508 dev->flags &= ~IFF_ALLMULTI; 8509 else { 8510 dev->allmulti -= inc; 8511 pr_warn("%s: allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n", 8512 dev->name); 8513 return -EOVERFLOW; 8514 } 8515 } 8516 if (dev->flags ^ old_flags) { 8517 dev_change_rx_flags(dev, IFF_ALLMULTI); 8518 dev_set_rx_mode(dev); 8519 if (notify) 8520 __dev_notify_flags(dev, old_flags, 8521 dev->gflags ^ old_gflags); 8522 } 8523 return 0; 8524 } 8525 8526 /** 8527 * dev_set_allmulti - update allmulti count on a device 8528 * @dev: device 8529 * @inc: modifier 8530 * 8531 * Add or remove reception of all multicast frames to a device. While the 8532 * count in the device remains above zero the interface keeps receiving 8533 * all multicast frames. Once it hits zero the device reverts back to normal 8534 * filtering operation. A negative @inc value is used to drop the counter 8535 * when releasing a resource needing all multicasts. 8536 * Return 0 if successful or a negative errno code on error. 8537 */ 8538 8539 int dev_set_allmulti(struct net_device *dev, int inc) 8540 { 8541 return __dev_set_allmulti(dev, inc, true); 8542 } 8543 EXPORT_SYMBOL(dev_set_allmulti); 8544 8545 /* 8546 * Upload unicast and multicast address lists to device and 8547 * configure RX filtering. When the device doesn't support unicast 8548 * filtering it is put in promiscuous mode while unicast addresses 8549 * are present. 8550 */ 8551 void __dev_set_rx_mode(struct net_device *dev) 8552 { 8553 const struct net_device_ops *ops = dev->netdev_ops; 8554 8555 /* dev_open will call this function so the list will stay sane. */ 8556 if (!(dev->flags&IFF_UP)) 8557 return; 8558 8559 if (!netif_device_present(dev)) 8560 return; 8561 8562 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8563 /* Unicast address changes may only happen under the rtnl, 8564 * therefore calling __dev_set_promiscuity here is safe. 8565 */ 8566 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8567 __dev_set_promiscuity(dev, 1, false); 8568 dev->uc_promisc = true; 8569 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8570 __dev_set_promiscuity(dev, -1, false); 8571 dev->uc_promisc = false; 8572 } 8573 } 8574 8575 if (ops->ndo_set_rx_mode) 8576 ops->ndo_set_rx_mode(dev); 8577 } 8578 8579 void dev_set_rx_mode(struct net_device *dev) 8580 { 8581 netif_addr_lock_bh(dev); 8582 __dev_set_rx_mode(dev); 8583 netif_addr_unlock_bh(dev); 8584 } 8585 8586 /** 8587 * dev_get_flags - get flags reported to userspace 8588 * @dev: device 8589 * 8590 * Get the combination of flag bits exported through APIs to userspace. 8591 */ 8592 unsigned int dev_get_flags(const struct net_device *dev) 8593 { 8594 unsigned int flags; 8595 8596 flags = (dev->flags & ~(IFF_PROMISC | 8597 IFF_ALLMULTI | 8598 IFF_RUNNING | 8599 IFF_LOWER_UP | 8600 IFF_DORMANT)) | 8601 (dev->gflags & (IFF_PROMISC | 8602 IFF_ALLMULTI)); 8603 8604 if (netif_running(dev)) { 8605 if (netif_oper_up(dev)) 8606 flags |= IFF_RUNNING; 8607 if (netif_carrier_ok(dev)) 8608 flags |= IFF_LOWER_UP; 8609 if (netif_dormant(dev)) 8610 flags |= IFF_DORMANT; 8611 } 8612 8613 return flags; 8614 } 8615 EXPORT_SYMBOL(dev_get_flags); 8616 8617 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8618 struct netlink_ext_ack *extack) 8619 { 8620 unsigned int old_flags = dev->flags; 8621 int ret; 8622 8623 ASSERT_RTNL(); 8624 8625 /* 8626 * Set the flags on our device.
8627 */ 8628 8629 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8630 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8631 IFF_AUTOMEDIA)) | 8632 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8633 IFF_ALLMULTI)); 8634 8635 /* 8636 * Load in the correct multicast list now that the flags have changed. 8637 */ 8638 8639 if ((old_flags ^ flags) & IFF_MULTICAST) 8640 dev_change_rx_flags(dev, IFF_MULTICAST); 8641 8642 dev_set_rx_mode(dev); 8643 8644 /* 8645 * Have we downed the interface? We handle IFF_UP ourselves 8646 * according to user attempts to set it, rather than blindly 8647 * setting it. 8648 */ 8649 8650 ret = 0; 8651 if ((old_flags ^ flags) & IFF_UP) { 8652 if (old_flags & IFF_UP) 8653 __dev_close(dev); 8654 else 8655 ret = __dev_open(dev, extack); 8656 } 8657 8658 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8659 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8660 unsigned int old_flags = dev->flags; 8661 8662 dev->gflags ^= IFF_PROMISC; 8663 8664 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8665 if (dev->flags != old_flags) 8666 dev_set_rx_mode(dev); 8667 } 8668 8669 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8670 * is important. Some (broken) drivers set IFF_PROMISC when 8671 * IFF_ALLMULTI is requested, without asking us and without reporting it. 8672 */ 8673 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8674 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8675 8676 dev->gflags ^= IFF_ALLMULTI; 8677 __dev_set_allmulti(dev, inc, false); 8678 } 8679 8680 return ret; 8681 } 8682 8683 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8684 unsigned int gchanges) 8685 { 8686 unsigned int changes = dev->flags ^ old_flags; 8687 8688 if (gchanges) 8689 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 8690 8691 if (changes & IFF_UP) { 8692 if (dev->flags & IFF_UP) 8693 call_netdevice_notifiers(NETDEV_UP, dev); 8694 else 8695 call_netdevice_notifiers(NETDEV_DOWN, dev); 8696 } 8697 8698 if (dev->flags & IFF_UP && 8699 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8700 struct netdev_notifier_change_info change_info = { 8701 .info = { 8702 .dev = dev, 8703 }, 8704 .flags_changed = changes, 8705 }; 8706 8707 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8708 } 8709 } 8710 8711 /** 8712 * dev_change_flags - change device settings 8713 * @dev: device 8714 * @flags: device state flags 8715 * @extack: netlink extended ack 8716 * 8717 * Change settings on a device based on the given state flags. The flags are 8718 * in the userspace exported format.
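 *
 * For example (illustrative), administratively bringing an interface up
 * from kernel code, with the usual netlink notification sent out:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 *	rtnl_unlock();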
8719 */ 8720 int dev_change_flags(struct net_device *dev, unsigned int flags, 8721 struct netlink_ext_ack *extack) 8722 { 8723 int ret; 8724 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8725 8726 ret = __dev_change_flags(dev, flags, extack); 8727 if (ret < 0) 8728 return ret; 8729 8730 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8731 __dev_notify_flags(dev, old_flags, changes); 8732 return ret; 8733 } 8734 EXPORT_SYMBOL(dev_change_flags); 8735 8736 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8737 { 8738 const struct net_device_ops *ops = dev->netdev_ops; 8739 8740 if (ops->ndo_change_mtu) 8741 return ops->ndo_change_mtu(dev, new_mtu); 8742 8743 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8744 WRITE_ONCE(dev->mtu, new_mtu); 8745 return 0; 8746 } 8747 EXPORT_SYMBOL(__dev_set_mtu); 8748 8749 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8750 struct netlink_ext_ack *extack) 8751 { 8752 /* MTU must be positive, and in range */ 8753 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8754 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8755 return -EINVAL; 8756 } 8757 8758 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8759 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8760 return -EINVAL; 8761 } 8762 return 0; 8763 } 8764 8765 /** 8766 * dev_set_mtu_ext - Change maximum transfer unit 8767 * @dev: device 8768 * @new_mtu: new transfer unit 8769 * @extack: netlink extended ack 8770 * 8771 * Change the maximum transfer size of the network device. 8772 */ 8773 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8774 struct netlink_ext_ack *extack) 8775 { 8776 int err, orig_mtu; 8777 8778 if (new_mtu == dev->mtu) 8779 return 0; 8780 8781 err = dev_validate_mtu(dev, new_mtu, extack); 8782 if (err) 8783 return err; 8784 8785 if (!netif_device_present(dev)) 8786 return -ENODEV; 8787 8788 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8789 err = notifier_to_errno(err); 8790 if (err) 8791 return err; 8792 8793 orig_mtu = dev->mtu; 8794 err = __dev_set_mtu(dev, new_mtu); 8795 8796 if (!err) { 8797 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8798 orig_mtu); 8799 err = notifier_to_errno(err); 8800 if (err) { 8801 /* setting mtu back and notifying everyone again, 8802 * so that they have a chance to revert changes. 
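		 * Note that this rollback notification passes new_mtu as the
		 * "old" MTU: from the listeners' point of view that is the
		 * value which was just in effect and now needs to be undone.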
8803 */ 8804 __dev_set_mtu(dev, orig_mtu); 8805 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8806 new_mtu); 8807 } 8808 } 8809 return err; 8810 } 8811 8812 int dev_set_mtu(struct net_device *dev, int new_mtu) 8813 { 8814 struct netlink_ext_ack extack; 8815 int err; 8816 8817 memset(&extack, 0, sizeof(extack)); 8818 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8819 if (err && extack._msg) 8820 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8821 return err; 8822 } 8823 EXPORT_SYMBOL(dev_set_mtu); 8824 8825 /** 8826 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8827 * @dev: device 8828 * @new_len: new tx queue length 8829 */ 8830 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8831 { 8832 unsigned int orig_len = dev->tx_queue_len; 8833 int res; 8834 8835 if (new_len != (unsigned int)new_len) 8836 return -ERANGE; 8837 8838 if (new_len != orig_len) { 8839 dev->tx_queue_len = new_len; 8840 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8841 res = notifier_to_errno(res); 8842 if (res) 8843 goto err_rollback; 8844 res = dev_qdisc_change_tx_queue_len(dev); 8845 if (res) 8846 goto err_rollback; 8847 } 8848 8849 return 0; 8850 8851 err_rollback: 8852 netdev_err(dev, "refused to change device tx_queue_len\n"); 8853 dev->tx_queue_len = orig_len; 8854 return res; 8855 } 8856 8857 /** 8858 * dev_set_group - Change group this device belongs to 8859 * @dev: device 8860 * @new_group: group this device should belong to 8861 */ 8862 void dev_set_group(struct net_device *dev, int new_group) 8863 { 8864 dev->group = new_group; 8865 } 8866 EXPORT_SYMBOL(dev_set_group); 8867 8868 /** 8869 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8870 * @dev: device 8871 * @addr: new address 8872 * @extack: netlink extended ack 8873 */ 8874 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8875 struct netlink_ext_ack *extack) 8876 { 8877 struct netdev_notifier_pre_changeaddr_info info = { 8878 .info.dev = dev, 8879 .info.extack = extack, 8880 .dev_addr = addr, 8881 }; 8882 int rc; 8883 8884 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8885 return notifier_to_errno(rc); 8886 } 8887 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8888 8889 /** 8890 * dev_set_mac_address - Change Media Access Control Address 8891 * @dev: device 8892 * @sa: new address 8893 * @extack: netlink extended ack 8894 * 8895 * Change the hardware (MAC) address of the device 8896 */ 8897 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8898 struct netlink_ext_ack *extack) 8899 { 8900 const struct net_device_ops *ops = dev->netdev_ops; 8901 int err; 8902 8903 if (!ops->ndo_set_mac_address) 8904 return -EOPNOTSUPP; 8905 if (sa->sa_family != dev->type) 8906 return -EINVAL; 8907 if (!netif_device_present(dev)) 8908 return -ENODEV; 8909 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8910 if (err) 8911 return err; 8912 err = ops->ndo_set_mac_address(dev, sa); 8913 if (err) 8914 return err; 8915 dev->addr_assign_type = NET_ADDR_SET; 8916 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8917 add_device_randomness(dev->dev_addr, dev->addr_len); 8918 return 0; 8919 } 8920 EXPORT_SYMBOL(dev_set_mac_address); 8921 8922 static DECLARE_RWSEM(dev_addr_sem); 8923 8924 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8925 struct netlink_ext_ack *extack) 8926 { 8927 int ret; 8928 8929 down_write(&dev_addr_sem); 8930 ret = dev_set_mac_address(dev, sa, extack); 8931 
up_write(&dev_addr_sem); 8932 return ret; 8933 } 8934 EXPORT_SYMBOL(dev_set_mac_address_user); 8935 8936 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8937 { 8938 size_t size = sizeof(sa->sa_data); 8939 struct net_device *dev; 8940 int ret = 0; 8941 8942 down_read(&dev_addr_sem); 8943 rcu_read_lock(); 8944 8945 dev = dev_get_by_name_rcu(net, dev_name); 8946 if (!dev) { 8947 ret = -ENODEV; 8948 goto unlock; 8949 } 8950 if (!dev->addr_len) 8951 memset(sa->sa_data, 0, size); 8952 else 8953 memcpy(sa->sa_data, dev->dev_addr, 8954 min_t(size_t, size, dev->addr_len)); 8955 sa->sa_family = dev->type; 8956 8957 unlock: 8958 rcu_read_unlock(); 8959 up_read(&dev_addr_sem); 8960 return ret; 8961 } 8962 EXPORT_SYMBOL(dev_get_mac_address); 8963 8964 /** 8965 * dev_change_carrier - Change device carrier 8966 * @dev: device 8967 * @new_carrier: new value 8968 * 8969 * Change device carrier 8970 */ 8971 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8972 { 8973 const struct net_device_ops *ops = dev->netdev_ops; 8974 8975 if (!ops->ndo_change_carrier) 8976 return -EOPNOTSUPP; 8977 if (!netif_device_present(dev)) 8978 return -ENODEV; 8979 return ops->ndo_change_carrier(dev, new_carrier); 8980 } 8981 EXPORT_SYMBOL(dev_change_carrier); 8982 8983 /** 8984 * dev_get_phys_port_id - Get device physical port ID 8985 * @dev: device 8986 * @ppid: port ID 8987 * 8988 * Get device physical port ID 8989 */ 8990 int dev_get_phys_port_id(struct net_device *dev, 8991 struct netdev_phys_item_id *ppid) 8992 { 8993 const struct net_device_ops *ops = dev->netdev_ops; 8994 8995 if (!ops->ndo_get_phys_port_id) 8996 return -EOPNOTSUPP; 8997 return ops->ndo_get_phys_port_id(dev, ppid); 8998 } 8999 EXPORT_SYMBOL(dev_get_phys_port_id); 9000 9001 /** 9002 * dev_get_phys_port_name - Get device physical port name 9003 * @dev: device 9004 * @name: port name 9005 * @len: limit of bytes to copy to name 9006 * 9007 * Get device physical port name 9008 */ 9009 int dev_get_phys_port_name(struct net_device *dev, 9010 char *name, size_t len) 9011 { 9012 const struct net_device_ops *ops = dev->netdev_ops; 9013 int err; 9014 9015 if (ops->ndo_get_phys_port_name) { 9016 err = ops->ndo_get_phys_port_name(dev, name, len); 9017 if (err != -EOPNOTSUPP) 9018 return err; 9019 } 9020 return devlink_compat_phys_port_name_get(dev, name, len); 9021 } 9022 EXPORT_SYMBOL(dev_get_phys_port_name); 9023 9024 /** 9025 * dev_get_port_parent_id - Get the device's port parent identifier 9026 * @dev: network device 9027 * @ppid: pointer to a storage for the port's parent identifier 9028 * @recurse: allow/disallow recursion to lower devices 9029 * 9030 * Get the devices's port parent identifier 9031 */ 9032 int dev_get_port_parent_id(struct net_device *dev, 9033 struct netdev_phys_item_id *ppid, 9034 bool recurse) 9035 { 9036 const struct net_device_ops *ops = dev->netdev_ops; 9037 struct netdev_phys_item_id first = { }; 9038 struct net_device *lower_dev; 9039 struct list_head *iter; 9040 int err; 9041 9042 if (ops->ndo_get_port_parent_id) { 9043 err = ops->ndo_get_port_parent_id(dev, ppid); 9044 if (err != -EOPNOTSUPP) 9045 return err; 9046 } 9047 9048 err = devlink_compat_switch_id_get(dev, ppid); 9049 if (!err || err != -EOPNOTSUPP) 9050 return err; 9051 9052 if (!recurse) 9053 return -EOPNOTSUPP; 9054 9055 netdev_for_each_lower_dev(dev, lower_dev, iter) { 9056 err = dev_get_port_parent_id(lower_dev, ppid, recurse); 9057 if (err) 9058 break; 9059 if (!first.id_len) 9060 first = *ppid; 9061 else if (memcmp(&first, 
ppid, sizeof(*ppid))) 9062 return -EOPNOTSUPP; 9063 } 9064 9065 return err; 9066 } 9067 EXPORT_SYMBOL(dev_get_port_parent_id); 9068 9069 /** 9070 * netdev_port_same_parent_id - Indicate if two network devices have 9071 * the same port parent identifier 9072 * @a: first network device 9073 * @b: second network device 9074 */ 9075 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 9076 { 9077 struct netdev_phys_item_id a_id = { }; 9078 struct netdev_phys_item_id b_id = { }; 9079 9080 if (dev_get_port_parent_id(a, &a_id, true) || 9081 dev_get_port_parent_id(b, &b_id, true)) 9082 return false; 9083 9084 return netdev_phys_item_id_same(&a_id, &b_id); 9085 } 9086 EXPORT_SYMBOL(netdev_port_same_parent_id); 9087 9088 /** 9089 * dev_change_proto_down - update protocol port state information 9090 * @dev: device 9091 * @proto_down: new value 9092 * 9093 * This info can be used by switch drivers to set the phys state of the 9094 * port. 9095 */ 9096 int dev_change_proto_down(struct net_device *dev, bool proto_down) 9097 { 9098 const struct net_device_ops *ops = dev->netdev_ops; 9099 9100 if (!ops->ndo_change_proto_down) 9101 return -EOPNOTSUPP; 9102 if (!netif_device_present(dev)) 9103 return -ENODEV; 9104 return ops->ndo_change_proto_down(dev, proto_down); 9105 } 9106 EXPORT_SYMBOL(dev_change_proto_down); 9107 9108 /** 9109 * dev_change_proto_down_generic - generic implementation for 9110 * ndo_change_proto_down that sets carrier according to 9111 * proto_down. 9112 * 9113 * @dev: device 9114 * @proto_down: new value 9115 */ 9116 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) 9117 { 9118 if (proto_down) 9119 netif_carrier_off(dev); 9120 else 9121 netif_carrier_on(dev); 9122 dev->proto_down = proto_down; 9123 return 0; 9124 } 9125 EXPORT_SYMBOL(dev_change_proto_down_generic); 9126 9127 /** 9128 * dev_change_proto_down_reason - proto down reason 9129 * 9130 * @dev: device 9131 * @mask: proto down mask 9132 * @value: proto down value 9133 */ 9134 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9135 u32 value) 9136 { 9137 int b; 9138 9139 if (!mask) { 9140 dev->proto_down_reason = value; 9141 } else { 9142 for_each_set_bit(b, &mask, 32) { 9143 if (value & (1 << b)) 9144 dev->proto_down_reason |= BIT(b); 9145 else 9146 dev->proto_down_reason &= ~BIT(b); 9147 } 9148 } 9149 } 9150 EXPORT_SYMBOL(dev_change_proto_down_reason); 9151 9152 struct bpf_xdp_link { 9153 struct bpf_link link; 9154 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9155 int flags; 9156 }; 9157 9158 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9159 { 9160 if (flags & XDP_FLAGS_HW_MODE) 9161 return XDP_MODE_HW; 9162 if (flags & XDP_FLAGS_DRV_MODE) 9163 return XDP_MODE_DRV; 9164 if (flags & XDP_FLAGS_SKB_MODE) 9165 return XDP_MODE_SKB; 9166 return dev->netdev_ops->ndo_bpf ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9167 } 9168 9169 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9170 { 9171 switch (mode) { 9172 case XDP_MODE_SKB: 9173 return generic_xdp_install; 9174 case XDP_MODE_DRV: 9175 case XDP_MODE_HW: 9176 return dev->netdev_ops->ndo_bpf; 9177 default: 9178 return NULL; 9179 } 9180 } 9181 9182 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9183 enum bpf_xdp_mode mode) 9184 { 9185 return dev->xdp_state[mode].link; 9186 } 9187 9188 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9189 enum bpf_xdp_mode mode) 9190 { 9191 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9192 9193 if (link) 9194 return link->link.prog; 9195 return dev->xdp_state[mode].prog; 9196 } 9197 9198 static u8 dev_xdp_prog_count(struct net_device *dev) 9199 { 9200 u8 count = 0; 9201 int i; 9202 9203 for (i = 0; i < __MAX_XDP_MODE; i++) 9204 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9205 count++; 9206 return count; 9207 } 9208 9209 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9210 { 9211 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9212 9213 return prog ? prog->aux->id : 0; 9214 } 9215 9216 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9217 struct bpf_xdp_link *link) 9218 { 9219 dev->xdp_state[mode].link = link; 9220 dev->xdp_state[mode].prog = NULL; 9221 } 9222 9223 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9224 struct bpf_prog *prog) 9225 { 9226 dev->xdp_state[mode].link = NULL; 9227 dev->xdp_state[mode].prog = prog; 9228 } 9229 9230 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9231 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9232 u32 flags, struct bpf_prog *prog) 9233 { 9234 struct netdev_bpf xdp; 9235 int err; 9236 9237 memset(&xdp, 0, sizeof(xdp)); 9238 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9239 xdp.extack = extack; 9240 xdp.flags = flags; 9241 xdp.prog = prog; 9242 9243 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9244 * "moved" into driver), so they don't increment it on their own, but 9245 * they do decrement refcnt when program is detached or replaced. 9246 * Given net_device also owns link/prog, we need to bump refcnt here 9247 * to prevent drivers from underflowing it. 
9248 */ 9249 if (prog) 9250 bpf_prog_inc(prog); 9251 err = bpf_op(dev, &xdp); 9252 if (err) { 9253 if (prog) 9254 bpf_prog_put(prog); 9255 return err; 9256 } 9257 9258 if (mode != XDP_MODE_HW) 9259 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9260 9261 return 0; 9262 } 9263 9264 static void dev_xdp_uninstall(struct net_device *dev) 9265 { 9266 struct bpf_xdp_link *link; 9267 struct bpf_prog *prog; 9268 enum bpf_xdp_mode mode; 9269 bpf_op_t bpf_op; 9270 9271 ASSERT_RTNL(); 9272 9273 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9274 prog = dev_xdp_prog(dev, mode); 9275 if (!prog) 9276 continue; 9277 9278 bpf_op = dev_xdp_bpf_op(dev, mode); 9279 if (!bpf_op) 9280 continue; 9281 9282 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9283 9284 /* auto-detach link from net device */ 9285 link = dev_xdp_link(dev, mode); 9286 if (link) 9287 link->dev = NULL; 9288 else 9289 bpf_prog_put(prog); 9290 9291 dev_xdp_set_link(dev, mode, NULL); 9292 } 9293 } 9294 9295 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9296 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9297 struct bpf_prog *old_prog, u32 flags) 9298 { 9299 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9300 struct bpf_prog *cur_prog; 9301 enum bpf_xdp_mode mode; 9302 bpf_op_t bpf_op; 9303 int err; 9304 9305 ASSERT_RTNL(); 9306 9307 /* either link or prog attachment, never both */ 9308 if (link && (new_prog || old_prog)) 9309 return -EINVAL; 9310 /* link supports only XDP mode flags */ 9311 if (link && (flags & ~XDP_FLAGS_MODES)) { 9312 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9313 return -EINVAL; 9314 } 9315 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9316 if (num_modes > 1) { 9317 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9318 return -EINVAL; 9319 } 9320 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9321 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9322 NL_SET_ERR_MSG(extack, 9323 "More than one program loaded, unset mode is ambiguous"); 9324 return -EINVAL; 9325 } 9326 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9327 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9328 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); 9329 return -EINVAL; 9330 } 9331 9332 mode = dev_xdp_mode(dev, flags); 9333 /* can't replace attached link */ 9334 if (dev_xdp_link(dev, mode)) { 9335 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9336 return -EBUSY; 9337 } 9338 9339 cur_prog = dev_xdp_prog(dev, mode); 9340 /* can't replace attached prog with link */ 9341 if (link && cur_prog) { 9342 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9343 return -EBUSY; 9344 } 9345 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9346 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9347 return -EEXIST; 9348 } 9349 9350 /* put effective new program into new_prog */ 9351 if (link) 9352 new_prog = link->link.prog; 9353 9354 if (new_prog) { 9355 bool offload = mode == XDP_MODE_HW; 9356 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9357 ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9358 9359 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9360 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9361 return -EBUSY; 9362 } 9363 if (!offload && dev_xdp_prog(dev, other_mode)) { 9364 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9365 return -EEXIST; 9366 } 9367 if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) { 9368 NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); 9369 return -EINVAL; 9370 } 9371 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9372 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9373 return -EINVAL; 9374 } 9375 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9376 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9377 return -EINVAL; 9378 } 9379 } 9380 9381 /* don't call drivers if the effective program didn't change */ 9382 if (new_prog != cur_prog) { 9383 bpf_op = dev_xdp_bpf_op(dev, mode); 9384 if (!bpf_op) { 9385 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9386 return -EOPNOTSUPP; 9387 } 9388 9389 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9390 if (err) 9391 return err; 9392 } 9393 9394 if (link) 9395 dev_xdp_set_link(dev, mode, link); 9396 else 9397 dev_xdp_set_prog(dev, mode, new_prog); 9398 if (cur_prog) 9399 bpf_prog_put(cur_prog); 9400 9401 return 0; 9402 } 9403 9404 static int dev_xdp_attach_link(struct net_device *dev, 9405 struct netlink_ext_ack *extack, 9406 struct bpf_xdp_link *link) 9407 { 9408 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9409 } 9410 9411 static int dev_xdp_detach_link(struct net_device *dev, 9412 struct netlink_ext_ack *extack, 9413 struct bpf_xdp_link *link) 9414 { 9415 enum bpf_xdp_mode mode; 9416 bpf_op_t bpf_op; 9417 9418 ASSERT_RTNL(); 9419 9420 mode = dev_xdp_mode(dev, link->flags); 9421 if (dev_xdp_link(dev, mode) != link) 9422 return -EINVAL; 9423 9424 bpf_op = dev_xdp_bpf_op(dev, mode); 9425 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9426 dev_xdp_set_link(dev, mode, NULL); 9427 return 0; 9428 } 9429 9430 static void bpf_xdp_link_release(struct bpf_link *link) 9431 { 9432 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9433 9434 rtnl_lock(); 9435 9436 /* if racing with net_device's tear down, xdp_link->dev might be 9437 * already NULL, in which case link was already auto-detached 9438 */ 9439 if (xdp_link->dev) { 9440 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9441 xdp_link->dev = NULL; 9442 } 9443 9444 rtnl_unlock(); 9445 } 9446 9447 static int bpf_xdp_link_detach(struct bpf_link *link) 9448 { 9449 bpf_xdp_link_release(link); 9450 return 0; 9451 } 9452 9453 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9454 { 9455 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9456 9457 kfree(xdp_link); 9458 } 9459 9460 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9461 struct seq_file *seq) 9462 { 9463 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9464 u32 ifindex = 0; 9465 9466 rtnl_lock(); 9467 if (xdp_link->dev) 9468 ifindex = xdp_link->dev->ifindex; 9469 rtnl_unlock(); 9470 9471 seq_printf(seq, "ifindex:\t%u\n", ifindex); 9472 } 9473 9474 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9475 struct bpf_link_info *info) 9476 { 9477 struct bpf_xdp_link *xdp_link = 
container_of(link, struct bpf_xdp_link, link); 9478 u32 ifindex = 0; 9479 9480 rtnl_lock(); 9481 if (xdp_link->dev) 9482 ifindex = xdp_link->dev->ifindex; 9483 rtnl_unlock(); 9484 9485 info->xdp.ifindex = ifindex; 9486 return 0; 9487 } 9488 9489 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9490 struct bpf_prog *old_prog) 9491 { 9492 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9493 enum bpf_xdp_mode mode; 9494 bpf_op_t bpf_op; 9495 int err = 0; 9496 9497 rtnl_lock(); 9498 9499 /* link might have been auto-released already, so fail */ 9500 if (!xdp_link->dev) { 9501 err = -ENOLINK; 9502 goto out_unlock; 9503 } 9504 9505 if (old_prog && link->prog != old_prog) { 9506 err = -EPERM; 9507 goto out_unlock; 9508 } 9509 old_prog = link->prog; 9510 if (old_prog == new_prog) { 9511 /* no-op, don't disturb drivers */ 9512 bpf_prog_put(new_prog); 9513 goto out_unlock; 9514 } 9515 9516 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9517 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9518 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9519 xdp_link->flags, new_prog); 9520 if (err) 9521 goto out_unlock; 9522 9523 old_prog = xchg(&link->prog, new_prog); 9524 bpf_prog_put(old_prog); 9525 9526 out_unlock: 9527 rtnl_unlock(); 9528 return err; 9529 } 9530 9531 static const struct bpf_link_ops bpf_xdp_link_lops = { 9532 .release = bpf_xdp_link_release, 9533 .dealloc = bpf_xdp_link_dealloc, 9534 .detach = bpf_xdp_link_detach, 9535 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9536 .fill_link_info = bpf_xdp_link_fill_link_info, 9537 .update_prog = bpf_xdp_link_update, 9538 }; 9539 9540 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9541 { 9542 struct net *net = current->nsproxy->net_ns; 9543 struct bpf_link_primer link_primer; 9544 struct bpf_xdp_link *link; 9545 struct net_device *dev; 9546 int err, fd; 9547 9548 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 9549 if (!dev) 9550 return -EINVAL; 9551 9552 link = kzalloc(sizeof(*link), GFP_USER); 9553 if (!link) { 9554 err = -ENOMEM; 9555 goto out_put_dev; 9556 } 9557 9558 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9559 link->dev = dev; 9560 link->flags = attr->link_create.flags; 9561 9562 err = bpf_link_prime(&link->link, &link_primer); 9563 if (err) { 9564 kfree(link); 9565 goto out_put_dev; 9566 } 9567 9568 rtnl_lock(); 9569 err = dev_xdp_attach_link(dev, NULL, link); 9570 rtnl_unlock(); 9571 9572 if (err) { 9573 bpf_link_cleanup(&link_primer); 9574 goto out_put_dev; 9575 } 9576 9577 fd = bpf_link_settle(&link_primer); 9578 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9579 dev_put(dev); 9580 return fd; 9581 9582 out_put_dev: 9583 dev_put(dev); 9584 return err; 9585 } 9586 9587 /** 9588 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9589 * @dev: device 9590 * @extack: netlink extended ack 9591 * @fd: new program fd or negative value to clear 9592 * @expected_fd: old program fd that userspace expects to replace or clear 9593 * @flags: xdp-related flags 9594 * 9595 * Set or clear a bpf program for a device 9596 */ 9597 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9598 int fd, int expected_fd, u32 flags) 9599 { 9600 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9601 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9602 int err; 9603 9604 ASSERT_RTNL(); 9605 9606 if (fd >= 0) { 9607 new_prog = bpf_prog_get_type_dev(fd, 
BPF_PROG_TYPE_XDP, 9608 mode != XDP_MODE_SKB); 9609 if (IS_ERR(new_prog)) 9610 return PTR_ERR(new_prog); 9611 } 9612 9613 if (expected_fd >= 0) { 9614 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9615 mode != XDP_MODE_SKB); 9616 if (IS_ERR(old_prog)) { 9617 err = PTR_ERR(old_prog); 9618 old_prog = NULL; 9619 goto err_out; 9620 } 9621 } 9622 9623 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9624 9625 err_out: 9626 if (err && new_prog) 9627 bpf_prog_put(new_prog); 9628 if (old_prog) 9629 bpf_prog_put(old_prog); 9630 return err; 9631 } 9632 9633 /** 9634 * dev_new_index - allocate an ifindex 9635 * @net: the applicable net namespace 9636 * 9637 * Returns a suitable unique value for a new device interface 9638 * number. The caller must hold the rtnl semaphore or the 9639 * dev_base_lock to be sure it remains unique. 9640 */ 9641 static int dev_new_index(struct net *net) 9642 { 9643 int ifindex = net->ifindex; 9644 9645 for (;;) { 9646 if (++ifindex <= 0) 9647 ifindex = 1; 9648 if (!__dev_get_by_index(net, ifindex)) 9649 return net->ifindex = ifindex; 9650 } 9651 } 9652 9653 /* Delayed registration/unregisteration */ 9654 static LIST_HEAD(net_todo_list); 9655 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9656 9657 static void net_set_todo(struct net_device *dev) 9658 { 9659 list_add_tail(&dev->todo_list, &net_todo_list); 9660 dev_net(dev)->dev_unreg_count++; 9661 } 9662 9663 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9664 struct net_device *upper, netdev_features_t features) 9665 { 9666 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9667 netdev_features_t feature; 9668 int feature_bit; 9669 9670 for_each_netdev_feature(upper_disables, feature_bit) { 9671 feature = __NETIF_F_BIT(feature_bit); 9672 if (!(upper->wanted_features & feature) 9673 && (features & feature)) { 9674 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9675 &feature, upper->name); 9676 features &= ~feature; 9677 } 9678 } 9679 9680 return features; 9681 } 9682 9683 static void netdev_sync_lower_features(struct net_device *upper, 9684 struct net_device *lower, netdev_features_t features) 9685 { 9686 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9687 netdev_features_t feature; 9688 int feature_bit; 9689 9690 for_each_netdev_feature(upper_disables, feature_bit) { 9691 feature = __NETIF_F_BIT(feature_bit); 9692 if (!(features & feature) && (lower->features & feature)) { 9693 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9694 &feature, lower->name); 9695 lower->wanted_features &= ~feature; 9696 __netdev_update_features(lower); 9697 9698 if (unlikely(lower->features & feature)) 9699 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9700 &feature, lower->name); 9701 else 9702 netdev_features_change(lower); 9703 } 9704 } 9705 } 9706 9707 static netdev_features_t netdev_fix_features(struct net_device *dev, 9708 netdev_features_t features) 9709 { 9710 /* Fix illegal checksum combinations */ 9711 if ((features & NETIF_F_HW_CSUM) && 9712 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9713 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9714 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9715 } 9716 9717 /* TSO requires that SG is present as well. 
*/ 9718 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9719 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9720 features &= ~NETIF_F_ALL_TSO; 9721 } 9722 9723 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9724 !(features & NETIF_F_IP_CSUM)) { 9725 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9726 features &= ~NETIF_F_TSO; 9727 features &= ~NETIF_F_TSO_ECN; 9728 } 9729 9730 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9731 !(features & NETIF_F_IPV6_CSUM)) { 9732 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9733 features &= ~NETIF_F_TSO6; 9734 } 9735 9736 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9737 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9738 features &= ~NETIF_F_TSO_MANGLEID; 9739 9740 /* TSO ECN requires that TSO is present as well. */ 9741 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9742 features &= ~NETIF_F_TSO_ECN; 9743 9744 /* Software GSO depends on SG. */ 9745 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9746 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9747 features &= ~NETIF_F_GSO; 9748 } 9749 9750 /* GSO partial features require GSO partial be set */ 9751 if ((features & dev->gso_partial_features) && 9752 !(features & NETIF_F_GSO_PARTIAL)) { 9753 netdev_dbg(dev, 9754 "Dropping partially supported GSO features since no GSO partial.\n"); 9755 features &= ~dev->gso_partial_features; 9756 } 9757 9758 if (!(features & NETIF_F_RXCSUM)) { 9759 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9760 * successfully merged by hardware must also have the 9761 * checksum verified by hardware. If the user does not 9762 * want to enable RXCSUM, logically, we should disable GRO_HW. 
9763 */ 9764 if (features & NETIF_F_GRO_HW) { 9765 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9766 features &= ~NETIF_F_GRO_HW; 9767 } 9768 } 9769 9770 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9771 if (features & NETIF_F_RXFCS) { 9772 if (features & NETIF_F_LRO) { 9773 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9774 features &= ~NETIF_F_LRO; 9775 } 9776 9777 if (features & NETIF_F_GRO_HW) { 9778 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9779 features &= ~NETIF_F_GRO_HW; 9780 } 9781 } 9782 9783 if (features & NETIF_F_HW_TLS_TX) { 9784 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9785 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9786 bool hw_csum = features & NETIF_F_HW_CSUM; 9787 9788 if (!ip_csum && !hw_csum) { 9789 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9790 features &= ~NETIF_F_HW_TLS_TX; 9791 } 9792 } 9793 9794 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9795 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9796 features &= ~NETIF_F_HW_TLS_RX; 9797 } 9798 9799 return features; 9800 } 9801 9802 int __netdev_update_features(struct net_device *dev) 9803 { 9804 struct net_device *upper, *lower; 9805 netdev_features_t features; 9806 struct list_head *iter; 9807 int err = -1; 9808 9809 ASSERT_RTNL(); 9810 9811 features = netdev_get_wanted_features(dev); 9812 9813 if (dev->netdev_ops->ndo_fix_features) 9814 features = dev->netdev_ops->ndo_fix_features(dev, features); 9815 9816 /* driver might be less strict about feature dependencies */ 9817 features = netdev_fix_features(dev, features); 9818 9819 /* some features can't be enabled if they're off on an upper device */ 9820 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9821 features = netdev_sync_upper_features(dev, upper, features); 9822 9823 if (dev->features == features) 9824 goto sync_lower; 9825 9826 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9827 &dev->features, &features); 9828 9829 if (dev->netdev_ops->ndo_set_features) 9830 err = dev->netdev_ops->ndo_set_features(dev, features); 9831 else 9832 err = 0; 9833 9834 if (unlikely(err < 0)) { 9835 netdev_err(dev, 9836 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9837 err, &features, &dev->features); 9838 /* return non-0 since some features might have changed and 9839 * it's better to fire a spurious notification than miss it 9840 */ 9841 return -1; 9842 } 9843 9844 sync_lower: 9845 /* some features must be disabled on lower devices when disabled 9846 * on an upper device (think: bonding master or bridge) 9847 */ 9848 netdev_for_each_lower_dev(dev, lower, iter) 9849 netdev_sync_lower_features(dev, lower, features); 9850 9851 if (!err) { 9852 netdev_features_t diff = features ^ dev->features; 9853 9854 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9855 /* udp_tunnel_{get,drop}_rx_info both need 9856 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9857 * device, or they won't do anything. 9858 * Thus we need to update dev->features 9859 * *before* calling udp_tunnel_get_rx_info, 9860 * but *after* calling udp_tunnel_drop_rx_info. 
9861 */ 9862 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9863 dev->features = features; 9864 udp_tunnel_get_rx_info(dev); 9865 } else { 9866 udp_tunnel_drop_rx_info(dev); 9867 } 9868 } 9869 9870 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9871 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9872 dev->features = features; 9873 err |= vlan_get_rx_ctag_filter_info(dev); 9874 } else { 9875 vlan_drop_rx_ctag_filter_info(dev); 9876 } 9877 } 9878 9879 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9880 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9881 dev->features = features; 9882 err |= vlan_get_rx_stag_filter_info(dev); 9883 } else { 9884 vlan_drop_rx_stag_filter_info(dev); 9885 } 9886 } 9887 9888 dev->features = features; 9889 } 9890 9891 return err < 0 ? 0 : 1; 9892 } 9893 9894 /** 9895 * netdev_update_features - recalculate device features 9896 * @dev: the device to check 9897 * 9898 * Recalculate dev->features set and send notifications if it 9899 * has changed. Should be called after driver or hardware dependent 9900 * conditions might have changed that influence the features. 9901 */ 9902 void netdev_update_features(struct net_device *dev) 9903 { 9904 if (__netdev_update_features(dev)) 9905 netdev_features_change(dev); 9906 } 9907 EXPORT_SYMBOL(netdev_update_features); 9908 9909 /** 9910 * netdev_change_features - recalculate device features 9911 * @dev: the device to check 9912 * 9913 * Recalculate dev->features set and send notifications even 9914 * if they have not changed. Should be called instead of 9915 * netdev_update_features() if also dev->vlan_features might 9916 * have changed to allow the changes to be propagated to stacked 9917 * VLAN devices. 9918 */ 9919 void netdev_change_features(struct net_device *dev) 9920 { 9921 __netdev_update_features(dev); 9922 netdev_features_change(dev); 9923 } 9924 EXPORT_SYMBOL(netdev_change_features); 9925 9926 /** 9927 * netif_stacked_transfer_operstate - transfer operstate 9928 * @rootdev: the root or lower level device to transfer state from 9929 * @dev: the device to transfer operstate to 9930 * 9931 * Transfer operational state from root to device. This is normally 9932 * called when a stacking relationship exists between the root 9933 * device and the device(a leaf device). 
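 *
 * For example (illustrative), a VLAN-style upper device would refresh its
 * state from the real device underneath it with
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);
 *
 * whenever the lower device's carrier, dormant or testing state changes.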
9934 */ 9935 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9936 struct net_device *dev) 9937 { 9938 if (rootdev->operstate == IF_OPER_DORMANT) 9939 netif_dormant_on(dev); 9940 else 9941 netif_dormant_off(dev); 9942 9943 if (rootdev->operstate == IF_OPER_TESTING) 9944 netif_testing_on(dev); 9945 else 9946 netif_testing_off(dev); 9947 9948 if (netif_carrier_ok(rootdev)) 9949 netif_carrier_on(dev); 9950 else 9951 netif_carrier_off(dev); 9952 } 9953 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9954 9955 static int netif_alloc_rx_queues(struct net_device *dev) 9956 { 9957 unsigned int i, count = dev->num_rx_queues; 9958 struct netdev_rx_queue *rx; 9959 size_t sz = count * sizeof(*rx); 9960 int err = 0; 9961 9962 BUG_ON(count < 1); 9963 9964 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9965 if (!rx) 9966 return -ENOMEM; 9967 9968 dev->_rx = rx; 9969 9970 for (i = 0; i < count; i++) { 9971 rx[i].dev = dev; 9972 9973 /* XDP RX-queue setup */ 9974 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9975 if (err < 0) 9976 goto err_rxq_info; 9977 } 9978 return 0; 9979 9980 err_rxq_info: 9981 /* Rollback successful reg's and free other resources */ 9982 while (i--) 9983 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9984 kvfree(dev->_rx); 9985 dev->_rx = NULL; 9986 return err; 9987 } 9988 9989 static void netif_free_rx_queues(struct net_device *dev) 9990 { 9991 unsigned int i, count = dev->num_rx_queues; 9992 9993 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9994 if (!dev->_rx) 9995 return; 9996 9997 for (i = 0; i < count; i++) 9998 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9999 10000 kvfree(dev->_rx); 10001 } 10002 10003 static void netdev_init_one_queue(struct net_device *dev, 10004 struct netdev_queue *queue, void *_unused) 10005 { 10006 /* Initialize queue lock */ 10007 spin_lock_init(&queue->_xmit_lock); 10008 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 10009 queue->xmit_lock_owner = -1; 10010 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 10011 queue->dev = dev; 10012 #ifdef CONFIG_BQL 10013 dql_init(&queue->dql, HZ); 10014 #endif 10015 } 10016 10017 static void netif_free_tx_queues(struct net_device *dev) 10018 { 10019 kvfree(dev->_tx); 10020 } 10021 10022 static int netif_alloc_netdev_queues(struct net_device *dev) 10023 { 10024 unsigned int count = dev->num_tx_queues; 10025 struct netdev_queue *tx; 10026 size_t sz = count * sizeof(*tx); 10027 10028 if (count < 1 || count > 0xffff) 10029 return -EINVAL; 10030 10031 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 10032 if (!tx) 10033 return -ENOMEM; 10034 10035 dev->_tx = tx; 10036 10037 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 10038 spin_lock_init(&dev->tx_global_lock); 10039 10040 return 0; 10041 } 10042 10043 void netif_tx_stop_all_queues(struct net_device *dev) 10044 { 10045 unsigned int i; 10046 10047 for (i = 0; i < dev->num_tx_queues; i++) { 10048 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 10049 10050 netif_tx_stop_queue(txq); 10051 } 10052 } 10053 EXPORT_SYMBOL(netif_tx_stop_all_queues); 10054 10055 /** 10056 * register_netdevice - register a network device 10057 * @dev: device to register 10058 * 10059 * Take a completed network device structure and add it to the kernel 10060 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10061 * chain. 0 is returned on success. A negative errno code is returned 10062 * on a failure to set up the device, or if the name is a duplicate. 
10063 * 10064 * Callers must hold the rtnl semaphore. You may want 10065 * register_netdev() instead of this. 10066 * 10067 * BUGS: 10068 * The locking appears insufficient to guarantee two parallel registers 10069 * will not get the same name. 10070 */ 10071 10072 int register_netdevice(struct net_device *dev) 10073 { 10074 int ret; 10075 struct net *net = dev_net(dev); 10076 10077 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 10078 NETDEV_FEATURE_COUNT); 10079 BUG_ON(dev_boot_phase); 10080 ASSERT_RTNL(); 10081 10082 might_sleep(); 10083 10084 /* When net_device's are persistent, this will be fatal. */ 10085 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 10086 BUG_ON(!net); 10087 10088 ret = ethtool_check_ops(dev->ethtool_ops); 10089 if (ret) 10090 return ret; 10091 10092 spin_lock_init(&dev->addr_list_lock); 10093 netdev_set_addr_lockdep_class(dev); 10094 10095 ret = dev_get_valid_name(net, dev, dev->name); 10096 if (ret < 0) 10097 goto out; 10098 10099 ret = -ENOMEM; 10100 dev->name_node = netdev_name_node_head_alloc(dev); 10101 if (!dev->name_node) 10102 goto out; 10103 10104 /* Init, if this function is available */ 10105 if (dev->netdev_ops->ndo_init) { 10106 ret = dev->netdev_ops->ndo_init(dev); 10107 if (ret) { 10108 if (ret > 0) 10109 ret = -EIO; 10110 goto err_free_name; 10111 } 10112 } 10113 10114 if (((dev->hw_features | dev->features) & 10115 NETIF_F_HW_VLAN_CTAG_FILTER) && 10116 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10117 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10118 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10119 ret = -EINVAL; 10120 goto err_uninit; 10121 } 10122 10123 ret = -EBUSY; 10124 if (!dev->ifindex) 10125 dev->ifindex = dev_new_index(net); 10126 else if (__dev_get_by_index(net, dev->ifindex)) 10127 goto err_uninit; 10128 10129 /* Transfer changeable features to wanted_features and enable 10130 * software offloads (GSO and GRO). 10131 */ 10132 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10133 dev->features |= NETIF_F_SOFT_FEATURES; 10134 10135 if (dev->udp_tunnel_nic_info) { 10136 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10137 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10138 } 10139 10140 dev->wanted_features = dev->features & dev->hw_features; 10141 10142 if (!(dev->flags & IFF_LOOPBACK)) 10143 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10144 10145 /* If IPv4 TCP segmentation offload is supported we should also 10146 * allow the device to enable segmenting the frame with the option 10147 * of ignoring a static IP ID value. This doesn't enable the 10148 * feature itself but allows the user to enable it later. 10149 */ 10150 if (dev->hw_features & NETIF_F_TSO) 10151 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10152 if (dev->vlan_features & NETIF_F_TSO) 10153 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10154 if (dev->mpls_features & NETIF_F_TSO) 10155 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10156 if (dev->hw_enc_features & NETIF_F_TSO) 10157 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10158 10159 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10160 */ 10161 dev->vlan_features |= NETIF_F_HIGHDMA; 10162 10163 /* Make NETIF_F_SG inheritable to tunnel devices. 10164 */ 10165 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10166 10167 /* Make NETIF_F_SG inheritable to MPLS. 
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret) {
		dev->reg_state = NETREG_UNREGISTERED;
		goto err_uninit;
	}
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has a permanent device address, the driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* Expect explicit free_netdev() on failure */
		dev->needs_free_netdev = false;
		unregister_netdevice_queue(dev, NULL);
		goto out;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
err_free_name:
	netdev_name_node_free(dev->name_node);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * number of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls.
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note: We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
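/*
 * Illustrative sketch, not part of the original file: a driver that only
 * needs a NAPI context (several hardware queues behind one poller, with no
 * real interface) can embed a dummy net_device as described above.  struct
 * example_napi_ctx, example_napi_poll() and example_napi_setup() are
 * hypothetical.
 */
struct example_napi_ctx {
	struct net_device napi_dev;	/* never registered, NAPI only */
	struct napi_struct napi;
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget packets, bumping work_done ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void example_napi_setup(struct example_napi_ctx *ctx)
{
	init_dummy_netdev(&ctx->napi_dev);
	netif_napi_add(&ctx->napi_dev, &ctx->napi, example_napi_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&ctx->napi);
}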
10274 */ 10275 10276 return 0; 10277 } 10278 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10279 10280 10281 /** 10282 * register_netdev - register a network device 10283 * @dev: device to register 10284 * 10285 * Take a completed network device structure and add it to the kernel 10286 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10287 * chain. 0 is returned on success. A negative errno code is returned 10288 * on a failure to set up the device, or if the name is a duplicate. 10289 * 10290 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10291 * and expands the device name if you passed a format string to 10292 * alloc_netdev. 10293 */ 10294 int register_netdev(struct net_device *dev) 10295 { 10296 int err; 10297 10298 if (rtnl_lock_killable()) 10299 return -EINTR; 10300 err = register_netdevice(dev); 10301 rtnl_unlock(); 10302 return err; 10303 } 10304 EXPORT_SYMBOL(register_netdev); 10305 10306 int netdev_refcnt_read(const struct net_device *dev) 10307 { 10308 int i, refcnt = 0; 10309 10310 for_each_possible_cpu(i) 10311 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10312 return refcnt; 10313 } 10314 EXPORT_SYMBOL(netdev_refcnt_read); 10315 10316 #define WAIT_REFS_MIN_MSECS 1 10317 #define WAIT_REFS_MAX_MSECS 250 10318 /** 10319 * netdev_wait_allrefs - wait until all references are gone. 10320 * @dev: target net_device 10321 * 10322 * This is called when unregistering network devices. 10323 * 10324 * Any protocol or device that holds a reference should register 10325 * for netdevice notification, and cleanup and put back the 10326 * reference if they receive an UNREGISTER event. 10327 * We can get stuck here if buggy protocols don't correctly 10328 * call dev_put. 10329 */ 10330 static void netdev_wait_allrefs(struct net_device *dev) 10331 { 10332 unsigned long rebroadcast_time, warning_time; 10333 int wait = 0, refcnt; 10334 10335 linkwatch_forget_dev(dev); 10336 10337 rebroadcast_time = warning_time = jiffies; 10338 refcnt = netdev_refcnt_read(dev); 10339 10340 while (refcnt != 0) { 10341 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10342 rtnl_lock(); 10343 10344 /* Rebroadcast unregister notification */ 10345 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10346 10347 __rtnl_unlock(); 10348 rcu_barrier(); 10349 rtnl_lock(); 10350 10351 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10352 &dev->state)) { 10353 /* We must not have linkwatch events 10354 * pending on unregister. If this 10355 * happens, we simply run the queue 10356 * unscheduled, resulting in a noop 10357 * for this device. 10358 */ 10359 linkwatch_run_queue(); 10360 } 10361 10362 __rtnl_unlock(); 10363 10364 rebroadcast_time = jiffies; 10365 } 10366 10367 if (!wait) { 10368 rcu_barrier(); 10369 wait = WAIT_REFS_MIN_MSECS; 10370 } else { 10371 msleep(wait); 10372 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10373 } 10374 10375 refcnt = netdev_refcnt_read(dev); 10376 10377 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 10378 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10379 dev->name, refcnt); 10380 warning_time = jiffies; 10381 } 10382 } 10383 } 10384 10385 /* The sequence is: 10386 * 10387 * rtnl_lock(); 10388 * ... 10389 * register_netdevice(x1); 10390 * register_netdevice(x2); 10391 * ... 10392 * unregister_netdevice(y1); 10393 * unregister_netdevice(y2); 10394 * ... 10395 * rtnl_unlock(); 10396 * free_netdev(y1); 10397 * free_netdev(y2); 10398 * 10399 * We are invoked by rtnl_unlock(). 
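/*
 * Illustrative sketch, not part of the original file: a typical probe path
 * built on register_netdev(), which takes the rtnl lock itself (contrast
 * with register_netdevice() above).  example_probe() and example_netdev_ops
 * are hypothetical; the ops table is assumed to be filled in elsewhere.
 */
static const struct net_device_ops example_netdev_ops;

static int example_probe(struct device *parent, struct net_device *dev)
{
	int err;

	dev->netdev_ops = &example_netdev_ops;
	SET_NETDEV_DEV(dev, parent);

	/* offloads the user may toggle with ethtool -K */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	dev->features |= dev->hw_features;

	err = register_netdev(dev);
	if (err)
		dev_err(parent, "failed to register netdev: %d\n", err);
	return err;
}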
10400 * This allows us to deal with problems: 10401 * 1) We can delete sysfs objects which invoke hotplug 10402 * without deadlocking with linkwatch via keventd. 10403 * 2) Since we run with the RTNL semaphore not held, we can sleep 10404 * safely in order to wait for the netdev refcnt to drop to zero. 10405 * 10406 * We must not return until all unregister events added during 10407 * the interval the lock was held have been completed. 10408 */ 10409 void netdev_run_todo(void) 10410 { 10411 struct list_head list; 10412 #ifdef CONFIG_LOCKDEP 10413 struct list_head unlink_list; 10414 10415 list_replace_init(&net_unlink_list, &unlink_list); 10416 10417 while (!list_empty(&unlink_list)) { 10418 struct net_device *dev = list_first_entry(&unlink_list, 10419 struct net_device, 10420 unlink_list); 10421 list_del_init(&dev->unlink_list); 10422 dev->nested_level = dev->lower_level - 1; 10423 } 10424 #endif 10425 10426 /* Snapshot list, allow later requests */ 10427 list_replace_init(&net_todo_list, &list); 10428 10429 __rtnl_unlock(); 10430 10431 10432 /* Wait for rcu callbacks to finish before next phase */ 10433 if (!list_empty(&list)) 10434 rcu_barrier(); 10435 10436 while (!list_empty(&list)) { 10437 struct net_device *dev 10438 = list_first_entry(&list, struct net_device, todo_list); 10439 list_del(&dev->todo_list); 10440 10441 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10442 pr_err("network todo '%s' but state %d\n", 10443 dev->name, dev->reg_state); 10444 dump_stack(); 10445 continue; 10446 } 10447 10448 dev->reg_state = NETREG_UNREGISTERED; 10449 10450 netdev_wait_allrefs(dev); 10451 10452 /* paranoia */ 10453 BUG_ON(netdev_refcnt_read(dev)); 10454 BUG_ON(!list_empty(&dev->ptype_all)); 10455 BUG_ON(!list_empty(&dev->ptype_specific)); 10456 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10457 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10458 #if IS_ENABLED(CONFIG_DECNET) 10459 WARN_ON(dev->dn_ptr); 10460 #endif 10461 if (dev->priv_destructor) 10462 dev->priv_destructor(dev); 10463 if (dev->needs_free_netdev) 10464 free_netdev(dev); 10465 10466 /* Report a network device has been unregistered */ 10467 rtnl_lock(); 10468 dev_net(dev)->dev_unreg_count--; 10469 __rtnl_unlock(); 10470 wake_up(&netdev_unregistering_wq); 10471 10472 /* Free network device */ 10473 kobject_put(&dev->dev.kobj); 10474 } 10475 } 10476 10477 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 10478 * all the same fields in the same order as net_device_stats, with only 10479 * the type differing, but rtnl_link_stats64 may have additional fields 10480 * at the end for newer counters. 
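/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * caches a net_device pointer takes a reference with dev_hold() and must
 * drop it from a NETDEV_UNREGISTER notifier, otherwise netdev_wait_allrefs()
 * above keeps logging "waiting for %s to become free".  The example_cache_*
 * names are hypothetical.
 */
static struct net_device *example_cached_dev;

static void example_cache_dev(struct net_device *dev)
{
	dev_hold(dev);
	example_cached_dev = dev;
}

static int example_cache_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);
	}
	return NOTIFY_DONE;
}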
10481 */ 10482 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10483 const struct net_device_stats *netdev_stats) 10484 { 10485 #if BITS_PER_LONG == 64 10486 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 10487 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 10488 /* zero out counters that only exist in rtnl_link_stats64 */ 10489 memset((char *)stats64 + sizeof(*netdev_stats), 0, 10490 sizeof(*stats64) - sizeof(*netdev_stats)); 10491 #else 10492 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 10493 const unsigned long *src = (const unsigned long *)netdev_stats; 10494 u64 *dst = (u64 *)stats64; 10495 10496 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10497 for (i = 0; i < n; i++) 10498 dst[i] = src[i]; 10499 /* zero out counters that only exist in rtnl_link_stats64 */ 10500 memset((char *)stats64 + n * sizeof(u64), 0, 10501 sizeof(*stats64) - n * sizeof(u64)); 10502 #endif 10503 } 10504 EXPORT_SYMBOL(netdev_stats_to_stats64); 10505 10506 /** 10507 * dev_get_stats - get network device statistics 10508 * @dev: device to get statistics from 10509 * @storage: place to store stats 10510 * 10511 * Get network statistics from device. Return @storage. 10512 * The device driver may provide its own method by setting 10513 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10514 * otherwise the internal statistics structure is used. 10515 */ 10516 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10517 struct rtnl_link_stats64 *storage) 10518 { 10519 const struct net_device_ops *ops = dev->netdev_ops; 10520 10521 if (ops->ndo_get_stats64) { 10522 memset(storage, 0, sizeof(*storage)); 10523 ops->ndo_get_stats64(dev, storage); 10524 } else if (ops->ndo_get_stats) { 10525 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10526 } else { 10527 netdev_stats_to_stats64(storage, &dev->stats); 10528 } 10529 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 10530 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 10531 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 10532 return storage; 10533 } 10534 EXPORT_SYMBOL(dev_get_stats); 10535 10536 /** 10537 * dev_fetch_sw_netstats - get per-cpu network device statistics 10538 * @s: place to store stats 10539 * @netstats: per-cpu network stats to read from 10540 * 10541 * Read per-cpu network statistics and populate the related fields in @s. 10542 */ 10543 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10544 const struct pcpu_sw_netstats __percpu *netstats) 10545 { 10546 int cpu; 10547 10548 for_each_possible_cpu(cpu) { 10549 const struct pcpu_sw_netstats *stats; 10550 struct pcpu_sw_netstats tmp; 10551 unsigned int start; 10552 10553 stats = per_cpu_ptr(netstats, cpu); 10554 do { 10555 start = u64_stats_fetch_begin_irq(&stats->syncp); 10556 tmp.rx_packets = stats->rx_packets; 10557 tmp.rx_bytes = stats->rx_bytes; 10558 tmp.tx_packets = stats->tx_packets; 10559 tmp.tx_bytes = stats->tx_bytes; 10560 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 10561 10562 s->rx_packets += tmp.rx_packets; 10563 s->rx_bytes += tmp.rx_bytes; 10564 s->tx_packets += tmp.tx_packets; 10565 s->tx_bytes += tmp.tx_bytes; 10566 } 10567 } 10568 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10569 10570 /** 10571 * dev_get_tstats64 - ndo_get_stats64 implementation 10572 * @dev: device to get statistics from 10573 * @s: place to store stats 10574 * 10575 * Populate @s from dev->stats and dev->tstats. 
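/*
 * Illustrative sketch, not part of the original file: a driver that keeps
 * per-cpu software counters can allocate dev->tstats once, update it in its
 * datapath under the u64_stats syncp, and let dev_fetch_sw_netstats() /
 * dev_get_tstats64() do the folding.  The example_stats_* names are
 * hypothetical; dev->tstats must be freed with free_percpu() on uninit.
 */
static int example_stats_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_stats_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static const struct net_device_ops example_stats_ops = {
	.ndo_init	 = example_stats_init,
	.ndo_get_stats64 = dev_get_tstats64,
};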
Can be used as 10576 * ndo_get_stats64() callback. 10577 */ 10578 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10579 { 10580 netdev_stats_to_stats64(s, &dev->stats); 10581 dev_fetch_sw_netstats(s, dev->tstats); 10582 } 10583 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10584 10585 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10586 { 10587 struct netdev_queue *queue = dev_ingress_queue(dev); 10588 10589 #ifdef CONFIG_NET_CLS_ACT 10590 if (queue) 10591 return queue; 10592 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10593 if (!queue) 10594 return NULL; 10595 netdev_init_one_queue(dev, queue, NULL); 10596 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10597 queue->qdisc_sleeping = &noop_qdisc; 10598 rcu_assign_pointer(dev->ingress_queue, queue); 10599 #endif 10600 return queue; 10601 } 10602 10603 static const struct ethtool_ops default_ethtool_ops; 10604 10605 void netdev_set_default_ethtool_ops(struct net_device *dev, 10606 const struct ethtool_ops *ops) 10607 { 10608 if (dev->ethtool_ops == &default_ethtool_ops) 10609 dev->ethtool_ops = ops; 10610 } 10611 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10612 10613 void netdev_freemem(struct net_device *dev) 10614 { 10615 char *addr = (char *)dev - dev->padded; 10616 10617 kvfree(addr); 10618 } 10619 10620 /** 10621 * alloc_netdev_mqs - allocate network device 10622 * @sizeof_priv: size of private data to allocate space for 10623 * @name: device name format string 10624 * @name_assign_type: origin of device name 10625 * @setup: callback to initialize device 10626 * @txqs: the number of TX subqueues to allocate 10627 * @rxqs: the number of RX subqueues to allocate 10628 * 10629 * Allocates a struct net_device with private data area for driver use 10630 * and performs basic initialization. Also allocates subqueue structs 10631 * for each queue on the device. 
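/*
 * Illustrative sketch, not part of the original file: allocating a
 * multiqueue Ethernet-style device with private data via alloc_netdev_mqs()
 * and releasing it with free_netdev() if registration fails.  struct
 * example_alloc_priv and example_alloc_create() are hypothetical;
 * ether_setup() is the stock Ethernet setup callback.
 */
struct example_alloc_priv {
	void __iomem *regs;
};

static struct net_device *example_alloc_create(unsigned int nqueues)
{
	struct net_device *dev;

	dev = alloc_netdev_mqs(sizeof(struct example_alloc_priv), "ex%d",
			       NET_NAME_ENUM, ether_setup,
			       nqueues, nqueues);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		/* never registered, so free_netdev() releases it directly */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}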
10632 */ 10633 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10634 unsigned char name_assign_type, 10635 void (*setup)(struct net_device *), 10636 unsigned int txqs, unsigned int rxqs) 10637 { 10638 struct net_device *dev; 10639 unsigned int alloc_size; 10640 struct net_device *p; 10641 10642 BUG_ON(strlen(name) >= sizeof(dev->name)); 10643 10644 if (txqs < 1) { 10645 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10646 return NULL; 10647 } 10648 10649 if (rxqs < 1) { 10650 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10651 return NULL; 10652 } 10653 10654 alloc_size = sizeof(struct net_device); 10655 if (sizeof_priv) { 10656 /* ensure 32-byte alignment of private area */ 10657 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10658 alloc_size += sizeof_priv; 10659 } 10660 /* ensure 32-byte alignment of whole construct */ 10661 alloc_size += NETDEV_ALIGN - 1; 10662 10663 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 10664 if (!p) 10665 return NULL; 10666 10667 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10668 dev->padded = (char *)dev - (char *)p; 10669 10670 dev->pcpu_refcnt = alloc_percpu(int); 10671 if (!dev->pcpu_refcnt) 10672 goto free_dev; 10673 10674 if (dev_addr_init(dev)) 10675 goto free_pcpu; 10676 10677 dev_mc_init(dev); 10678 dev_uc_init(dev); 10679 10680 dev_net_set(dev, &init_net); 10681 10682 dev->gso_max_size = GSO_MAX_SIZE; 10683 dev->gso_max_segs = GSO_MAX_SEGS; 10684 dev->upper_level = 1; 10685 dev->lower_level = 1; 10686 #ifdef CONFIG_LOCKDEP 10687 dev->nested_level = 0; 10688 INIT_LIST_HEAD(&dev->unlink_list); 10689 #endif 10690 10691 INIT_LIST_HEAD(&dev->napi_list); 10692 INIT_LIST_HEAD(&dev->unreg_list); 10693 INIT_LIST_HEAD(&dev->close_list); 10694 INIT_LIST_HEAD(&dev->link_watch_list); 10695 INIT_LIST_HEAD(&dev->adj_list.upper); 10696 INIT_LIST_HEAD(&dev->adj_list.lower); 10697 INIT_LIST_HEAD(&dev->ptype_all); 10698 INIT_LIST_HEAD(&dev->ptype_specific); 10699 INIT_LIST_HEAD(&dev->net_notifier_list); 10700 #ifdef CONFIG_NET_SCHED 10701 hash_init(dev->qdisc_hash); 10702 #endif 10703 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10704 setup(dev); 10705 10706 if (!dev->tx_queue_len) { 10707 dev->priv_flags |= IFF_NO_QUEUE; 10708 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10709 } 10710 10711 dev->num_tx_queues = txqs; 10712 dev->real_num_tx_queues = txqs; 10713 if (netif_alloc_netdev_queues(dev)) 10714 goto free_all; 10715 10716 dev->num_rx_queues = rxqs; 10717 dev->real_num_rx_queues = rxqs; 10718 if (netif_alloc_rx_queues(dev)) 10719 goto free_all; 10720 10721 strcpy(dev->name, name); 10722 dev->name_assign_type = name_assign_type; 10723 dev->group = INIT_NETDEV_GROUP; 10724 if (!dev->ethtool_ops) 10725 dev->ethtool_ops = &default_ethtool_ops; 10726 10727 nf_hook_ingress_init(dev); 10728 10729 return dev; 10730 10731 free_all: 10732 free_netdev(dev); 10733 return NULL; 10734 10735 free_pcpu: 10736 free_percpu(dev->pcpu_refcnt); 10737 free_dev: 10738 netdev_freemem(dev); 10739 return NULL; 10740 } 10741 EXPORT_SYMBOL(alloc_netdev_mqs); 10742 10743 /** 10744 * free_netdev - free network device 10745 * @dev: device 10746 * 10747 * This function does the last stage of destroying an allocated device 10748 * interface. The reference to the device object is released. If this 10749 * is the last reference then it will be freed.Must be called in process 10750 * context. 
10751 */ 10752 void free_netdev(struct net_device *dev) 10753 { 10754 struct napi_struct *p, *n; 10755 10756 might_sleep(); 10757 10758 /* When called immediately after register_netdevice() failed the unwind 10759 * handling may still be dismantling the device. Handle that case by 10760 * deferring the free. 10761 */ 10762 if (dev->reg_state == NETREG_UNREGISTERING) { 10763 ASSERT_RTNL(); 10764 dev->needs_free_netdev = true; 10765 return; 10766 } 10767 10768 netif_free_tx_queues(dev); 10769 netif_free_rx_queues(dev); 10770 10771 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10772 10773 /* Flush device addresses */ 10774 dev_addr_flush(dev); 10775 10776 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10777 netif_napi_del(p); 10778 10779 free_percpu(dev->pcpu_refcnt); 10780 dev->pcpu_refcnt = NULL; 10781 free_percpu(dev->xdp_bulkq); 10782 dev->xdp_bulkq = NULL; 10783 10784 /* Compatibility with error handling in drivers */ 10785 if (dev->reg_state == NETREG_UNINITIALIZED) { 10786 netdev_freemem(dev); 10787 return; 10788 } 10789 10790 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10791 dev->reg_state = NETREG_RELEASED; 10792 10793 /* will free via device release */ 10794 put_device(&dev->dev); 10795 } 10796 EXPORT_SYMBOL(free_netdev); 10797 10798 /** 10799 * synchronize_net - Synchronize with packet receive processing 10800 * 10801 * Wait for packets currently being received to be done. 10802 * Does not block later packets from starting. 10803 */ 10804 void synchronize_net(void) 10805 { 10806 might_sleep(); 10807 if (rtnl_is_locked()) 10808 synchronize_rcu_expedited(); 10809 else 10810 synchronize_rcu(); 10811 } 10812 EXPORT_SYMBOL(synchronize_net); 10813 10814 /** 10815 * unregister_netdevice_queue - remove device from the kernel 10816 * @dev: device 10817 * @head: list 10818 * 10819 * This function shuts down a device interface and removes it 10820 * from the kernel tables. 10821 * If head not NULL, device is queued to be unregistered later. 10822 * 10823 * Callers must hold the rtnl semaphore. You may want 10824 * unregister_netdev() instead of this. 10825 */ 10826 10827 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10828 { 10829 ASSERT_RTNL(); 10830 10831 if (head) { 10832 list_move_tail(&dev->unreg_list, head); 10833 } else { 10834 LIST_HEAD(single); 10835 10836 list_add(&dev->unreg_list, &single); 10837 unregister_netdevice_many(&single); 10838 } 10839 } 10840 EXPORT_SYMBOL(unregister_netdevice_queue); 10841 10842 /** 10843 * unregister_netdevice_many - unregister many devices 10844 * @head: list of devices 10845 * 10846 * Note: As most callers use a stack allocated list_head, 10847 * we force a list_del() to make sure stack wont be corrupted later. 10848 */ 10849 void unregister_netdevice_many(struct list_head *head) 10850 { 10851 struct net_device *dev, *tmp; 10852 LIST_HEAD(close_head); 10853 10854 BUG_ON(dev_boot_phase); 10855 ASSERT_RTNL(); 10856 10857 if (list_empty(head)) 10858 return; 10859 10860 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 10861 /* Some devices call without registering 10862 * for initialization unwind. Remove those 10863 * devices and proceed with the remaining. 
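/*
 * Illustrative sketch, not part of the original file: tearing several
 * devices down in one RTNL critical section by queueing them with
 * unregister_netdevice_queue() and flushing with
 * unregister_netdevice_many(), the same pattern rtnl_link ->dellink
 * handlers use.  example_destroy_pair() is hypothetical.
 */
static void example_destroy_pair(struct net_device *a, struct net_device *b)
{
	LIST_HEAD(kill_list);

	rtnl_lock();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}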
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* The notifier chain MUST detach all of our upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		dev_put(dev);
		net_set_todo(dev);
	}

	list_del(head);
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
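/*
 * Illustrative sketch, not part of the original file: moving a registered
 * device into another network namespace with dev_change_net_namespace(),
 * falling back to an "eth%d"-style pattern if its name is already taken
 * there.  example_move_to_ns() is hypothetical and assumes the caller holds
 * a reference on @net.
 */
static int example_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();

	return err;
}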
10979 */ 10980 10981 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 10982 { 10983 struct net *net_old = dev_net(dev); 10984 int err, new_nsid, new_ifindex; 10985 10986 ASSERT_RTNL(); 10987 10988 /* Don't allow namespace local devices to be moved. */ 10989 err = -EINVAL; 10990 if (dev->features & NETIF_F_NETNS_LOCAL) 10991 goto out; 10992 10993 /* Ensure the device has been registrered */ 10994 if (dev->reg_state != NETREG_REGISTERED) 10995 goto out; 10996 10997 /* Get out if there is nothing todo */ 10998 err = 0; 10999 if (net_eq(net_old, net)) 11000 goto out; 11001 11002 /* Pick the destination device name, and ensure 11003 * we can use it in the destination network namespace. 11004 */ 11005 err = -EEXIST; 11006 if (__dev_get_by_name(net, dev->name)) { 11007 /* We get here if we can't use the current device name */ 11008 if (!pat) 11009 goto out; 11010 err = dev_get_valid_name(net, dev, pat); 11011 if (err < 0) 11012 goto out; 11013 } 11014 11015 /* 11016 * And now a mini version of register_netdevice unregister_netdevice. 11017 */ 11018 11019 /* If device is running close it first. */ 11020 dev_close(dev); 11021 11022 /* And unlink it from device chain */ 11023 unlist_netdevice(dev); 11024 11025 synchronize_net(); 11026 11027 /* Shutdown queueing discipline. */ 11028 dev_shutdown(dev); 11029 11030 /* Notify protocols, that we are about to destroy 11031 * this device. They should clean all the things. 11032 * 11033 * Note that dev->reg_state stays at NETREG_REGISTERED. 11034 * This is wanted because this way 8021q and macvlan know 11035 * the device is just moving and can keep their slaves up. 11036 */ 11037 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11038 rcu_barrier(); 11039 11040 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11041 /* If there is an ifindex conflict assign a new one */ 11042 if (__dev_get_by_index(net, dev->ifindex)) 11043 new_ifindex = dev_new_index(net); 11044 else 11045 new_ifindex = dev->ifindex; 11046 11047 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11048 new_ifindex); 11049 11050 /* 11051 * Flush the unicast and multicast chains 11052 */ 11053 dev_uc_flush(dev); 11054 dev_mc_flush(dev); 11055 11056 /* Send a netdev-removed uevent to the old namespace */ 11057 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11058 netdev_adjacent_del_links(dev); 11059 11060 /* Move per-net netdevice notifiers that are following the netdevice */ 11061 move_netdevice_notifiers_dev_net(dev, net); 11062 11063 /* Actually switch the network namespace */ 11064 dev_net_set(dev, net); 11065 dev->ifindex = new_ifindex; 11066 11067 /* Send a netdev-add uevent to the new namespace */ 11068 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11069 netdev_adjacent_add_links(dev); 11070 11071 /* Fixup kobjects */ 11072 err = device_rename(&dev->dev, dev->name); 11073 WARN_ON(err); 11074 11075 /* Adapt owner in case owning user namespace of target network 11076 * namespace is different from the original one. 11077 */ 11078 err = netdev_change_owner(dev, net_old, net); 11079 WARN_ON(err); 11080 11081 /* Add the device back in the hashes */ 11082 list_netdevice(dev); 11083 11084 /* Notify protocols, that a new device appeared. */ 11085 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11086 11087 /* 11088 * Prevent userspace races by waiting until the network 11089 * device is fully setup before sending notifications. 
11090 */ 11091 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 11092 11093 synchronize_net(); 11094 err = 0; 11095 out: 11096 return err; 11097 } 11098 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 11099 11100 static int dev_cpu_dead(unsigned int oldcpu) 11101 { 11102 struct sk_buff **list_skb; 11103 struct sk_buff *skb; 11104 unsigned int cpu; 11105 struct softnet_data *sd, *oldsd, *remsd = NULL; 11106 11107 local_irq_disable(); 11108 cpu = smp_processor_id(); 11109 sd = &per_cpu(softnet_data, cpu); 11110 oldsd = &per_cpu(softnet_data, oldcpu); 11111 11112 /* Find end of our completion_queue. */ 11113 list_skb = &sd->completion_queue; 11114 while (*list_skb) 11115 list_skb = &(*list_skb)->next; 11116 /* Append completion queue from offline CPU. */ 11117 *list_skb = oldsd->completion_queue; 11118 oldsd->completion_queue = NULL; 11119 11120 /* Append output queue from offline CPU. */ 11121 if (oldsd->output_queue) { 11122 *sd->output_queue_tailp = oldsd->output_queue; 11123 sd->output_queue_tailp = oldsd->output_queue_tailp; 11124 oldsd->output_queue = NULL; 11125 oldsd->output_queue_tailp = &oldsd->output_queue; 11126 } 11127 /* Append NAPI poll list from offline CPU, with one exception : 11128 * process_backlog() must be called by cpu owning percpu backlog. 11129 * We properly handle process_queue & input_pkt_queue later. 11130 */ 11131 while (!list_empty(&oldsd->poll_list)) { 11132 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11133 struct napi_struct, 11134 poll_list); 11135 11136 list_del_init(&napi->poll_list); 11137 if (napi->poll == process_backlog) 11138 napi->state = 0; 11139 else 11140 ____napi_schedule(sd, napi); 11141 } 11142 11143 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11144 local_irq_enable(); 11145 11146 #ifdef CONFIG_RPS 11147 remsd = oldsd->rps_ipi_list; 11148 oldsd->rps_ipi_list = NULL; 11149 #endif 11150 /* send out pending IPI's on offline CPU */ 11151 net_rps_send_ipi(remsd); 11152 11153 /* Process offline CPU's input_pkt_queue */ 11154 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11155 netif_rx_ni(skb); 11156 input_queue_head_incr(oldsd); 11157 } 11158 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11159 netif_rx_ni(skb); 11160 input_queue_head_incr(oldsd); 11161 } 11162 11163 return 0; 11164 } 11165 11166 /** 11167 * netdev_increment_features - increment feature set by one 11168 * @all: current feature set 11169 * @one: new feature set 11170 * @mask: mask feature set 11171 * 11172 * Computes a new feature set after adding a device with feature set 11173 * @one to the master device with current feature set @all. Will not 11174 * enable anything that is off in @mask. Returns the new feature set. 11175 */ 11176 netdev_features_t netdev_increment_features(netdev_features_t all, 11177 netdev_features_t one, netdev_features_t mask) 11178 { 11179 if (mask & NETIF_F_HW_CSUM) 11180 mask |= NETIF_F_CSUM_MASK; 11181 mask |= NETIF_F_VLAN_CHALLENGED; 11182 11183 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11184 all &= one | ~NETIF_F_ALL_FOR_ALL; 11185 11186 /* If one device supports hw checksumming, set for all. 
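/*
 * Illustrative sketch, not part of the original file: a master device
 * (bond/team style) folding its slaves' feature sets together with
 * netdev_increment_features(), loosely modelled on bond_compute_features().
 * The seed value and example_master_features() are simplifications; RTNL is
 * assumed to be held by the caller.
 */
static void example_master_features(struct net_device *master)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	struct net_device *slave;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, slave, iter)
		features = netdev_increment_features(features,
						     slave->features,
						     master->hw_features);

	master->vlan_features = features;
	netdev_change_features(master);	/* re-evaluate and notify the stack */
}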
*/ 11187 if (all & NETIF_F_HW_CSUM) 11188 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11189 11190 return all; 11191 } 11192 EXPORT_SYMBOL(netdev_increment_features); 11193 11194 static struct hlist_head * __net_init netdev_create_hash(void) 11195 { 11196 int i; 11197 struct hlist_head *hash; 11198 11199 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11200 if (hash != NULL) 11201 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11202 INIT_HLIST_HEAD(&hash[i]); 11203 11204 return hash; 11205 } 11206 11207 /* Initialize per network namespace state */ 11208 static int __net_init netdev_init(struct net *net) 11209 { 11210 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11211 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11212 11213 if (net != &init_net) 11214 INIT_LIST_HEAD(&net->dev_base_head); 11215 11216 net->dev_name_head = netdev_create_hash(); 11217 if (net->dev_name_head == NULL) 11218 goto err_name; 11219 11220 net->dev_index_head = netdev_create_hash(); 11221 if (net->dev_index_head == NULL) 11222 goto err_idx; 11223 11224 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11225 11226 return 0; 11227 11228 err_idx: 11229 kfree(net->dev_name_head); 11230 err_name: 11231 return -ENOMEM; 11232 } 11233 11234 /** 11235 * netdev_drivername - network driver for the device 11236 * @dev: network device 11237 * 11238 * Determine network driver for device. 11239 */ 11240 const char *netdev_drivername(const struct net_device *dev) 11241 { 11242 const struct device_driver *driver; 11243 const struct device *parent; 11244 const char *empty = ""; 11245 11246 parent = dev->dev.parent; 11247 if (!parent) 11248 return empty; 11249 11250 driver = parent->driver; 11251 if (driver && driver->name) 11252 return driver->name; 11253 return empty; 11254 } 11255 11256 static void __netdev_printk(const char *level, const struct net_device *dev, 11257 struct va_format *vaf) 11258 { 11259 if (dev && dev->dev.parent) { 11260 dev_printk_emit(level[1] - '0', 11261 dev->dev.parent, 11262 "%s %s %s%s: %pV", 11263 dev_driver_string(dev->dev.parent), 11264 dev_name(dev->dev.parent), 11265 netdev_name(dev), netdev_reg_state(dev), 11266 vaf); 11267 } else if (dev) { 11268 printk("%s%s%s: %pV", 11269 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11270 } else { 11271 printk("%s(NULL net_device): %pV", level, vaf); 11272 } 11273 } 11274 11275 void netdev_printk(const char *level, const struct net_device *dev, 11276 const char *format, ...) 11277 { 11278 struct va_format vaf; 11279 va_list args; 11280 11281 va_start(args, format); 11282 11283 vaf.fmt = format; 11284 vaf.va = &args; 11285 11286 __netdev_printk(level, dev, &vaf); 11287 11288 va_end(args); 11289 } 11290 EXPORT_SYMBOL(netdev_printk); 11291 11292 #define define_netdev_printk_level(func, level) \ 11293 void func(const struct net_device *dev, const char *fmt, ...) 
\ 11294 { \ 11295 struct va_format vaf; \ 11296 va_list args; \ 11297 \ 11298 va_start(args, fmt); \ 11299 \ 11300 vaf.fmt = fmt; \ 11301 vaf.va = &args; \ 11302 \ 11303 __netdev_printk(level, dev, &vaf); \ 11304 \ 11305 va_end(args); \ 11306 } \ 11307 EXPORT_SYMBOL(func); 11308 11309 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11310 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11311 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11312 define_netdev_printk_level(netdev_err, KERN_ERR); 11313 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11314 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11315 define_netdev_printk_level(netdev_info, KERN_INFO); 11316 11317 static void __net_exit netdev_exit(struct net *net) 11318 { 11319 kfree(net->dev_name_head); 11320 kfree(net->dev_index_head); 11321 if (net != &init_net) 11322 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11323 } 11324 11325 static struct pernet_operations __net_initdata netdev_net_ops = { 11326 .init = netdev_init, 11327 .exit = netdev_exit, 11328 }; 11329 11330 static void __net_exit default_device_exit(struct net *net) 11331 { 11332 struct net_device *dev, *aux; 11333 /* 11334 * Push all migratable network devices back to the 11335 * initial network namespace 11336 */ 11337 rtnl_lock(); 11338 for_each_netdev_safe(net, dev, aux) { 11339 int err; 11340 char fb_name[IFNAMSIZ]; 11341 11342 /* Ignore unmoveable devices (i.e. loopback) */ 11343 if (dev->features & NETIF_F_NETNS_LOCAL) 11344 continue; 11345 11346 /* Leave virtual devices for the generic cleanup */ 11347 if (dev->rtnl_link_ops) 11348 continue; 11349 11350 /* Push remaining network devices to init_net */ 11351 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11352 if (__dev_get_by_name(&init_net, fb_name)) 11353 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11354 err = dev_change_net_namespace(dev, &init_net, fb_name); 11355 if (err) { 11356 pr_emerg("%s: failed to move %s to init_net: %d\n", 11357 __func__, dev->name, err); 11358 BUG(); 11359 } 11360 } 11361 rtnl_unlock(); 11362 } 11363 11364 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) 11365 { 11366 /* Return with the rtnl_lock held when there are no network 11367 * devices unregistering in any network namespace in net_list. 11368 */ 11369 struct net *net; 11370 bool unregistering; 11371 DEFINE_WAIT_FUNC(wait, woken_wake_function); 11372 11373 add_wait_queue(&netdev_unregistering_wq, &wait); 11374 for (;;) { 11375 unregistering = false; 11376 rtnl_lock(); 11377 list_for_each_entry(net, net_list, exit_list) { 11378 if (net->dev_unreg_count > 0) { 11379 unregistering = true; 11380 break; 11381 } 11382 } 11383 if (!unregistering) 11384 break; 11385 __rtnl_unlock(); 11386 11387 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 11388 } 11389 remove_wait_queue(&netdev_unregistering_wq, &wait); 11390 } 11391 11392 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11393 { 11394 /* At exit all network devices most be removed from a network 11395 * namespace. Do this in the reverse order of registration. 11396 * Do this across as many network namespaces as possible to 11397 * improve batching efficiency. 
11398 */ 11399 struct net_device *dev; 11400 struct net *net; 11401 LIST_HEAD(dev_kill_list); 11402 11403 /* To prevent network device cleanup code from dereferencing 11404 * loopback devices or network devices that have been freed 11405 * wait here for all pending unregistrations to complete, 11406 * before unregistring the loopback device and allowing the 11407 * network namespace be freed. 11408 * 11409 * The netdev todo list containing all network devices 11410 * unregistrations that happen in default_device_exit_batch 11411 * will run in the rtnl_unlock() at the end of 11412 * default_device_exit_batch. 11413 */ 11414 rtnl_lock_unregistering(net_list); 11415 list_for_each_entry(net, net_list, exit_list) { 11416 for_each_netdev_reverse(net, dev) { 11417 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11418 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11419 else 11420 unregister_netdevice_queue(dev, &dev_kill_list); 11421 } 11422 } 11423 unregister_netdevice_many(&dev_kill_list); 11424 rtnl_unlock(); 11425 } 11426 11427 static struct pernet_operations __net_initdata default_device_ops = { 11428 .exit = default_device_exit, 11429 .exit_batch = default_device_exit_batch, 11430 }; 11431 11432 /* 11433 * Initialize the DEV module. At boot time this walks the device list and 11434 * unhooks any devices that fail to initialise (normally hardware not 11435 * present) and leaves us with a valid list of present and active devices. 11436 * 11437 */ 11438 11439 /* 11440 * This is called single threaded during boot, so no need 11441 * to take the rtnl semaphore. 11442 */ 11443 static int __init net_dev_init(void) 11444 { 11445 int i, rc = -ENOMEM; 11446 11447 BUG_ON(!dev_boot_phase); 11448 11449 if (dev_proc_init()) 11450 goto out; 11451 11452 if (netdev_kobject_init()) 11453 goto out; 11454 11455 INIT_LIST_HEAD(&ptype_all); 11456 for (i = 0; i < PTYPE_HASH_SIZE; i++) 11457 INIT_LIST_HEAD(&ptype_base[i]); 11458 11459 INIT_LIST_HEAD(&offload_base); 11460 11461 if (register_pernet_subsys(&netdev_net_ops)) 11462 goto out; 11463 11464 /* 11465 * Initialise the packet receive queues. 11466 */ 11467 11468 for_each_possible_cpu(i) { 11469 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 11470 struct softnet_data *sd = &per_cpu(softnet_data, i); 11471 11472 INIT_WORK(flush, flush_backlog); 11473 11474 skb_queue_head_init(&sd->input_pkt_queue); 11475 skb_queue_head_init(&sd->process_queue); 11476 #ifdef CONFIG_XFRM_OFFLOAD 11477 skb_queue_head_init(&sd->xfrm_backlog); 11478 #endif 11479 INIT_LIST_HEAD(&sd->poll_list); 11480 sd->output_queue_tailp = &sd->output_queue; 11481 #ifdef CONFIG_RPS 11482 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); 11483 sd->cpu = i; 11484 #endif 11485 11486 init_gro_hash(&sd->backlog); 11487 sd->backlog.poll = process_backlog; 11488 sd->backlog.weight = weight_p; 11489 } 11490 11491 dev_boot_phase = 0; 11492 11493 /* The loopback device is special if any other network devices 11494 * is present in a network namespace the loopback device must 11495 * be present. Since we now dynamically allocate and free the 11496 * loopback device ensure this invariant is maintained by 11497 * keeping the loopback device as the first device on the 11498 * list of network devices. Ensuring the loopback devices 11499 * is the first device that appears and the last network device 11500 * that disappears. 
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
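/*
 * Illustrative sketch, not part of the original file: the netdev_err(),
 * netdev_warn(), netdev_info(), ... helpers generated above prefix messages
 * with the driver, bus and interface names, so drivers can log without
 * assembling that context by hand.  example_open() is hypothetical.
 */
static int example_open(struct net_device *dev)
{
	netdev_info(dev, "opening with %u TX queues\n",
		    dev->real_num_tx_queues);

	if (!netif_carrier_ok(dev))
		netdev_warn(dev, "no link detected\n");

	return 0;
}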