/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
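/*
 * Example (illustrative sketch, not part of the API contract): a caller
 * that hands an skb directly to a driver's transmit path can use
 * dev_xmit_complete() to decide whether it still owns the skb. The
 * netdev_start_xmit() call and "txq" are sketched from the core's usage;
 * locking and requeueing are elided.
 *
 *	netdev_tx_t rc = netdev_start_xmit(skb, dev, txq, false);
 *
 *	if (!dev_xmit_complete(rc))
 *		kfree_skb(skb);		// driver was busy, skb not consumed
 */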

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
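/*
 * Example (illustrative sketch only): walking the multicast list from a
 * driver's ndo_set_rx_mode() callback. "mydrv_write_mc_filter" is a
 * hypothetical helper; the callback runs with the address lists already
 * synchronized by the core.
 *
 *	static void mydrv_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			mydrv_write_mc_filter(dev, ha->addr);
 *	}
 */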

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
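/*
 * Example (illustrative sketch only): reserving link-layer headroom when
 * building an outgoing packet by hand, so that later header pushes do not
 * force a reallocation. "payload_len" is a hypothetical length.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */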

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
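/*
 * Example (illustrative sketch only): an Ethernet-like device usually reuses
 * the generic Ethernet helpers rather than writing its own callbacks. The
 * helper names below come from <linux/etherdevice.h>; treat the exact set
 * as an assumption of this sketch.
 *
 *	static const struct header_ops mydrv_header_ops = {
 *		.create		= eth_header,
 *		.parse		= eth_header_parse,
 *		.cache		= eth_header_cache,
 *		.cache_update	= eth_header_cache_update,
 *	};
 *
 *	dev->header_ops = &mydrv_header_ops;
 */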

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
	int			rx_count;	/* length of rx_list */
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
	struct task_struct	*thread;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* sk_busy_loop() owns this NAPI */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
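/*
 * Example (illustrative sketch only): a pass-through rx_handler and its
 * registration. netdev_rx_handler_register() is declared elsewhere in this
 * header; "mydrv_handle_frame" and "priv" are hypothetical, and the
 * registration must be done under rtnl_lock().
 *
 *	static rx_handler_result_t mydrv_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		// inspect, steal or redirect the skb here
 *		return RX_HANDLER_PASS;		// deliver normally
 *	}
 *
 *	err = netdev_rx_handler_register(dev, mydrv_handle_frame, priv);
 */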

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 * napi_complete - NAPI processing complete
 * @n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
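/*
 * Example (illustrative sketch only): the canonical interrupt + poll
 * pairing. The "mydrv_*" names and priv layout are hypothetical; a real
 * driver also registers the context with netif_napi_add(). Note that
 * device interrupts are only re-enabled when napi_complete_done() returns
 * true, i.e. when the NAPI context really went idle.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		mydrv_disable_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv =
 *			container_of(napi, struct mydrv_priv, napi);
 *		int work_done = mydrv_process_rx(priv, budget);
 *
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			mydrv_enable_irqs(priv);
 *		return work_done;
 *	}
 */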

int dev_set_threaded(struct net_device *dev, bool threaded);

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 */
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
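/*
 * Example (illustrative sketch only): the usual driver-side flow control
 * pattern built on the DRV_XOFF bit. netif_stop_subqueue() and
 * netif_wake_subqueue() are declared further down in this header; the ring
 * accounting helpers are hypothetical.
 *
 *	// in ndo_start_xmit(), before the ring can overflow:
 *	if (mydrv_tx_ring_full(ring))
 *		netif_stop_subqueue(dev, ring->index);
 *
 *	// in the TX completion path, once descriptors were reclaimed:
 *	if (mydrv_tx_ring_has_room(ring))
 *		netif_wake_subqueue(dev, ring->index);
 */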

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net ||
	       (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int	mask;
	struct rcu_head	rcu;
	struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value.  Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	netdevice_tracker		dev_tracker;

#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This helps avoid accessing out-of-bounds memory.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're
 * not crossing its upper bound, as the original dev->num_tc can be updated in
 * the meantime.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	unsigned int	nr_ids;
	s16		num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device	*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
		struct {
			u8 wdma_idx;
			u8 queue;
			u16 wcid;
			u8 bss;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	const u8		*daddr;

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};
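/*
 * Example (illustrative sketch only): resolving the forwarding path for a
 * destination MAC address. dev_fill_forward_path() is declared elsewhere
 * in this header; the exact calling context requirements (RCU read side)
 * are an assumption of this sketch.
 *
 *	struct net_device_path_stack stack;
 *	int i, err;
 *
 *	err = dev_fill_forward_path(dev, daddr, &stack);
 *	if (err)
 *		return err;
 *	for (i = 0; i < stack.num_paths; i++)
 *		pr_debug("hop %d: type %d\n", i, stack.path[i].type);
 */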

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
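/*
 * Example (illustrative sketch only): the usual shape of a driver's
 * ndo_bpf() dispatcher over this command/union pair. "mydrv_setup_xdp"
 * and "mydrv_setup_xsk_pool" are hypothetical helpers.
 *
 *	static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return mydrv_setup_xdp(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return mydrv_setup_xsk_pool(dev, bpf->xsk.pool,
 *						    bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */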

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	appletalk and ieee802154 subsystems but is no longer called by
 *	the device ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *      Enable or disable the VF ability to query its RSS Redirection Table and
 *      Hash Key. This is needed since on some devices the VFs share this
 *      information with the PF and querying it may introduce a theoretical
 *      security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fibre Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *					    struct sk_buff *skb,
 *					    bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
 *			   struct net_device *dev,
 *			   u16 vid,
 *			   struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *				 struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of a specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packets.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *			u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo; no
 *	frames were transmitted and the core caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *					        struct xdp_buff *xdp);
 *      Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *      This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
 *	Get devlink port instance associated with a given netdev.
 *	Called with a reference on the netdevice and devlink locks only,
 *	rtnl_lock is not held.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
 *			 int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *     Get the forwarding path to reach the real device from the HW destination address.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_del_bulk)(struct ndmsg *ndm,
						    struct nlattr *tb[],
						    struct net_device *dev,
						    u16 vid,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							  struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	struct devlink_port *	(*ndo_get_devlink_port)(struct net_device *dev);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm *p, int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
};
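/*
 * Example (illustrative sketch only): a minimal ops table for a simple
 * Ethernet driver. Only ndo_start_xmit is mandatory; eth_mac_addr() and
 * eth_validate_addr() are the stock helpers from <linux/etherdevice.h>,
 * and the "mydrv_*" callbacks are hypothetical.
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_open		= mydrv_open,
 *		.ndo_stop		= mydrv_stop,
 *		.ndo_start_xmit		= mydrv_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_get_stats64	= mydrv_get_stats64,
 *	};
 *
 *	dev->netdev_ops = &mydrv_netdev_ops;
 */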

/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace; this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
 *	skb_headlen(skb) == 0 (data starts from frag0)
 * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
	IFF_TX_SKB_NO_LINEAR		= 1<<31,
	IFF_CHANGE_PROTO_DOWN		= BIT_ULL(32),
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_PHONY_HEADROOM		IFF_PHONY_HEADROOM
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
#define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR

/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
	ML_PRIV_NONE,
	ML_PRIV_CAN,
};

/**
 * struct net_device - The DEVICE structure.
 *
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * @name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file). It is the name
 *		of the interface.
 *
 * @name_node:	Name hashlist node
 * @ifalias:	SNMP alias
 * @mem_end:	Shared memory end
 * @mem_start:	Shared memory start
 * @base_addr:	Device I/O address
 * @irq:	Device IRQ number
 *
 * @state:	Generic network queuing layer state, see netdev_state_t
 * @dev_list:	The global list of network devices
 * @napi_list:	List entry used for polling NAPI devices
 * @unreg_list:	List entry when we are unregistering the
 *		device; see the function unregister_netdev
 * @close_list:	List entry used when we are closing the device
 * @ptype_all:	Device-specific packet handlers for all protocols
 * @ptype_specific: Device-specific, protocol-specific packet handlers
 *
 * @adj_list:	Directly linked devices, like slaves for bonding
 * @features:	Currently active device features
 * @hw_features: User-changeable features
 *
 * @wanted_features:	User-requested features
 * @vlan_features:	Mask of features inheritable by VLAN devices
 *
 * @hw_enc_features:	Mask of features inherited by encapsulating devices
 *			This field indicates what encapsulation
 *			offloads the hardware is capable of doing,
 *			and drivers will need to set them appropriately.
 *
 * @mpls_features:	Mask of features inheritable by MPLS
 * @gso_partial_features: value(s) from NETIF_F_GSO\*
 *
 * @ifindex:	interface index
 * @group:	The group the device belongs to
 *
 * @stats:	Statistics struct, which was left as a legacy, use
 *		rtnl_link_stats64 instead
 *
 * @core_stats:	core networking counters,
 *		do not use this in drivers
 * @carrier_up_count:	Number of times the carrier has been up
 * @carrier_down_count:	Number of times the carrier has been down
 *
 * @wireless_handlers:	List of functions to handle Wireless Extensions,
 *			instead of ioctl,
 *			see <net/iw_handler.h> for details.
 * @wireless_data:	Instance data managed by the core of wireless extensions
 *
 * @netdev_ops:	Includes several pointers to callbacks,
 *		if one wants to override the ndo_*() functions
 * @ethtool_ops:	Management operations
 * @l3mdev_ops:	Layer 3 master device operations
 * @ndisc_ops:	Includes callbacks for different IPv6 neighbour
 *		discovery handling. Necessary for e.g. 6LoWPAN.
 * @xfrmdev_ops:	Transformation offload operations
 * @tlsdev_ops:	Transport Layer Security offload operations
 * @header_ops:	Includes callbacks for creating, parsing, caching, etc.,
 *		of Layer 2 headers.
 *
 * @flags:	Interface flags (a la BSD)
 * @priv_flags:	Like 'flags' but invisible to userspace,
 *		see if.h for the definitions
 * @gflags:	Global flags (kept as legacy)
 * @padded:	How much padding added by alloc_netdev()
 * @operstate:	RFC2863 operstate
 * @link_mode:	Mapping policy to operstate
 * @if_port:	Selectable AUI, TP, ...
 * @dma:	DMA channel
 * @mtu:	Interface MTU value
 * @min_mtu:	Interface Minimum MTU value
 * @max_mtu:	Interface Maximum MTU value
 * @type:	Interface hardware type
 * @hard_header_len: Maximum hardware header length.
1790 * @min_header_len: Minimum hardware header length 1791 * 1792 * @needed_headroom: Extra headroom the hardware may need, but not in all 1793 * cases can this be guaranteed 1794 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1795 * cases can this be guaranteed. Some cases also use 1796 * LL_MAX_HEADER instead to allocate the skb 1797 * 1798 * interface address info: 1799 * 1800 * @perm_addr: Permanent hw address 1801 * @addr_assign_type: Hw address assignment type 1802 * @addr_len: Hardware address length 1803 * @upper_level: Maximum depth level of upper devices. 1804 * @lower_level: Maximum depth level of lower devices. 1805 * @neigh_priv_len: Used in neigh_alloc() 1806 * @dev_id: Used to differentiate devices that share 1807 * the same link layer address 1808 * @dev_port: Used to differentiate devices that share 1809 * the same function 1810 * @addr_list_lock: XXX: need comments on this one 1811 * @name_assign_type: network interface name assignment type 1812 * @uc_promisc: Counter that indicates promiscuous mode 1813 * has been enabled due to the need to listen to 1814 * additional unicast addresses in a device that 1815 * does not implement ndo_set_rx_mode() 1816 * @uc: unicast mac addresses 1817 * @mc: multicast mac addresses 1818 * @dev_addrs: list of device hw addresses 1819 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1820 * @promiscuity: Number of times the NIC is told to work in 1821 * promiscuous mode; if it becomes 0 the NIC will 1822 * exit promiscuous mode 1823 * @allmulti: Counter, enables or disables allmulticast mode 1824 * 1825 * @vlan_info: VLAN info 1826 * @dsa_ptr: dsa specific data 1827 * @tipc_ptr: TIPC specific data 1828 * @atalk_ptr: AppleTalk link 1829 * @ip_ptr: IPv4 specific data 1830 * @dn_ptr: DECnet specific data 1831 * @ip6_ptr: IPv6 specific data 1832 * @ax25_ptr: AX.25 specific data 1833 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1834 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1835 * device struct 1836 * @mpls_ptr: mpls_dev struct pointer 1837 * @mctp_ptr: MCTP specific data 1838 * 1839 * @dev_addr: Hw address (before bcast, 1840 * because most packets are unicast) 1841 * 1842 * @_rx: Array of RX queues 1843 * @num_rx_queues: Number of RX queues 1844 * allocated at register_netdev() time 1845 * @real_num_rx_queues: Number of RX queues currently active in device 1846 * @xdp_prog: XDP sockets filter program pointer 1847 * @gro_flush_timeout: timeout for GRO layer in NAPI 1848 * @napi_defer_hard_irqs: If not zero, provides a counter that would 1849 * allow to avoid NIC hard IRQ, on busy queues. 1850 * 1851 * @rx_handler: handler for received packets 1852 * @rx_handler_data: XXX: need comments on this one 1853 * @miniq_ingress: ingress/clsact qdisc specific data for 1854 * ingress processing 1855 * @ingress_queue: XXX: need comments on this one 1856 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1857 * @broadcast: hw bcast address 1858 * 1859 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1860 * indexed by RX queue number. Assigned by driver. 
1861 * This must only be set if the ndo_rx_flow_steer
1862 * operation is defined
1863 * @index_hlist: Device index hash chain
1864 *
1865 * @_tx: Array of TX queues
1866 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1867 * @real_num_tx_queues: Number of TX queues currently active in device
1868 * @qdisc: Root qdisc from userspace point of view
1869 * @tx_queue_len: Max frames per queue allowed
1870 * @tx_global_lock: XXX: need comments on this one
1871 * @xdp_bulkq: XDP device bulk queue
1872 * @xps_maps: all CPUs/RXQs maps for XPS device
1873 *
1874 *
1875 * @miniq_egress: clsact qdisc specific data for
1876 * egress processing
1877 * @nf_hooks_egress: netfilter hooks executed for egress packets
1878 * @qdisc_hash: qdisc hash table
1879 * @watchdog_timeo: Represents the timeout that is used by
1880 * the watchdog (see dev_watchdog())
1881 * @watchdog_timer: List of timers
1882 *
1883 * @proto_down_reason: reason a netdev interface is held down
1884 * @pcpu_refcnt: Number of references to this device
1885 * @dev_refcnt: Number of references to this device
1886 * @refcnt_tracker: Tracker directory for tracked references to this device
1887 * @todo_list: Delayed register/unregister
1888 * @link_watch_list: XXX: need comments on this one
1889 *
1890 * @reg_state: Register/unregister state machine
1891 * @dismantle: Device is going to be freed
1892 * @rtnl_link_state: This enum represents the phases of creating
1893 * a new link
1894 *
1895 * @needs_free_netdev: Should unregister perform free_netdev?
1896 * @priv_destructor: Called from unregister
1897 * @npinfo: XXX: need comments on this one
1898 * @nd_net: Network namespace this network device is inside
1899 *
1900 * @ml_priv: Mid-layer private
1901 * @ml_priv_type: Mid-layer private type
1902 * @lstats: Loopback statistics
1903 * @tstats: Tunnel statistics
1904 * @dstats: Dummy statistics
1905 * @vstats: Virtual ethernet statistics
1906 *
1907 * @garp_port: GARP
1908 * @mrp_port: MRP
1909 *
1910 * @dm_private: Drop monitor private
1911 *
1912 * @dev: Class/net/name entry
1913 * @sysfs_groups: Space for optional device, statistics and wireless
1914 * sysfs groups
1915 *
1916 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1917 * @rtnl_link_ops: Rtnl_link_ops
1918 *
1919 * @gso_max_size: Maximum size of generic segmentation offload
1920 * @tso_max_size: Device (as in HW) limit on the max TSO request size
1921 * @gso_max_segs: Maximum number of segments that can be passed to the
1922 * NIC for GSO
1923 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
1924 *
1925 * @dcbnl_ops: Data Center Bridging netlink ops
1926 * @num_tc: Number of traffic classes in the net device
1927 * @tc_to_txq: XXX: need comments on this one
1928 * @prio_tc_map: XXX: need comments on this one
1929 *
1930 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1931 *
1932 * @priomap: XXX: need comments on this one
1933 * @phydev: Physical device may attach itself
1934 * for hardware timestamping
1935 * @sfp_bus: attached &struct sfp_bus structure.
1936 *
1937 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1938 *
1939 * @proto_down: protocol port state information can be sent to the
1940 * switch driver and used to set the phys state of the
1941 * switch port.
1942 * 1943 * @wol_enabled: Wake-on-LAN is enabled 1944 * 1945 * @threaded: napi threaded mode is enabled 1946 * 1947 * @net_notifier_list: List of per-net netdev notifier block 1948 * that follow this device when it is moved 1949 * to another network namespace. 1950 * 1951 * @macsec_ops: MACsec offloading ops 1952 * 1953 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 1954 * offload capabilities of the device 1955 * @udp_tunnel_nic: UDP tunnel offload state 1956 * @xdp_state: stores info on attached XDP BPF programs 1957 * 1958 * @nested_level: Used as a parameter of spin_lock_nested() of 1959 * dev->addr_list_lock. 1960 * @unlink_list: As netif_addr_lock() can be called recursively, 1961 * keep a list of interfaces to be deleted. 1962 * @gro_max_size: Maximum size of aggregated packet in generic 1963 * receive offload (GRO) 1964 * 1965 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 1966 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 1967 * @watchdog_dev_tracker: refcount tracker used by watchdog. 1968 * @dev_registered_tracker: tracker for reference held while 1969 * registered 1970 * @offload_xstats_l3: L3 HW stats for this netdevice. 1971 * 1972 * FIXME: cleanup struct net_device such that network protocol info 1973 * moves out. 1974 */ 1975 1976 struct net_device { 1977 char name[IFNAMSIZ]; 1978 struct netdev_name_node *name_node; 1979 struct dev_ifalias __rcu *ifalias; 1980 /* 1981 * I/O specific fields 1982 * FIXME: Merge these and struct ifmap into one 1983 */ 1984 unsigned long mem_end; 1985 unsigned long mem_start; 1986 unsigned long base_addr; 1987 1988 /* 1989 * Some hardware also needs these fields (state,dev_list, 1990 * napi_list,unreg_list,close_list) but they are not 1991 * part of the usual set specified in Space.c. 1992 */ 1993 1994 unsigned long state; 1995 1996 struct list_head dev_list; 1997 struct list_head napi_list; 1998 struct list_head unreg_list; 1999 struct list_head close_list; 2000 struct list_head ptype_all; 2001 struct list_head ptype_specific; 2002 2003 struct { 2004 struct list_head upper; 2005 struct list_head lower; 2006 } adj_list; 2007 2008 /* Read-mostly cache-line for fast-path access */ 2009 unsigned int flags; 2010 unsigned long long priv_flags; 2011 const struct net_device_ops *netdev_ops; 2012 int ifindex; 2013 unsigned short gflags; 2014 unsigned short hard_header_len; 2015 2016 /* Note : dev->mtu is often read without holding a lock. 2017 * Writers usually hold RTNL. 2018 * It is recommended to use READ_ONCE() to annotate the reads, 2019 * and to use WRITE_ONCE() to annotate the writes. 
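 *
 * For instance, a minimal sketch of that convention ("new_mtu" is an
 * illustrative local variable, not a field of this structure):
 *
 *	mtu = READ_ONCE(dev->mtu);	/* lockless reader */
 *	WRITE_ONCE(dev->mtu, new_mtu);	/* writer, typically under RTNL */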
2020 */ 2021 unsigned int mtu; 2022 unsigned short needed_headroom; 2023 unsigned short needed_tailroom; 2024 2025 netdev_features_t features; 2026 netdev_features_t hw_features; 2027 netdev_features_t wanted_features; 2028 netdev_features_t vlan_features; 2029 netdev_features_t hw_enc_features; 2030 netdev_features_t mpls_features; 2031 netdev_features_t gso_partial_features; 2032 2033 unsigned int min_mtu; 2034 unsigned int max_mtu; 2035 unsigned short type; 2036 unsigned char min_header_len; 2037 unsigned char name_assign_type; 2038 2039 int group; 2040 2041 struct net_device_stats stats; /* not used by modern drivers */ 2042 2043 struct net_device_core_stats __percpu *core_stats; 2044 2045 /* Stats to monitor link on/off, flapping */ 2046 atomic_t carrier_up_count; 2047 atomic_t carrier_down_count; 2048 2049 #ifdef CONFIG_WIRELESS_EXT 2050 const struct iw_handler_def *wireless_handlers; 2051 struct iw_public_data *wireless_data; 2052 #endif 2053 const struct ethtool_ops *ethtool_ops; 2054 #ifdef CONFIG_NET_L3_MASTER_DEV 2055 const struct l3mdev_ops *l3mdev_ops; 2056 #endif 2057 #if IS_ENABLED(CONFIG_IPV6) 2058 const struct ndisc_ops *ndisc_ops; 2059 #endif 2060 2061 #ifdef CONFIG_XFRM_OFFLOAD 2062 const struct xfrmdev_ops *xfrmdev_ops; 2063 #endif 2064 2065 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2066 const struct tlsdev_ops *tlsdev_ops; 2067 #endif 2068 2069 const struct header_ops *header_ops; 2070 2071 unsigned char operstate; 2072 unsigned char link_mode; 2073 2074 unsigned char if_port; 2075 unsigned char dma; 2076 2077 /* Interface address info. */ 2078 unsigned char perm_addr[MAX_ADDR_LEN]; 2079 unsigned char addr_assign_type; 2080 unsigned char addr_len; 2081 unsigned char upper_level; 2082 unsigned char lower_level; 2083 2084 unsigned short neigh_priv_len; 2085 unsigned short dev_id; 2086 unsigned short dev_port; 2087 unsigned short padded; 2088 2089 spinlock_t addr_list_lock; 2090 int irq; 2091 2092 struct netdev_hw_addr_list uc; 2093 struct netdev_hw_addr_list mc; 2094 struct netdev_hw_addr_list dev_addrs; 2095 2096 #ifdef CONFIG_SYSFS 2097 struct kset *queues_kset; 2098 #endif 2099 #ifdef CONFIG_LOCKDEP 2100 struct list_head unlink_list; 2101 #endif 2102 unsigned int promiscuity; 2103 unsigned int allmulti; 2104 bool uc_promisc; 2105 #ifdef CONFIG_LOCKDEP 2106 unsigned char nested_level; 2107 #endif 2108 2109 2110 /* Protocol-specific pointers */ 2111 2112 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2113 struct vlan_info __rcu *vlan_info; 2114 #endif 2115 #if IS_ENABLED(CONFIG_NET_DSA) 2116 struct dsa_port *dsa_ptr; 2117 #endif 2118 #if IS_ENABLED(CONFIG_TIPC) 2119 struct tipc_bearer __rcu *tipc_ptr; 2120 #endif 2121 #if IS_ENABLED(CONFIG_ATALK) 2122 void *atalk_ptr; 2123 #endif 2124 struct in_device __rcu *ip_ptr; 2125 #if IS_ENABLED(CONFIG_DECNET) 2126 struct dn_dev __rcu *dn_ptr; 2127 #endif 2128 struct inet6_dev __rcu *ip6_ptr; 2129 #if IS_ENABLED(CONFIG_AX25) 2130 void *ax25_ptr; 2131 #endif 2132 struct wireless_dev *ieee80211_ptr; 2133 struct wpan_dev *ieee802154_ptr; 2134 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2135 struct mpls_dev __rcu *mpls_ptr; 2136 #endif 2137 #if IS_ENABLED(CONFIG_MCTP) 2138 struct mctp_dev __rcu *mctp_ptr; 2139 #endif 2140 2141 /* 2142 * Cache lines mostly used on receive path (including eth_type_trans()) 2143 */ 2144 /* Interface address info used in eth_type_trans() */ 2145 const unsigned char *dev_addr; 2146 2147 struct netdev_rx_queue *_rx; 2148 unsigned int num_rx_queues; 2149 unsigned int real_num_rx_queues; 2150 2151 struct bpf_prog __rcu *xdp_prog; 2152 
unsigned long gro_flush_timeout; 2153 int napi_defer_hard_irqs; 2154 #define GRO_MAX_SIZE 65536 2155 unsigned int gro_max_size; 2156 rx_handler_func_t __rcu *rx_handler; 2157 void __rcu *rx_handler_data; 2158 2159 #ifdef CONFIG_NET_CLS_ACT 2160 struct mini_Qdisc __rcu *miniq_ingress; 2161 #endif 2162 struct netdev_queue __rcu *ingress_queue; 2163 #ifdef CONFIG_NETFILTER_INGRESS 2164 struct nf_hook_entries __rcu *nf_hooks_ingress; 2165 #endif 2166 2167 unsigned char broadcast[MAX_ADDR_LEN]; 2168 #ifdef CONFIG_RFS_ACCEL 2169 struct cpu_rmap *rx_cpu_rmap; 2170 #endif 2171 struct hlist_node index_hlist; 2172 2173 /* 2174 * Cache lines mostly used on transmit path 2175 */ 2176 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 2177 unsigned int num_tx_queues; 2178 unsigned int real_num_tx_queues; 2179 struct Qdisc __rcu *qdisc; 2180 unsigned int tx_queue_len; 2181 spinlock_t tx_global_lock; 2182 2183 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2184 2185 #ifdef CONFIG_XPS 2186 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2187 #endif 2188 #ifdef CONFIG_NET_CLS_ACT 2189 struct mini_Qdisc __rcu *miniq_egress; 2190 #endif 2191 #ifdef CONFIG_NETFILTER_EGRESS 2192 struct nf_hook_entries __rcu *nf_hooks_egress; 2193 #endif 2194 2195 #ifdef CONFIG_NET_SCHED 2196 DECLARE_HASHTABLE (qdisc_hash, 4); 2197 #endif 2198 /* These may be needed for future network-power-down code. */ 2199 struct timer_list watchdog_timer; 2200 int watchdog_timeo; 2201 2202 u32 proto_down_reason; 2203 2204 struct list_head todo_list; 2205 2206 #ifdef CONFIG_PCPU_DEV_REFCNT 2207 int __percpu *pcpu_refcnt; 2208 #else 2209 refcount_t dev_refcnt; 2210 #endif 2211 struct ref_tracker_dir refcnt_tracker; 2212 2213 struct list_head link_watch_list; 2214 2215 enum { NETREG_UNINITIALIZED=0, 2216 NETREG_REGISTERED, /* completed register_netdevice */ 2217 NETREG_UNREGISTERING, /* called unregister_netdevice */ 2218 NETREG_UNREGISTERED, /* completed unregister todo */ 2219 NETREG_RELEASED, /* called free_netdev */ 2220 NETREG_DUMMY, /* dummy device for NAPI poll */ 2221 } reg_state:8; 2222 2223 bool dismantle; 2224 2225 enum { 2226 RTNL_LINK_INITIALIZED, 2227 RTNL_LINK_INITIALIZING, 2228 } rtnl_link_state:16; 2229 2230 bool needs_free_netdev; 2231 void (*priv_destructor)(struct net_device *dev); 2232 2233 #ifdef CONFIG_NETPOLL 2234 struct netpoll_info __rcu *npinfo; 2235 #endif 2236 2237 possible_net_t nd_net; 2238 2239 /* mid-layer private */ 2240 void *ml_priv; 2241 enum netdev_ml_priv_type ml_priv_type; 2242 2243 union { 2244 struct pcpu_lstats __percpu *lstats; 2245 struct pcpu_sw_netstats __percpu *tstats; 2246 struct pcpu_dstats __percpu *dstats; 2247 }; 2248 2249 #if IS_ENABLED(CONFIG_GARP) 2250 struct garp_port __rcu *garp_port; 2251 #endif 2252 #if IS_ENABLED(CONFIG_MRP) 2253 struct mrp_port __rcu *mrp_port; 2254 #endif 2255 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2256 struct dm_hw_stat_delta __rcu *dm_private; 2257 #endif 2258 struct device dev; 2259 const struct attribute_group *sysfs_groups[4]; 2260 const struct attribute_group *sysfs_rx_queue_group; 2261 2262 const struct rtnl_link_ops *rtnl_link_ops; 2263 2264 /* for setting kernel sock attribute on TCP connection setup */ 2265 #define GSO_MAX_SIZE 65536 2266 unsigned int gso_max_size; 2267 #define TSO_LEGACY_MAX_SIZE 65536 2268 #define TSO_MAX_SIZE UINT_MAX 2269 unsigned int tso_max_size; 2270 #define GSO_MAX_SEGS 65535 2271 u16 gso_max_segs; 2272 #define TSO_MAX_SEGS U16_MAX 2273 u16 tso_max_segs; 2274 2275 #ifdef CONFIG_DCB 2276 const struct dcbnl_rtnl_ops 
*dcbnl_ops; 2277 #endif 2278 s16 num_tc; 2279 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2280 u8 prio_tc_map[TC_BITMASK + 1]; 2281 2282 #if IS_ENABLED(CONFIG_FCOE) 2283 unsigned int fcoe_ddp_xid; 2284 #endif 2285 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2286 struct netprio_map __rcu *priomap; 2287 #endif 2288 struct phy_device *phydev; 2289 struct sfp_bus *sfp_bus; 2290 struct lock_class_key *qdisc_tx_busylock; 2291 bool proto_down; 2292 unsigned wol_enabled:1; 2293 unsigned threaded:1; 2294 2295 struct list_head net_notifier_list; 2296 2297 #if IS_ENABLED(CONFIG_MACSEC) 2298 /* MACsec management functions */ 2299 const struct macsec_ops *macsec_ops; 2300 #endif 2301 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2302 struct udp_tunnel_nic *udp_tunnel_nic; 2303 2304 /* protected by rtnl_lock */ 2305 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2306 2307 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2308 netdevice_tracker linkwatch_dev_tracker; 2309 netdevice_tracker watchdog_dev_tracker; 2310 netdevice_tracker dev_registered_tracker; 2311 struct rtnl_hw_stats64 *offload_xstats_l3; 2312 }; 2313 #define to_net_dev(d) container_of(d, struct net_device, dev) 2314 2315 static inline bool netif_elide_gro(const struct net_device *dev) 2316 { 2317 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2318 return true; 2319 return false; 2320 } 2321 2322 #define NETDEV_ALIGN 32 2323 2324 static inline 2325 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2326 { 2327 return dev->prio_tc_map[prio & TC_BITMASK]; 2328 } 2329 2330 static inline 2331 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2332 { 2333 if (tc >= dev->num_tc) 2334 return -EINVAL; 2335 2336 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2337 return 0; 2338 } 2339 2340 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2341 void netdev_reset_tc(struct net_device *dev); 2342 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2343 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2344 2345 static inline 2346 int netdev_get_num_tc(struct net_device *dev) 2347 { 2348 return dev->num_tc; 2349 } 2350 2351 static inline void net_prefetch(void *p) 2352 { 2353 prefetch(p); 2354 #if L1_CACHE_BYTES < 128 2355 prefetch((u8 *)p + L1_CACHE_BYTES); 2356 #endif 2357 } 2358 2359 static inline void net_prefetchw(void *p) 2360 { 2361 prefetchw(p); 2362 #if L1_CACHE_BYTES < 128 2363 prefetchw((u8 *)p + L1_CACHE_BYTES); 2364 #endif 2365 } 2366 2367 void netdev_unbind_sb_channel(struct net_device *dev, 2368 struct net_device *sb_dev); 2369 int netdev_bind_sb_channel_queue(struct net_device *dev, 2370 struct net_device *sb_dev, 2371 u8 tc, u16 count, u16 offset); 2372 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2373 static inline int netdev_get_sb_channel(struct net_device *dev) 2374 { 2375 return max_t(int, -dev->num_tc, 0); 2376 } 2377 2378 static inline 2379 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2380 unsigned int index) 2381 { 2382 return &dev->_tx[index]; 2383 } 2384 2385 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2386 const struct sk_buff *skb) 2387 { 2388 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2389 } 2390 2391 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2392 void (*f)(struct net_device *, 2393 struct netdev_queue *, 2394 void *), 2395 void *arg) 2396 { 2397 unsigned int i; 2398 2399 for (i = 0; i < dev->num_tx_queues; i++) 
2400 f(dev, &dev->_tx[i], arg);
2401 }
2402
2403 #define netdev_lockdep_set_classes(dev) \
2404 { \
2405 static struct lock_class_key qdisc_tx_busylock_key; \
2406 static struct lock_class_key qdisc_xmit_lock_key; \
2407 static struct lock_class_key dev_addr_list_lock_key; \
2408 unsigned int i; \
2409 \
2410 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2411 lockdep_set_class(&(dev)->addr_list_lock, \
2412 &dev_addr_list_lock_key); \
2413 for (i = 0; i < (dev)->num_tx_queues; i++) \
2414 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2415 &qdisc_xmit_lock_key); \
2416 }
2417
2418 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2419 struct net_device *sb_dev);
2420 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2421 struct sk_buff *skb,
2422 struct net_device *sb_dev);
2423
2424 /* returns the headroom that the master device needs to take into account
2425 * when forwarding to this dev
2426 */
2427 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2428 {
2429 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2430 }
2431
2432 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2433 {
2434 if (dev->netdev_ops->ndo_set_rx_headroom)
2435 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2436 }
2437
2438 /* set the device rx headroom to the dev's default */
2439 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2440 {
2441 netdev_set_rx_headroom(dev, -1);
2442 }
2443
2444 static inline void *netdev_get_ml_priv(struct net_device *dev,
2445 enum netdev_ml_priv_type type)
2446 {
2447 if (dev->ml_priv_type != type)
2448 return NULL;
2449
2450 return dev->ml_priv;
2451 }
2452
2453 static inline void netdev_set_ml_priv(struct net_device *dev,
2454 void *ml_priv,
2455 enum netdev_ml_priv_type type)
2456 {
2457 WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2458 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2459 dev->ml_priv_type, type);
2460 WARN(!dev->ml_priv_type && dev->ml_priv,
2461 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2462
2463 dev->ml_priv = ml_priv;
2464 dev->ml_priv_type = type;
2465 }
2466
2467 /*
2468 * Net namespace inlines
2469 */
2470 static inline
2471 struct net *dev_net(const struct net_device *dev)
2472 {
2473 return read_pnet(&dev->nd_net);
2474 }
2475
2476 static inline
2477 void dev_net_set(struct net_device *dev, struct net *net)
2478 {
2479 write_pnet(&dev->nd_net, net);
2480 }
2481
2482 /**
2483 * netdev_priv - access network device private data
2484 * @dev: network device
2485 *
2486 * Get network device private data
2487 */
2488 static inline void *netdev_priv(const struct net_device *dev)
2489 {
2490 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2491 }
2492
2493 /* Set the sysfs physical device reference for the network logical device;
2494 * if set prior to registration, a symlink is created during initialization.
2495 */
2496 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2497
2498 /* Set the sysfs device type for the network logical device to allow
2499 * fine-grained identification of different network device types. For
2500 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
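 *
 * As a hedged illustration (the "foo" names are hypothetical), a driver
 * would typically pair this with SET_NETDEV_DEV() before register_netdev():
 *
 *	static const struct device_type foo_type = {
 *		.name = "foo",
 *	};
 *
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *	SET_NETDEV_DEVTYPE(netdev, &foo_type);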
2501 */
2502 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2503
2504 /* Default NAPI poll() weight
2505 * Device drivers are strongly advised not to use a bigger value
2506 */
2507 #define NAPI_POLL_WEIGHT 64
2508
2509 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2510 int (*poll)(struct napi_struct *, int), int weight);
2511
2512 /**
2513 * netif_napi_add() - initialize a NAPI context
2514 * @dev: network device
2515 * @napi: NAPI context
2516 * @poll: polling function
2517 * @weight: default weight
2518 *
2519 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2520 * *any* of the other NAPI-related functions.
2521 */
2522 static inline void
2523 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2524 int (*poll)(struct napi_struct *, int), int weight)
2525 {
2526 netif_napi_add_weight(dev, napi, poll, weight);
2527 }
2528
2529 static inline void
2530 netif_napi_add_tx_weight(struct net_device *dev,
2531 struct napi_struct *napi,
2532 int (*poll)(struct napi_struct *, int),
2533 int weight)
2534 {
2535 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2536 netif_napi_add_weight(dev, napi, poll, weight);
2537 }
2538
2539 #define netif_tx_napi_add netif_napi_add_tx_weight
2540
2541 /**
2542 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2543 * @dev: network device
2544 * @napi: NAPI context
2545 * @poll: polling function
2546 *
2547 * This variant of netif_napi_add() should be used from drivers using NAPI
2548 * to exclusively poll a TX queue.
2549 * This avoids adding it to napi_hash[], thus polluting that hash table.
2550 */
2551 static inline void netif_napi_add_tx(struct net_device *dev,
2552 struct napi_struct *napi,
2553 int (*poll)(struct napi_struct *, int))
2554 {
2555 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2556 }
2557
2558 /**
2559 * __netif_napi_del - remove a NAPI context
2560 * @napi: NAPI context
2561 *
2562 * Warning: caller must observe RCU grace period before freeing memory
2563 * containing @napi. Drivers might want to call this helper to combine
2564 * all the needed RCU grace periods into a single one.
2565 */
2566 void __netif_napi_del(struct napi_struct *napi);
2567
2568 /**
2569 * netif_napi_del - remove a NAPI context
2570 * @napi: NAPI context
2571 *
2572 * netif_napi_del() removes a NAPI context from the network device NAPI list
2573 */
2574 static inline void netif_napi_del(struct napi_struct *napi)
2575 {
2576 __netif_napi_del(napi);
2577 synchronize_net();
2578 }
2579
2580 struct packet_type {
2581 __be16 type; /* This is really htons(ether_type). */
2582 bool ignore_outgoing;
2583 struct net_device *dev; /* NULL is wildcarded here */
2584 netdevice_tracker dev_tracker;
2585 int (*func) (struct sk_buff *,
2586 struct net_device *,
2587 struct packet_type *,
2588 struct net_device *);
2589 void (*list_func) (struct list_head *,
2590 struct packet_type *,
2591 struct net_device *);
2592 bool (*id_match)(struct packet_type *ptype,
2593 struct sock *sk);
2594 struct net *af_packet_net;
2595 void *af_packet_priv;
2596 struct list_head list;
2597 };
2598
2599 struct offload_callbacks {
2600 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2601 netdev_features_t features);
2602 struct sk_buff *(*gro_receive)(struct list_head *head,
2603 struct sk_buff *skb);
2604 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2605 };
2606
2607 struct packet_offload {
2608 __be16 type; /* This is really htons(ether_type).
*/
2609 u16 priority;
2610 struct offload_callbacks callbacks;
2611 struct list_head list;
2612 };
2613
2614 /* often modified stats are per-CPU, others are shared (netdev->stats) */
2615 struct pcpu_sw_netstats {
2616 u64 rx_packets;
2617 u64 rx_bytes;
2618 u64 tx_packets;
2619 u64 tx_bytes;
2620 struct u64_stats_sync syncp;
2621 } __aligned(4 * sizeof(u64));
2622
2623 struct pcpu_lstats {
2624 u64_stats_t packets;
2625 u64_stats_t bytes;
2626 struct u64_stats_sync syncp;
2627 } __aligned(2 * sizeof(u64));
2628
2629 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2630
2631 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2632 {
2633 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2634
2635 u64_stats_update_begin(&tstats->syncp);
2636 tstats->rx_bytes += len;
2637 tstats->rx_packets++;
2638 u64_stats_update_end(&tstats->syncp);
2639 }
2640
2641 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
2642 unsigned int packets,
2643 unsigned int len)
2644 {
2645 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2646
2647 u64_stats_update_begin(&tstats->syncp);
2648 tstats->tx_bytes += len;
2649 tstats->tx_packets += packets;
2650 u64_stats_update_end(&tstats->syncp);
2651 }
2652
2653 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2654 {
2655 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2656
2657 u64_stats_update_begin(&lstats->syncp);
2658 u64_stats_add(&lstats->bytes, len);
2659 u64_stats_inc(&lstats->packets);
2660 u64_stats_update_end(&lstats->syncp);
2661 }
2662
2663 #define __netdev_alloc_pcpu_stats(type, gfp) \
2664 ({ \
2665 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2666 if (pcpu_stats) { \
2667 int __cpu; \
2668 for_each_possible_cpu(__cpu) { \
2669 typeof(type) *stat; \
2670 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2671 u64_stats_init(&stat->syncp); \
2672 } \
2673 } \
2674 pcpu_stats; \
2675 })
2676
2677 #define netdev_alloc_pcpu_stats(type) \
2678 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2679
2680 #define devm_netdev_alloc_pcpu_stats(dev, type) \
2681 ({ \
2682 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
2683 if (pcpu_stats) { \
2684 int __cpu; \
2685 for_each_possible_cpu(__cpu) { \
2686 typeof(type) *stat; \
2687 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2688 u64_stats_init(&stat->syncp); \
2689 } \
2690 } \
2691 pcpu_stats; \
2692 })
2693
2694 enum netdev_lag_tx_type {
2695 NETDEV_LAG_TX_TYPE_UNKNOWN,
2696 NETDEV_LAG_TX_TYPE_RANDOM,
2697 NETDEV_LAG_TX_TYPE_BROADCAST,
2698 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2699 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2700 NETDEV_LAG_TX_TYPE_HASH,
2701 };
2702
2703 enum netdev_lag_hash {
2704 NETDEV_LAG_HASH_NONE,
2705 NETDEV_LAG_HASH_L2,
2706 NETDEV_LAG_HASH_L34,
2707 NETDEV_LAG_HASH_L23,
2708 NETDEV_LAG_HASH_E23,
2709 NETDEV_LAG_HASH_E34,
2710 NETDEV_LAG_HASH_VLAN_SRCMAC,
2711 NETDEV_LAG_HASH_UNKNOWN,
2712 };
2713
2714 struct netdev_lag_upper_info {
2715 enum netdev_lag_tx_type tx_type;
2716 enum netdev_lag_hash hash_type;
2717 };
2718
2719 struct netdev_lag_lower_state_info {
2720 u8 link_up : 1,
2721 tx_enabled : 1;
2722 };
2723
2724 #include <linux/notifier.h>
2725
2726 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2727 * and the rtnetlink notification exclusion list in rtnetlink_event() when
2728 * adding new types.
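 *
 * A minimal sketch of subscribing to this chain (the "foo" names are
 * illustrative, not an existing in-tree user):
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "came up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_notifier);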
2729 */ 2730 enum netdev_cmd { 2731 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2732 NETDEV_DOWN, 2733 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2734 detected a hardware crash and restarted 2735 - we can use this eg to kick tcp sessions 2736 once done */ 2737 NETDEV_CHANGE, /* Notify device state change */ 2738 NETDEV_REGISTER, 2739 NETDEV_UNREGISTER, 2740 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2741 NETDEV_CHANGEADDR, /* notify after the address change */ 2742 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2743 NETDEV_GOING_DOWN, 2744 NETDEV_CHANGENAME, 2745 NETDEV_FEAT_CHANGE, 2746 NETDEV_BONDING_FAILOVER, 2747 NETDEV_PRE_UP, 2748 NETDEV_PRE_TYPE_CHANGE, 2749 NETDEV_POST_TYPE_CHANGE, 2750 NETDEV_POST_INIT, 2751 NETDEV_RELEASE, 2752 NETDEV_NOTIFY_PEERS, 2753 NETDEV_JOIN, 2754 NETDEV_CHANGEUPPER, 2755 NETDEV_RESEND_IGMP, 2756 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2757 NETDEV_CHANGEINFODATA, 2758 NETDEV_BONDING_INFO, 2759 NETDEV_PRECHANGEUPPER, 2760 NETDEV_CHANGELOWERSTATE, 2761 NETDEV_UDP_TUNNEL_PUSH_INFO, 2762 NETDEV_UDP_TUNNEL_DROP_INFO, 2763 NETDEV_CHANGE_TX_QUEUE_LEN, 2764 NETDEV_CVLAN_FILTER_PUSH_INFO, 2765 NETDEV_CVLAN_FILTER_DROP_INFO, 2766 NETDEV_SVLAN_FILTER_PUSH_INFO, 2767 NETDEV_SVLAN_FILTER_DROP_INFO, 2768 NETDEV_OFFLOAD_XSTATS_ENABLE, 2769 NETDEV_OFFLOAD_XSTATS_DISABLE, 2770 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 2771 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 2772 }; 2773 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 2774 2775 int register_netdevice_notifier(struct notifier_block *nb); 2776 int unregister_netdevice_notifier(struct notifier_block *nb); 2777 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 2778 int unregister_netdevice_notifier_net(struct net *net, 2779 struct notifier_block *nb); 2780 int register_netdevice_notifier_dev_net(struct net_device *dev, 2781 struct notifier_block *nb, 2782 struct netdev_net_notifier *nn); 2783 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2784 struct notifier_block *nb, 2785 struct netdev_net_notifier *nn); 2786 2787 struct netdev_notifier_info { 2788 struct net_device *dev; 2789 struct netlink_ext_ack *extack; 2790 }; 2791 2792 struct netdev_notifier_info_ext { 2793 struct netdev_notifier_info info; /* must be first */ 2794 union { 2795 u32 mtu; 2796 } ext; 2797 }; 2798 2799 struct netdev_notifier_change_info { 2800 struct netdev_notifier_info info; /* must be first */ 2801 unsigned int flags_changed; 2802 }; 2803 2804 struct netdev_notifier_changeupper_info { 2805 struct netdev_notifier_info info; /* must be first */ 2806 struct net_device *upper_dev; /* new upper dev */ 2807 bool master; /* is upper dev master */ 2808 bool linking; /* is the notification for link or unlink */ 2809 void *upper_info; /* upper dev info */ 2810 }; 2811 2812 struct netdev_notifier_changelowerstate_info { 2813 struct netdev_notifier_info info; /* must be first */ 2814 void *lower_state_info; /* is lower dev state */ 2815 }; 2816 2817 struct netdev_notifier_pre_changeaddr_info { 2818 struct netdev_notifier_info info; /* must be first */ 2819 const unsigned char *dev_addr; 2820 }; 2821 2822 enum netdev_offload_xstats_type { 2823 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 2824 }; 2825 2826 struct netdev_notifier_offload_xstats_info { 2827 struct netdev_notifier_info info; /* must be first */ 2828 enum netdev_offload_xstats_type type; 2829 2830 union { 2831 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 2832 struct 
netdev_notifier_offload_xstats_rd *report_delta; 2833 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 2834 struct netdev_notifier_offload_xstats_ru *report_used; 2835 }; 2836 }; 2837 2838 int netdev_offload_xstats_enable(struct net_device *dev, 2839 enum netdev_offload_xstats_type type, 2840 struct netlink_ext_ack *extack); 2841 int netdev_offload_xstats_disable(struct net_device *dev, 2842 enum netdev_offload_xstats_type type); 2843 bool netdev_offload_xstats_enabled(const struct net_device *dev, 2844 enum netdev_offload_xstats_type type); 2845 int netdev_offload_xstats_get(struct net_device *dev, 2846 enum netdev_offload_xstats_type type, 2847 struct rtnl_hw_stats64 *stats, bool *used, 2848 struct netlink_ext_ack *extack); 2849 void 2850 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 2851 const struct rtnl_hw_stats64 *stats); 2852 void 2853 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 2854 void netdev_offload_xstats_push_delta(struct net_device *dev, 2855 enum netdev_offload_xstats_type type, 2856 const struct rtnl_hw_stats64 *stats); 2857 2858 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2859 struct net_device *dev) 2860 { 2861 info->dev = dev; 2862 info->extack = NULL; 2863 } 2864 2865 static inline struct net_device * 2866 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 2867 { 2868 return info->dev; 2869 } 2870 2871 static inline struct netlink_ext_ack * 2872 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 2873 { 2874 return info->extack; 2875 } 2876 2877 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 2878 2879 2880 extern rwlock_t dev_base_lock; /* Device list lock */ 2881 2882 #define for_each_netdev(net, d) \ 2883 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 2884 #define for_each_netdev_reverse(net, d) \ 2885 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 2886 #define for_each_netdev_rcu(net, d) \ 2887 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 2888 #define for_each_netdev_safe(net, d, n) \ 2889 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 2890 #define for_each_netdev_continue(net, d) \ 2891 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 2892 #define for_each_netdev_continue_reverse(net, d) \ 2893 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 2894 dev_list) 2895 #define for_each_netdev_continue_rcu(net, d) \ 2896 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2897 #define for_each_netdev_in_bond_rcu(bond, slave) \ 2898 for_each_netdev_rcu(&init_net, slave) \ 2899 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 2900 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2901 2902 static inline struct net_device *next_net_device(struct net_device *dev) 2903 { 2904 struct list_head *lh; 2905 struct net *net; 2906 2907 net = dev_net(dev); 2908 lh = dev->dev_list.next; 2909 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 2910 } 2911 2912 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 2913 { 2914 struct list_head *lh; 2915 struct net *net; 2916 2917 net = dev_net(dev); 2918 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 2919 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 2920 } 2921 2922 static inline struct net_device *first_net_device(struct net *net) 2923 { 2924 return list_empty(&net->dev_base_head) ? 
NULL : 2925 net_device_entry(net->dev_base_head.next); 2926 } 2927 2928 static inline struct net_device *first_net_device_rcu(struct net *net) 2929 { 2930 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 2931 2932 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 2933 } 2934 2935 int netdev_boot_setup_check(struct net_device *dev); 2936 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 2937 const char *hwaddr); 2938 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 2939 void dev_add_pack(struct packet_type *pt); 2940 void dev_remove_pack(struct packet_type *pt); 2941 void __dev_remove_pack(struct packet_type *pt); 2942 void dev_add_offload(struct packet_offload *po); 2943 void dev_remove_offload(struct packet_offload *po); 2944 2945 int dev_get_iflink(const struct net_device *dev); 2946 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 2947 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 2948 struct net_device_path_stack *stack); 2949 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 2950 unsigned short mask); 2951 struct net_device *dev_get_by_name(struct net *net, const char *name); 2952 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 2953 struct net_device *__dev_get_by_name(struct net *net, const char *name); 2954 bool netdev_name_in_use(struct net *net, const char *name); 2955 int dev_alloc_name(struct net_device *dev, const char *name); 2956 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 2957 void dev_close(struct net_device *dev); 2958 void dev_close_many(struct list_head *head, bool unlink); 2959 void dev_disable_lro(struct net_device *dev); 2960 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 2961 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 2962 struct net_device *sb_dev); 2963 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 2964 struct net_device *sb_dev); 2965 2966 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 2967 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 2968 2969 static inline int dev_queue_xmit(struct sk_buff *skb) 2970 { 2971 return __dev_queue_xmit(skb, NULL); 2972 } 2973 2974 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 2975 struct net_device *sb_dev) 2976 { 2977 return __dev_queue_xmit(skb, sb_dev); 2978 } 2979 2980 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 2981 { 2982 int ret; 2983 2984 ret = __dev_direct_xmit(skb, queue_id); 2985 if (!dev_xmit_complete(ret)) 2986 kfree_skb(skb); 2987 return ret; 2988 } 2989 2990 int register_netdevice(struct net_device *dev); 2991 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 2992 void unregister_netdevice_many(struct list_head *head); 2993 static inline void unregister_netdevice(struct net_device *dev) 2994 { 2995 unregister_netdevice_queue(dev, NULL); 2996 } 2997 2998 int netdev_refcnt_read(const struct net_device *dev); 2999 void free_netdev(struct net_device *dev); 3000 void netdev_freemem(struct net_device *dev); 3001 int init_dummy_netdev(struct net_device *dev); 3002 3003 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3004 struct sk_buff *skb, 3005 bool all_slaves); 3006 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3007 struct sock *sk); 3008 struct net_device 
*dev_get_by_index(struct net *net, int ifindex); 3009 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3010 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3011 struct net_device *dev_get_by_napi_id(unsigned int napi_id); 3012 int dev_restart(struct net_device *dev); 3013 3014 3015 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3016 unsigned short type, 3017 const void *daddr, const void *saddr, 3018 unsigned int len) 3019 { 3020 if (!dev->header_ops || !dev->header_ops->create) 3021 return 0; 3022 3023 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3024 } 3025 3026 static inline int dev_parse_header(const struct sk_buff *skb, 3027 unsigned char *haddr) 3028 { 3029 const struct net_device *dev = skb->dev; 3030 3031 if (!dev->header_ops || !dev->header_ops->parse) 3032 return 0; 3033 return dev->header_ops->parse(skb, haddr); 3034 } 3035 3036 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3037 { 3038 const struct net_device *dev = skb->dev; 3039 3040 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3041 return 0; 3042 return dev->header_ops->parse_protocol(skb); 3043 } 3044 3045 /* ll_header must have at least hard_header_len allocated */ 3046 static inline bool dev_validate_header(const struct net_device *dev, 3047 char *ll_header, int len) 3048 { 3049 if (likely(len >= dev->hard_header_len)) 3050 return true; 3051 if (len < dev->min_header_len) 3052 return false; 3053 3054 if (capable(CAP_SYS_RAWIO)) { 3055 memset(ll_header + len, 0, dev->hard_header_len - len); 3056 return true; 3057 } 3058 3059 if (dev->header_ops && dev->header_ops->validate) 3060 return dev->header_ops->validate(ll_header, len); 3061 3062 return false; 3063 } 3064 3065 static inline bool dev_has_header(const struct net_device *dev) 3066 { 3067 return dev->header_ops && dev->header_ops->create; 3068 } 3069 3070 /* 3071 * Incoming packets are placed on per-CPU queues 3072 */ 3073 struct softnet_data { 3074 struct list_head poll_list; 3075 struct sk_buff_head process_queue; 3076 3077 /* stats */ 3078 unsigned int processed; 3079 unsigned int time_squeeze; 3080 unsigned int received_rps; 3081 #ifdef CONFIG_RPS 3082 struct softnet_data *rps_ipi_list; 3083 #endif 3084 #ifdef CONFIG_NET_FLOW_LIMIT 3085 struct sd_flow_limit __rcu *flow_limit; 3086 #endif 3087 struct Qdisc *output_queue; 3088 struct Qdisc **output_queue_tailp; 3089 struct sk_buff *completion_queue; 3090 #ifdef CONFIG_XFRM_OFFLOAD 3091 struct sk_buff_head xfrm_backlog; 3092 #endif 3093 /* written and read only by owning cpu: */ 3094 struct { 3095 u16 recursion; 3096 u8 more; 3097 #ifdef CONFIG_NET_EGRESS 3098 u8 skip_txqueue; 3099 #endif 3100 } xmit; 3101 #ifdef CONFIG_RPS 3102 /* input_queue_head should be written by cpu owning this struct, 3103 * and only read by other cpus. Worth using a cache line. 
3104 */ 3105 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3106 3107 /* Elements below can be accessed between CPUs for RPS/RFS */ 3108 call_single_data_t csd ____cacheline_aligned_in_smp; 3109 struct softnet_data *rps_ipi_next; 3110 unsigned int cpu; 3111 unsigned int input_queue_tail; 3112 #endif 3113 unsigned int dropped; 3114 struct sk_buff_head input_pkt_queue; 3115 struct napi_struct backlog; 3116 3117 /* Another possibly contended cache line */ 3118 spinlock_t defer_lock ____cacheline_aligned_in_smp; 3119 int defer_count; 3120 struct sk_buff *defer_list; 3121 call_single_data_t defer_csd; 3122 }; 3123 3124 static inline void input_queue_head_incr(struct softnet_data *sd) 3125 { 3126 #ifdef CONFIG_RPS 3127 sd->input_queue_head++; 3128 #endif 3129 } 3130 3131 static inline void input_queue_tail_incr_save(struct softnet_data *sd, 3132 unsigned int *qtail) 3133 { 3134 #ifdef CONFIG_RPS 3135 *qtail = ++sd->input_queue_tail; 3136 #endif 3137 } 3138 3139 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3140 3141 static inline int dev_recursion_level(void) 3142 { 3143 return this_cpu_read(softnet_data.xmit.recursion); 3144 } 3145 3146 #define XMIT_RECURSION_LIMIT 8 3147 static inline bool dev_xmit_recursion(void) 3148 { 3149 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 3150 XMIT_RECURSION_LIMIT); 3151 } 3152 3153 static inline void dev_xmit_recursion_inc(void) 3154 { 3155 __this_cpu_inc(softnet_data.xmit.recursion); 3156 } 3157 3158 static inline void dev_xmit_recursion_dec(void) 3159 { 3160 __this_cpu_dec(softnet_data.xmit.recursion); 3161 } 3162 3163 void __netif_schedule(struct Qdisc *q); 3164 void netif_schedule_queue(struct netdev_queue *txq); 3165 3166 static inline void netif_tx_schedule_all(struct net_device *dev) 3167 { 3168 unsigned int i; 3169 3170 for (i = 0; i < dev->num_tx_queues; i++) 3171 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3172 } 3173 3174 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3175 { 3176 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3177 } 3178 3179 /** 3180 * netif_start_queue - allow transmit 3181 * @dev: network device 3182 * 3183 * Allow upper layers to call the device hard_start_xmit routine. 3184 */ 3185 static inline void netif_start_queue(struct net_device *dev) 3186 { 3187 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3188 } 3189 3190 static inline void netif_tx_start_all_queues(struct net_device *dev) 3191 { 3192 unsigned int i; 3193 3194 for (i = 0; i < dev->num_tx_queues; i++) { 3195 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3196 netif_tx_start_queue(txq); 3197 } 3198 } 3199 3200 void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3201 3202 /** 3203 * netif_wake_queue - restart transmit 3204 * @dev: network device 3205 * 3206 * Allow upper layers to call the device hard_start_xmit routine. 3207 * Used for flow control when transmit resources are available. 
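 *
 * A common sketch of its use in a driver's TX completion handler,
 * single-queue case ("free_descs" and "WAKE_THRESH" are hypothetical
 * driver state, not part of this API):
 *
 *	if (netif_queue_stopped(dev) && free_descs >= WAKE_THRESH)
 *		netif_wake_queue(dev);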
3208 */
3209 static inline void netif_wake_queue(struct net_device *dev)
3210 {
3211 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3212 }
3213
3214 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3215 {
3216 unsigned int i;
3217
3218 for (i = 0; i < dev->num_tx_queues; i++) {
3219 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3220 netif_tx_wake_queue(txq);
3221 }
3222 }
3223
3224 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3225 {
3226 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3227 }
3228
3229 /**
3230 * netif_stop_queue - stop a transmit queue
3231 * @dev: network device
3232 *
3233 * Stop upper layers calling the device hard_start_xmit routine.
3234 * Used for flow control when transmit resources are unavailable.
3235 */
3236 static inline void netif_stop_queue(struct net_device *dev)
3237 {
3238 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3239 }
3240
3241 void netif_tx_stop_all_queues(struct net_device *dev);
3242
3243 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3244 {
3245 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3246 }
3247
3248 /**
3249 * netif_queue_stopped - test if transmit queue is flow blocked
3250 * @dev: network device
3251 *
3252 * Test if transmit queue on device is currently unable to send.
3253 */
3254 static inline bool netif_queue_stopped(const struct net_device *dev)
3255 {
3256 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3257 }
3258
3259 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3260 {
3261 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3262 }
3263
3264 static inline bool
3265 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3266 {
3267 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3268 }
3269
3270 static inline bool
3271 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3272 {
3273 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3274 }
3275
3276 /**
3277 * netdev_queue_set_dql_min_limit - set dql minimum limit
3278 * @dev_queue: pointer to transmit queue
3279 * @min_limit: dql minimum limit
3280 *
3281 * Forces xmit_more() to return true until the minimum threshold
3282 * defined by @min_limit is reached (or until the tx queue is
3283 * empty). Warning: to be used with care; misuse will impact
3284 * latency.
3285 */
3286 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3287 unsigned int min_limit)
3288 {
3289 #ifdef CONFIG_BQL
3290 dev_queue->dql.min_limit = min_limit;
3291 #endif
3292 }
3293
3294 /**
3295 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3296 * @dev_queue: pointer to transmit queue
3297 *
3298 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3299 * to give appropriate hint to the CPU.
3300 */
3301 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3302 {
3303 #ifdef CONFIG_BQL
3304 prefetchw(&dev_queue->dql.num_queued);
3305 #endif
3306 }
3307
3308 /**
3309 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3310 * @dev_queue: pointer to transmit queue
3311 *
3312 * BQL enabled drivers might use this helper in their TX completion path,
3313 * to give appropriate hint to the CPU.
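 *
 * For context, a sketch of the usual BQL pairing these prefetch helpers
 * serve (variable names are illustrative):
 *
 *	ndo_start_xmit():
 *		netdev_txq_bql_enqueue_prefetchw(txq);
 *		...
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	TX completion path:
 *		netdev_txq_bql_complete_prefetchw(txq);
 *		...
 *		netdev_tx_completed_queue(txq, pkts, bytes);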
3314 */ 3315 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3316 { 3317 #ifdef CONFIG_BQL 3318 prefetchw(&dev_queue->dql.limit); 3319 #endif 3320 } 3321 3322 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3323 unsigned int bytes) 3324 { 3325 #ifdef CONFIG_BQL 3326 dql_queued(&dev_queue->dql, bytes); 3327 3328 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3329 return; 3330 3331 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3332 3333 /* 3334 * The XOFF flag must be set before checking the dql_avail below, 3335 * because in netdev_tx_completed_queue we update the dql_completed 3336 * before checking the XOFF flag. 3337 */ 3338 smp_mb(); 3339 3340 /* check again in case another CPU has just made room avail */ 3341 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3342 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3343 #endif 3344 } 3345 3346 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3347 * that they should not test BQL status themselves. 3348 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3349 * skb of a batch. 3350 * Returns true if the doorbell must be used to kick the NIC. 3351 */ 3352 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3353 unsigned int bytes, 3354 bool xmit_more) 3355 { 3356 if (xmit_more) { 3357 #ifdef CONFIG_BQL 3358 dql_queued(&dev_queue->dql, bytes); 3359 #endif 3360 return netif_tx_queue_stopped(dev_queue); 3361 } 3362 netdev_tx_sent_queue(dev_queue, bytes); 3363 return true; 3364 } 3365 3366 /** 3367 * netdev_sent_queue - report the number of bytes queued to hardware 3368 * @dev: network device 3369 * @bytes: number of bytes queued to the hardware device queue 3370 * 3371 * Report the number of bytes queued for sending/completion to the network 3372 * device hardware queue. 
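 *
 * A common xmit_more pattern built on the __netdev_tx_sent_queue()
 * variant above (a sketch; foo_ring_doorbell() is hypothetical):
 *
 *	if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
 *		foo_ring_doorbell(fp);
 *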
@bytes should be a good approximation and must 3373 * exactly match the @bytes later passed to netdev_completed_queue() 3374 */ 3375 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3376 { 3377 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3378 } 3379 3380 static inline bool __netdev_sent_queue(struct net_device *dev, 3381 unsigned int bytes, 3382 bool xmit_more) 3383 { 3384 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3385 xmit_more); 3386 } 3387 3388 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3389 unsigned int pkts, unsigned int bytes) 3390 { 3391 #ifdef CONFIG_BQL 3392 if (unlikely(!bytes)) 3393 return; 3394 3395 dql_completed(&dev_queue->dql, bytes); 3396 3397 /* 3398 * Without the memory barrier there is a small possibility that 3399 * netdev_tx_sent_queue will miss the update and cause the queue to 3400 * be stopped forever. 3401 */ 3402 smp_mb(); 3403 3404 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3405 return; 3406 3407 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3408 netif_schedule_queue(dev_queue); 3409 #endif 3410 } 3411 3412 /** 3413 * netdev_completed_queue - report bytes and packets completed by device 3414 * @dev: network device 3415 * @pkts: actual number of packets sent over the medium 3416 * @bytes: actual number of bytes sent over the medium 3417 * 3418 * Report the number of bytes and packets transmitted by the network device 3419 * hardware queue over the physical medium; @bytes must exactly match the 3420 * @bytes amount passed to netdev_sent_queue() 3421 */ 3422 static inline void netdev_completed_queue(struct net_device *dev, 3423 unsigned int pkts, unsigned int bytes) 3424 { 3425 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3426 } 3427 3428 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3429 { 3430 #ifdef CONFIG_BQL 3431 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3432 dql_reset(&q->dql); 3433 #endif 3434 } 3435 3436 /** 3437 * netdev_reset_queue - reset the packets and bytes count of a network device 3438 * @dev_queue: network device 3439 * 3440 * Reset the bytes and packet count of a network device and clear the 3441 * software flow control OFF bit for this network device 3442 */ 3443 static inline void netdev_reset_queue(struct net_device *dev_queue) 3444 { 3445 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 3446 } 3447 3448 /** 3449 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3450 * @dev: network device 3451 * @queue_index: given tx queue index 3452 * 3453 * Returns 0 if the given tx queue index >= number of device tx queues, 3454 * otherwise returns the originally passed tx queue index. 3455 */ 3456 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3457 { 3458 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3459 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3460 dev->name, queue_index, 3461 dev->real_num_tx_queues); 3462 return 0; 3463 } 3464 3465 return queue_index; 3466 } 3467 3468 /** 3469 * netif_running - test if up 3470 * @dev: network device 3471 * 3472 * Test if the device has been brought up. 3473 */ 3474 static inline bool netif_running(const struct net_device *dev) 3475 { 3476 return test_bit(__LINK_STATE_START, &dev->state); 3477 } 3478 3479 /* 3480 * Routines to manage the subqueues on a device. We only need start, 3481 * stop, and a check if it's stopped.
All other device management is 3482 * done at the overall netdevice level. 3483 * There is also a test for whether the device is multiqueue. 3484 */ 3485 3486 /** 3487 * netif_start_subqueue - allow sending packets on subqueue 3488 * @dev: network device 3489 * @queue_index: sub queue index 3490 * 3491 * Start individual transmit queue of a device with multiple transmit queues. 3492 */ 3493 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3494 { 3495 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3496 3497 netif_tx_start_queue(txq); 3498 } 3499 3500 /** 3501 * netif_stop_subqueue - stop sending packets on subqueue 3502 * @dev: network device 3503 * @queue_index: sub queue index 3504 * 3505 * Stop individual transmit queue of a device with multiple transmit queues. 3506 */ 3507 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3508 { 3509 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3510 netif_tx_stop_queue(txq); 3511 } 3512 3513 /** 3514 * __netif_subqueue_stopped - test status of subqueue 3515 * @dev: network device 3516 * @queue_index: sub queue index 3517 * 3518 * Check individual transmit queue of a device with multiple transmit queues. 3519 */ 3520 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3521 u16 queue_index) 3522 { 3523 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3524 3525 return netif_tx_queue_stopped(txq); 3526 } 3527 3528 /** 3529 * netif_subqueue_stopped - test status of subqueue 3530 * @dev: network device 3531 * @skb: sub queue buffer pointer 3532 * 3533 * Check individual transmit queue of a device with multiple transmit queues. 3534 */ 3535 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3536 struct sk_buff *skb) 3537 { 3538 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3539 } 3540 3541 /** 3542 * netif_wake_subqueue - allow sending packets on subqueue 3543 * @dev: network device 3544 * @queue_index: sub queue index 3545 * 3546 * Resume individual transmit queue of a device with multiple transmit queues. 3547 */ 3548 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3549 { 3550 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3551 3552 netif_tx_wake_queue(txq); 3553 } 3554 3555 #ifdef CONFIG_XPS 3556 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3557 u16 index); 3558 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3559 u16 index, enum xps_map_type type); 3560 3561 /** 3562 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3563 * @j: CPU/Rx queue index 3564 * @mask: bitmask of all cpus/rx queues 3565 * @nr_bits: number of bits in the bitmask 3566 * 3567 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3568 */ 3569 static inline bool netif_attr_test_mask(unsigned long j, 3570 const unsigned long *mask, 3571 unsigned int nr_bits) 3572 { 3573 cpu_max_bits_warn(j, nr_bits); 3574 return test_bit(j, mask); 3575 } 3576 3577 /** 3578 * netif_attr_test_online - Test for online CPU/Rx queue 3579 * @j: CPU/Rx queue index 3580 * @online_mask: bitmask for CPUs/Rx queues that are online 3581 * @nr_bits: number of bits in the bitmask 3582 * 3583 * Returns true if a CPU/Rx queue is online.
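 *
 * For example (a sketch), scanning for usable entries in an online mask:
 *
 *	unsigned long j;
 *
 *	for (j = 0; j < nr_bits; j++)
 *		if (netif_attr_test_online(j, online_mask, nr_bits))
 *			... CPU/Rx queue j is online ...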
3584 */ 3585 static inline bool netif_attr_test_online(unsigned long j, 3586 const unsigned long *online_mask, 3587 unsigned int nr_bits) 3588 { 3589 cpu_max_bits_warn(j, nr_bits); 3590 3591 if (online_mask) 3592 return test_bit(j, online_mask); 3593 3594 return (j < nr_bits); 3595 } 3596 3597 /** 3598 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3599 * @n: CPU/Rx queue index 3600 * @srcp: the cpumask/Rx queue mask pointer 3601 * @nr_bits: number of bits in the bitmask 3602 * 3603 * Returns >= nr_bits if no further CPUs/Rx queues set. 3604 */ 3605 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3606 unsigned int nr_bits) 3607 { 3608 /* -1 is a legal arg here. */ 3609 if (n != -1) 3610 cpu_max_bits_warn(n, nr_bits); 3611 3612 if (srcp) 3613 return find_next_bit(srcp, nr_bits, n + 1); 3614 3615 return n + 1; 3616 } 3617 3618 /** 3619 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3620 * @n: CPU/Rx queue index 3621 * @src1p: the first CPUs/Rx queues mask pointer 3622 * @src2p: the second CPUs/Rx queues mask pointer 3623 * @nr_bits: number of bits in the bitmask 3624 * 3625 * Returns >= nr_bits if no further CPUs/Rx queues set in both. 3626 */ 3627 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3628 const unsigned long *src2p, 3629 unsigned int nr_bits) 3630 { 3631 /* -1 is a legal arg here. */ 3632 if (n != -1) 3633 cpu_max_bits_warn(n, nr_bits); 3634 3635 if (src1p && src2p) 3636 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3637 else if (src1p) 3638 return find_next_bit(src1p, nr_bits, n + 1); 3639 else if (src2p) 3640 return find_next_bit(src2p, nr_bits, n + 1); 3641 3642 return n + 1; 3643 } 3644 #else 3645 static inline int netif_set_xps_queue(struct net_device *dev, 3646 const struct cpumask *mask, 3647 u16 index) 3648 { 3649 return 0; 3650 } 3651 3652 static inline int __netif_set_xps_queue(struct net_device *dev, 3653 const unsigned long *mask, 3654 u16 index, enum xps_map_type type) 3655 { 3656 return 0; 3657 } 3658 #endif 3659 3660 /** 3661 * netif_is_multiqueue - test if device has multiple transmit queues 3662 * @dev: network device 3663 * 3664 * Check if device has multiple transmit queues 3665 */ 3666 static inline bool netif_is_multiqueue(const struct net_device *dev) 3667 { 3668 return dev->num_tx_queues > 1; 3669 } 3670 3671 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3672 3673 #ifdef CONFIG_SYSFS 3674 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3675 #else 3676 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3677 unsigned int rxqs) 3678 { 3679 dev->real_num_rx_queues = rxqs; 3680 return 0; 3681 } 3682 #endif 3683 int netif_set_real_num_queues(struct net_device *dev, 3684 unsigned int txq, unsigned int rxq); 3685 3686 static inline struct netdev_rx_queue * 3687 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) 3688 { 3689 return dev->_rx + rxq; 3690 } 3691 3692 #ifdef CONFIG_SYSFS 3693 static inline unsigned int get_netdev_rx_queue_index( 3694 struct netdev_rx_queue *queue) 3695 { 3696 struct net_device *dev = queue->dev; 3697 int index = queue - dev->_rx; 3698 3699 BUG_ON(index >= dev->num_rx_queues); 3700 return index; 3701 } 3702 #endif 3703 3704 int netif_get_num_default_rss_queues(void); 3705 3706 enum skb_free_reason { 3707 SKB_REASON_CONSUMED, 3708 SKB_REASON_DROPPED, 3709 }; 3710 3711 void __dev_kfree_skb_irq(struct sk_buff *skb, enum 
skb_free_reason reason); 3712 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); 3713 3714 /* 3715 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3716 * interrupt context or with hardware interrupts being disabled. 3717 * (in_hardirq() || irqs_disabled()) 3718 * 3719 * We provide four helpers that can be used in the following contexts: 3720 * 3721 * dev_kfree_skb_irq(skb) when the caller drops a packet from irq context, 3722 * replacing kfree_skb(skb) 3723 * 3724 * dev_consume_skb_irq(skb) when the caller consumes a packet from irq context. 3725 * Typically used in place of consume_skb(skb) in the TX completion path 3726 * 3727 * dev_kfree_skb_any(skb) when the caller doesn't know its current irq context, 3728 * replacing kfree_skb(skb) 3729 * 3730 * dev_consume_skb_any(skb) when the caller doesn't know its current irq context, 3731 * and consumed a packet. Used in place of consume_skb(skb) 3732 */ 3733 static inline void dev_kfree_skb_irq(struct sk_buff *skb) 3734 { 3735 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); 3736 } 3737 3738 static inline void dev_consume_skb_irq(struct sk_buff *skb) 3739 { 3740 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); 3741 } 3742 3743 static inline void dev_kfree_skb_any(struct sk_buff *skb) 3744 { 3745 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); 3746 } 3747 3748 static inline void dev_consume_skb_any(struct sk_buff *skb) 3749 { 3750 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); 3751 } 3752 3753 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 3754 struct bpf_prog *xdp_prog); 3755 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3756 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); 3757 int netif_rx(struct sk_buff *skb); 3758 int __netif_rx(struct sk_buff *skb); 3759 3760 int netif_receive_skb(struct sk_buff *skb); 3761 int netif_receive_skb_core(struct sk_buff *skb); 3762 void netif_receive_skb_list_internal(struct list_head *head); 3763 void netif_receive_skb_list(struct list_head *head); 3764 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3765 void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3766 struct sk_buff *napi_get_frags(struct napi_struct *napi); 3767 gro_result_t napi_gro_frags(struct napi_struct *napi); 3768 struct packet_offload *gro_find_receive_by_type(__be16 type); 3769 struct packet_offload *gro_find_complete_by_type(__be16 type); 3770 3771 static inline void napi_free_frags(struct napi_struct *napi) 3772 { 3773 kfree_skb(napi->skb); 3774 napi->skb = NULL; 3775 } 3776 3777 bool netdev_is_rx_handler_busy(struct net_device *dev); 3778 int netdev_rx_handler_register(struct net_device *dev, 3779 rx_handler_func_t *rx_handler, 3780 void *rx_handler_data); 3781 void netdev_rx_handler_unregister(struct net_device *dev); 3782 3783 bool dev_valid_name(const char *name); 3784 static inline bool is_socket_ioctl_cmd(unsigned int cmd) 3785 { 3786 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; 3787 } 3788 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); 3789 int put_user_ifreq(struct ifreq *ifr, void __user *arg); 3790 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 3791 void __user *data, bool *need_copyout); 3792 int dev_ifconf(struct net *net, struct ifconf __user *ifc); 3793 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); 3794 unsigned int dev_get_flags(const struct net_device *); 3795 int __dev_change_flags(struct net_device *dev, unsigned int
flags, 3796 struct netlink_ext_ack *extack); 3797 int dev_change_flags(struct net_device *dev, unsigned int flags, 3798 struct netlink_ext_ack *extack); 3799 void __dev_notify_flags(struct net_device *, unsigned int old_flags, 3800 unsigned int gchanges); 3801 int dev_set_alias(struct net_device *, const char *, size_t); 3802 int dev_get_alias(const struct net_device *, char *, size_t); 3803 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 3804 const char *pat, int new_ifindex); 3805 static inline 3806 int dev_change_net_namespace(struct net_device *dev, struct net *net, 3807 const char *pat) 3808 { 3809 return __dev_change_net_namespace(dev, net, pat, 0); 3810 } 3811 int __dev_set_mtu(struct net_device *, int); 3812 int dev_set_mtu(struct net_device *, int); 3813 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 3814 struct netlink_ext_ack *extack); 3815 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 3816 struct netlink_ext_ack *extack); 3817 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 3818 struct netlink_ext_ack *extack); 3819 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 3820 int dev_get_port_parent_id(struct net_device *dev, 3821 struct netdev_phys_item_id *ppid, bool recurse); 3822 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 3823 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 3824 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3825 struct netdev_queue *txq, int *ret); 3826 3827 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 3828 u8 dev_xdp_prog_count(struct net_device *dev); 3829 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 3830 3831 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3832 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3833 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 3834 bool is_skb_forwardable(const struct net_device *dev, 3835 const struct sk_buff *skb); 3836 3837 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 3838 const struct sk_buff *skb, 3839 const bool check_mtu) 3840 { 3841 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 3842 unsigned int len; 3843 3844 if (!(dev->flags & IFF_UP)) 3845 return false; 3846 3847 if (!check_mtu) 3848 return true; 3849 3850 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 3851 if (skb->len <= len) 3852 return true; 3853 3854 /* if TSO is enabled, we don't care about the length as the packet 3855 * could be forwarded without being segmented before 3856 */ 3857 if (skb_is_gso(skb)) 3858 return true; 3859 3860 return false; 3861 } 3862 3863 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); 3864 3865 static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) 3866 { 3867 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 3868 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 3869 3870 if (likely(p)) 3871 return p; 3872 3873 return netdev_core_stats_alloc(dev); 3874 } 3875 3876 #define DEV_CORE_STATS_INC(FIELD) \ 3877 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 3878 { \ 3879 struct net_device_core_stats __percpu *p; \ 3880 \ 3881 p = dev_core_stats(dev); \ 3882 if (p) \ 3883 this_cpu_inc(p->FIELD); \ 3884 
} 3885 DEV_CORE_STATS_INC(rx_dropped) 3886 DEV_CORE_STATS_INC(tx_dropped) 3887 DEV_CORE_STATS_INC(rx_nohandler) 3888 DEV_CORE_STATS_INC(rx_otherhost_dropped) 3889 3890 static __always_inline int ____dev_forward_skb(struct net_device *dev, 3891 struct sk_buff *skb, 3892 const bool check_mtu) 3893 { 3894 if (skb_orphan_frags(skb, GFP_ATOMIC) || 3895 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 3896 dev_core_stats_rx_dropped_inc(dev); 3897 kfree_skb(skb); 3898 return NET_RX_DROP; 3899 } 3900 3901 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 3902 skb->priority = 0; 3903 return 0; 3904 } 3905 3906 bool dev_nit_active(struct net_device *dev); 3907 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 3908 3909 static inline void __dev_put(struct net_device *dev) 3910 { 3911 if (dev) { 3912 #ifdef CONFIG_PCPU_DEV_REFCNT 3913 this_cpu_dec(*dev->pcpu_refcnt); 3914 #else 3915 refcount_dec(&dev->dev_refcnt); 3916 #endif 3917 } 3918 } 3919 3920 static inline void __dev_hold(struct net_device *dev) 3921 { 3922 if (dev) { 3923 #ifdef CONFIG_PCPU_DEV_REFCNT 3924 this_cpu_inc(*dev->pcpu_refcnt); 3925 #else 3926 refcount_inc(&dev->dev_refcnt); 3927 #endif 3928 } 3929 } 3930 3931 static inline void __netdev_tracker_alloc(struct net_device *dev, 3932 netdevice_tracker *tracker, 3933 gfp_t gfp) 3934 { 3935 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 3936 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 3937 #endif 3938 } 3939 3940 /* netdev_tracker_alloc() can upgrade a prior untracked reference 3941 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 3942 */ 3943 static inline void netdev_tracker_alloc(struct net_device *dev, 3944 netdevice_tracker *tracker, gfp_t gfp) 3945 { 3946 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 3947 refcount_dec(&dev->refcnt_tracker.no_tracker); 3948 __netdev_tracker_alloc(dev, tracker, gfp); 3949 #endif 3950 } 3951 3952 static inline void netdev_tracker_free(struct net_device *dev, 3953 netdevice_tracker *tracker) 3954 { 3955 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 3956 ref_tracker_free(&dev->refcnt_tracker, tracker); 3957 #endif 3958 } 3959 3960 static inline void dev_hold_track(struct net_device *dev, 3961 netdevice_tracker *tracker, gfp_t gfp) 3962 { 3963 if (dev) { 3964 __dev_hold(dev); 3965 __netdev_tracker_alloc(dev, tracker, gfp); 3966 } 3967 } 3968 3969 static inline void dev_put_track(struct net_device *dev, 3970 netdevice_tracker *tracker) 3971 { 3972 if (dev) { 3973 netdev_tracker_free(dev, tracker); 3974 __dev_put(dev); 3975 } 3976 } 3977 3978 /** 3979 * dev_hold - get reference to device 3980 * @dev: network device 3981 * 3982 * Hold reference to device to keep it from being freed. 3983 * Try using dev_hold_track() instead. 3984 */ 3985 static inline void dev_hold(struct net_device *dev) 3986 { 3987 dev_hold_track(dev, NULL, GFP_ATOMIC); 3988 } 3989 3990 /** 3991 * dev_put - release reference to device 3992 * @dev: network device 3993 * 3994 * Release reference to device to allow it to be freed. 3995 * Try using dev_put_track() instead. 
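 *
 * A sketch of the tracked pattern (the netdevice_tracker is typically
 * embedded next to the cached pointer; struct foo is hypothetical):
 *
 *	struct foo {
 *		struct net_device *dev;
 *		netdevice_tracker dev_tracker;
 *	};
 *
 *	dev_hold_track(foo->dev, &foo->dev_tracker, GFP_KERNEL);
 *	...
 *	dev_put_track(foo->dev, &foo->dev_tracker);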
3996 */ 3997 static inline void dev_put(struct net_device *dev) 3998 { 3999 dev_put_track(dev, NULL); 4000 } 4001 4002 static inline void dev_replace_track(struct net_device *odev, 4003 struct net_device *ndev, 4004 netdevice_tracker *tracker, 4005 gfp_t gfp) 4006 { 4007 if (odev) 4008 netdev_tracker_free(odev, tracker); 4009 4010 __dev_hold(ndev); 4011 __dev_put(odev); 4012 4013 if (ndev) 4014 __netdev_tracker_alloc(ndev, tracker, gfp); 4015 } 4016 4017 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 4018 * and _off may be called from IRQ context, but it is the caller 4019 * who is responsible for serialization of these calls. 4020 * 4021 * The name carrier is inappropriate; these functions should really be 4022 * called netif_lowerlayer_*() because they represent the state of any 4023 * kind of lower layer, not just hardware media. 4024 */ 4025 void linkwatch_fire_event(struct net_device *dev); 4026 4027 /** 4028 * netif_carrier_ok - test if carrier present 4029 * @dev: network device 4030 * 4031 * Check if carrier is present on device 4032 */ 4033 static inline bool netif_carrier_ok(const struct net_device *dev) 4034 { 4035 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 4036 } 4037 4038 unsigned long dev_trans_start(struct net_device *dev); 4039 4040 void __netdev_watchdog_up(struct net_device *dev); 4041 4042 void netif_carrier_on(struct net_device *dev); 4043 void netif_carrier_off(struct net_device *dev); 4044 void netif_carrier_event(struct net_device *dev); 4045 4046 /** 4047 * netif_dormant_on - mark device as dormant. 4048 * @dev: network device 4049 * 4050 * Mark device as dormant (as per RFC2863). 4051 * 4052 * The dormant state indicates that the relevant interface is not 4053 * actually in a condition to pass packets (i.e., it is not 'up') but is 4054 * in a "pending" state, waiting for some external event. For "on- 4055 * demand" interfaces, this new state identifies the situation where the 4056 * interface is waiting for events to place it in the up state. 4057 */ 4058 static inline void netif_dormant_on(struct net_device *dev) 4059 { 4060 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 4061 linkwatch_fire_event(dev); 4062 } 4063 4064 /** 4065 * netif_dormant_off - set device as not dormant. 4066 * @dev: network device 4067 * 4068 * Device is not in dormant state. 4069 */ 4070 static inline void netif_dormant_off(struct net_device *dev) 4071 { 4072 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 4073 linkwatch_fire_event(dev); 4074 } 4075 4076 /** 4077 * netif_dormant - test if device is dormant 4078 * @dev: network device 4079 * 4080 * Check if device is dormant. 4081 */ 4082 static inline bool netif_dormant(const struct net_device *dev) 4083 { 4084 return test_bit(__LINK_STATE_DORMANT, &dev->state); 4085 } 4086 4087 4088 /** 4089 * netif_testing_on - mark device as under test. 4090 * @dev: network device 4091 * 4092 * Mark device as under test (as per RFC2863). 4093 * 4094 * The testing state indicates that some test(s) must be performed on 4095 * the interface. After completion of the test, the interface state 4096 * will change to up, dormant, or down, as appropriate. 4097 */ 4098 static inline void netif_testing_on(struct net_device *dev) 4099 { 4100 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) 4101 linkwatch_fire_event(dev); 4102 } 4103 4104 /** 4105 * netif_testing_off - set device as not under test. 4106 * @dev: network device 4107 * 4108 * Device is not in testing state.
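 *
 * Sketch of intended use around a driver self-test (foo_run_selftest()
 * is hypothetical):
 *
 *	netif_testing_on(dev);
 *	err = foo_run_selftest(dev);
 *	netif_testing_off(dev);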
4109 */ 4110 static inline void netif_testing_off(struct net_device *dev) 4111 { 4112 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4113 linkwatch_fire_event(dev); 4114 } 4115 4116 /** 4117 * netif_testing - test if device is under test 4118 * @dev: network device 4119 * 4120 * Check if device is under test 4121 */ 4122 static inline bool netif_testing(const struct net_device *dev) 4123 { 4124 return test_bit(__LINK_STATE_TESTING, &dev->state); 4125 } 4126 4127 4128 /** 4129 * netif_oper_up - test if device is operational 4130 * @dev: network device 4131 * 4132 * Check if carrier is operational 4133 */ 4134 static inline bool netif_oper_up(const struct net_device *dev) 4135 { 4136 return (dev->operstate == IF_OPER_UP || 4137 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 4138 } 4139 4140 /** 4141 * netif_device_present - is device available or removed 4142 * @dev: network device 4143 * 4144 * Check if device has not been removed from system. 4145 */ 4146 static inline bool netif_device_present(const struct net_device *dev) 4147 { 4148 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4149 } 4150 4151 void netif_device_detach(struct net_device *dev); 4152 4153 void netif_device_attach(struct net_device *dev); 4154 4155 /* 4156 * Network interface message level settings 4157 */ 4158 4159 enum { 4160 NETIF_MSG_DRV_BIT, 4161 NETIF_MSG_PROBE_BIT, 4162 NETIF_MSG_LINK_BIT, 4163 NETIF_MSG_TIMER_BIT, 4164 NETIF_MSG_IFDOWN_BIT, 4165 NETIF_MSG_IFUP_BIT, 4166 NETIF_MSG_RX_ERR_BIT, 4167 NETIF_MSG_TX_ERR_BIT, 4168 NETIF_MSG_TX_QUEUED_BIT, 4169 NETIF_MSG_INTR_BIT, 4170 NETIF_MSG_TX_DONE_BIT, 4171 NETIF_MSG_RX_STATUS_BIT, 4172 NETIF_MSG_PKTDATA_BIT, 4173 NETIF_MSG_HW_BIT, 4174 NETIF_MSG_WOL_BIT, 4175 4176 /* When you add a new bit above, update netif_msg_class_names array 4177 * in net/ethtool/common.c 4178 */ 4179 NETIF_MSG_CLASS_COUNT, 4180 }; 4181 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4182 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4183 4184 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4185 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4186 4187 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4188 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4189 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4190 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4191 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4192 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4193 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4194 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4195 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4196 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4197 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4198 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4199 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4200 #define NETIF_MSG_HW __NETIF_MSG(HW) 4201 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4202 4203 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4204 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4205 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4206 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4207 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4208 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4209 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4210 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4211 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4212 #define netif_msg_intr(p) 
((p)->msg_enable & NETIF_MSG_INTR) 4213 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4214 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4215 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4216 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4217 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4218 4219 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4220 { 4221 /* use default */ 4222 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4223 return default_msg_enable_bits; 4224 if (debug_value == 0) /* no output */ 4225 return 0; 4226 /* set low N bits */ 4227 return (1U << debug_value) - 1; 4228 } 4229 4230 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4231 { 4232 spin_lock(&txq->_xmit_lock); 4233 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4234 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4235 } 4236 4237 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4238 { 4239 __acquire(&txq->_xmit_lock); 4240 return true; 4241 } 4242 4243 static inline void __netif_tx_release(struct netdev_queue *txq) 4244 { 4245 __release(&txq->_xmit_lock); 4246 } 4247 4248 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4249 { 4250 spin_lock_bh(&txq->_xmit_lock); 4251 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4252 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4253 } 4254 4255 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4256 { 4257 bool ok = spin_trylock(&txq->_xmit_lock); 4258 4259 if (likely(ok)) { 4260 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4261 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4262 } 4263 return ok; 4264 } 4265 4266 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4267 { 4268 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4269 WRITE_ONCE(txq->xmit_lock_owner, -1); 4270 spin_unlock(&txq->_xmit_lock); 4271 } 4272 4273 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4274 { 4275 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4276 WRITE_ONCE(txq->xmit_lock_owner, -1); 4277 spin_unlock_bh(&txq->_xmit_lock); 4278 } 4279 4280 /* 4281 * txq->trans_start can be read locklessly from dev_watchdog() 4282 */ 4283 static inline void txq_trans_update(struct netdev_queue *txq) 4284 { 4285 if (txq->xmit_lock_owner != -1) 4286 WRITE_ONCE(txq->trans_start, jiffies); 4287 } 4288 4289 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4290 { 4291 unsigned long now = jiffies; 4292 4293 if (READ_ONCE(txq->trans_start) != now) 4294 WRITE_ONCE(txq->trans_start, now); 4295 } 4296 4297 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4298 static inline void netif_trans_update(struct net_device *dev) 4299 { 4300 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4301 4302 txq_trans_cond_update(txq); 4303 } 4304 4305 /** 4306 * netif_tx_lock - grab network device transmit lock 4307 * @dev: network device 4308 * 4309 * Get network device transmit lock 4310 */ 4311 void netif_tx_lock(struct net_device *dev); 4312 4313 static inline void netif_tx_lock_bh(struct net_device *dev) 4314 { 4315 local_bh_disable(); 4316 netif_tx_lock(dev); 4317 } 4318 4319 void netif_tx_unlock(struct net_device *dev); 4320 4321 static inline void netif_tx_unlock_bh(struct net_device *dev) 4322 { 4323 netif_tx_unlock(dev); 4324 local_bh_enable(); 4325 } 4326 4327 #define HARD_TX_LOCK(dev, txq, cpu) { \ 4328 if ((dev->features & NETIF_F_LLTX) == 
0) { \ 4329 __netif_tx_lock(txq, cpu); \ 4330 } else { \ 4331 __netif_tx_acquire(txq); \ 4332 } \ 4333 } 4334 4335 #define HARD_TX_TRYLOCK(dev, txq) \ 4336 (((dev->features & NETIF_F_LLTX) == 0) ? \ 4337 __netif_tx_trylock(txq) : \ 4338 __netif_tx_acquire(txq)) 4339 4340 #define HARD_TX_UNLOCK(dev, txq) { \ 4341 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4342 __netif_tx_unlock(txq); \ 4343 } else { \ 4344 __netif_tx_release(txq); \ 4345 } \ 4346 } 4347 4348 static inline void netif_tx_disable(struct net_device *dev) 4349 { 4350 unsigned int i; 4351 int cpu; 4352 4353 local_bh_disable(); 4354 cpu = smp_processor_id(); 4355 spin_lock(&dev->tx_global_lock); 4356 for (i = 0; i < dev->num_tx_queues; i++) { 4357 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4358 4359 __netif_tx_lock(txq, cpu); 4360 netif_tx_stop_queue(txq); 4361 __netif_tx_unlock(txq); 4362 } 4363 spin_unlock(&dev->tx_global_lock); 4364 local_bh_enable(); 4365 } 4366 4367 static inline void netif_addr_lock(struct net_device *dev) 4368 { 4369 unsigned char nest_level = 0; 4370 4371 #ifdef CONFIG_LOCKDEP 4372 nest_level = dev->nested_level; 4373 #endif 4374 spin_lock_nested(&dev->addr_list_lock, nest_level); 4375 } 4376 4377 static inline void netif_addr_lock_bh(struct net_device *dev) 4378 { 4379 unsigned char nest_level = 0; 4380 4381 #ifdef CONFIG_LOCKDEP 4382 nest_level = dev->nested_level; 4383 #endif 4384 local_bh_disable(); 4385 spin_lock_nested(&dev->addr_list_lock, nest_level); 4386 } 4387 4388 static inline void netif_addr_unlock(struct net_device *dev) 4389 { 4390 spin_unlock(&dev->addr_list_lock); 4391 } 4392 4393 static inline void netif_addr_unlock_bh(struct net_device *dev) 4394 { 4395 spin_unlock_bh(&dev->addr_list_lock); 4396 } 4397 4398 /* 4399 * dev_addrs walker. Should be used only for read access. Call with 4400 * rcu_read_lock held. 
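 *
 * Example (a sketch; foo_program_filter() is hypothetical):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		foo_program_filter(dev, ha->addr);
 *	rcu_read_unlock();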
4401 */ 4402 #define for_each_dev_addr(dev, ha) \ 4403 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4404 4405 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4406 4407 void ether_setup(struct net_device *dev); 4408 4409 /* Support for loadable net-drivers */ 4410 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4411 unsigned char name_assign_type, 4412 void (*setup)(struct net_device *), 4413 unsigned int txqs, unsigned int rxqs); 4414 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4415 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4416 4417 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4418 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4419 count) 4420 4421 int register_netdev(struct net_device *dev); 4422 void unregister_netdev(struct net_device *dev); 4423 4424 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4425 4426 /* General hardware address lists handling functions */ 4427 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4428 struct netdev_hw_addr_list *from_list, int addr_len); 4429 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4430 struct netdev_hw_addr_list *from_list, int addr_len); 4431 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4432 struct net_device *dev, 4433 int (*sync)(struct net_device *, const unsigned char *), 4434 int (*unsync)(struct net_device *, 4435 const unsigned char *)); 4436 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4437 struct net_device *dev, 4438 int (*sync)(struct net_device *, 4439 const unsigned char *, int), 4440 int (*unsync)(struct net_device *, 4441 const unsigned char *, int)); 4442 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4443 struct net_device *dev, 4444 int (*unsync)(struct net_device *, 4445 const unsigned char *, int)); 4446 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4447 struct net_device *dev, 4448 int (*unsync)(struct net_device *, 4449 const unsigned char *)); 4450 void __hw_addr_init(struct netdev_hw_addr_list *list); 4451 4452 /* Functions used for device addresses handling */ 4453 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4454 const void *addr, size_t len); 4455 4456 static inline void 4457 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4458 { 4459 dev_addr_mod(dev, 0, addr, len); 4460 } 4461 4462 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4463 { 4464 __dev_addr_set(dev, addr, dev->addr_len); 4465 } 4466 4467 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4468 unsigned char addr_type); 4469 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4470 unsigned char addr_type); 4471 4472 /* Functions used for unicast addresses handling */ 4473 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4474 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4475 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4476 int dev_uc_sync(struct net_device *to, struct net_device *from); 4477 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4478 void dev_uc_unsync(struct net_device *to, struct net_device *from); 4479 void dev_uc_flush(struct net_device *dev); 4480 void dev_uc_init(struct net_device *dev); 4481 4482 /** 4483 * __dev_uc_sync - Synchronize device's unicast list 4484 * @dev: device to sync 4485 *
@sync: function to call if address should be added 4486 * @unsync: function to call if address should be removed 4487 * 4488 * Add newly added addresses to the interface, and release 4489 * addresses that have been deleted. 4490 */ 4491 static inline int __dev_uc_sync(struct net_device *dev, 4492 int (*sync)(struct net_device *, 4493 const unsigned char *), 4494 int (*unsync)(struct net_device *, 4495 const unsigned char *)) 4496 { 4497 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4498 } 4499 4500 /** 4501 * __dev_uc_unsync - Remove synchronized addresses from device 4502 * @dev: device to sync 4503 * @unsync: function to call if address should be removed 4504 * 4505 * Remove all addresses that were added to the device by dev_uc_sync(). 4506 */ 4507 static inline void __dev_uc_unsync(struct net_device *dev, 4508 int (*unsync)(struct net_device *, 4509 const unsigned char *)) 4510 { 4511 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4512 } 4513 4514 /* Functions used for multicast addresses handling */ 4515 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4516 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4517 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4518 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4519 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4520 int dev_mc_sync(struct net_device *to, struct net_device *from); 4521 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4522 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4523 void dev_mc_flush(struct net_device *dev); 4524 void dev_mc_init(struct net_device *dev); 4525 4526 /** 4527 * __dev_mc_sync - Synchronize device's multicast list 4528 * @dev: device to sync 4529 * @sync: function to call if address should be added 4530 * @unsync: function to call if address should be removed 4531 * 4532 * Add newly added addresses to the interface, and release 4533 * addresses that have been deleted. 4534 */ 4535 static inline int __dev_mc_sync(struct net_device *dev, 4536 int (*sync)(struct net_device *, 4537 const unsigned char *), 4538 int (*unsync)(struct net_device *, 4539 const unsigned char *)) 4540 { 4541 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4542 } 4543 4544 /** 4545 * __dev_mc_unsync - Remove synchronized addresses from device 4546 * @dev: device to sync 4547 * @unsync: function to call if address should be removed 4548 * 4549 * Remove all addresses that were added to the device by dev_mc_sync().
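 *
 * Typical pairing (a sketch; foo_mc_add()/foo_mc_del() are hypothetical
 * driver callbacks): call __dev_mc_sync() from ndo_set_rx_mode(), and
 * __dev_mc_unsync(dev, foo_mc_del) when the device is torn down:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_mc_sync(dev, foo_mc_add, foo_mc_del);
 *	}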
4550 */ 4551 static inline void __dev_mc_unsync(struct net_device *dev, 4552 int (*unsync)(struct net_device *, 4553 const unsigned char *)) 4554 { 4555 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4556 } 4557 4558 /* Functions used for secondary unicast and multicast support */ 4559 void dev_set_rx_mode(struct net_device *dev); 4560 int dev_set_promiscuity(struct net_device *dev, int inc); 4561 int dev_set_allmulti(struct net_device *dev, int inc); 4562 void netdev_state_change(struct net_device *dev); 4563 void __netdev_notify_peers(struct net_device *dev); 4564 void netdev_notify_peers(struct net_device *dev); 4565 void netdev_features_change(struct net_device *dev); 4566 /* Load a device via the kmod */ 4567 void dev_load(struct net *net, const char *name); 4568 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4569 struct rtnl_link_stats64 *storage); 4570 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4571 const struct net_device_stats *netdev_stats); 4572 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4573 const struct pcpu_sw_netstats __percpu *netstats); 4574 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4575 4576 extern int netdev_max_backlog; 4577 extern int dev_rx_weight; 4578 extern int dev_tx_weight; 4579 extern int gro_normal_batch; 4580 4581 enum { 4582 NESTED_SYNC_IMM_BIT, 4583 NESTED_SYNC_TODO_BIT, 4584 }; 4585 4586 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4587 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4588 4589 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4590 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4591 4592 struct netdev_nested_priv { 4593 unsigned char flags; 4594 void *data; 4595 }; 4596 4597 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4598 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4599 struct list_head **iter); 4600 4601 /* iterate through upper list, must be called under RCU read lock */ 4602 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4603 for (iter = &(dev)->adj_list.upper, \ 4604 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4605 updev; \ 4606 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4607 4608 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4609 int (*fn)(struct net_device *upper_dev, 4610 struct netdev_nested_priv *priv), 4611 struct netdev_nested_priv *priv); 4612 4613 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4614 struct net_device *upper_dev); 4615 4616 bool netdev_has_any_upper_dev(struct net_device *dev); 4617 4618 void *netdev_lower_get_next_private(struct net_device *dev, 4619 struct list_head **iter); 4620 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4621 struct list_head **iter); 4622 4623 #define netdev_for_each_lower_private(dev, priv, iter) \ 4624 for (iter = (dev)->adj_list.lower.next, \ 4625 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4626 priv; \ 4627 priv = netdev_lower_get_next_private(dev, &(iter))) 4628 4629 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4630 for (iter = &(dev)->adj_list.lower, \ 4631 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4632 priv; \ 4633 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4634 4635 void *netdev_lower_get_next(struct net_device *dev, 4636 struct list_head **iter); 4637 4638 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4639 for (iter = (dev)->adj_list.lower.next, \ 4640 ldev = 
netdev_lower_get_next(dev, &(iter)); \ 4641 ldev; \ 4642 ldev = netdev_lower_get_next(dev, &(iter))) 4643 4644 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4645 struct list_head **iter); 4646 int netdev_walk_all_lower_dev(struct net_device *dev, 4647 int (*fn)(struct net_device *lower_dev, 4648 struct netdev_nested_priv *priv), 4649 struct netdev_nested_priv *priv); 4650 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4651 int (*fn)(struct net_device *lower_dev, 4652 struct netdev_nested_priv *priv), 4653 struct netdev_nested_priv *priv); 4654 4655 void *netdev_adjacent_get_private(struct list_head *adj_list); 4656 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4657 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4658 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4659 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4660 struct netlink_ext_ack *extack); 4661 int netdev_master_upper_dev_link(struct net_device *dev, 4662 struct net_device *upper_dev, 4663 void *upper_priv, void *upper_info, 4664 struct netlink_ext_ack *extack); 4665 void netdev_upper_dev_unlink(struct net_device *dev, 4666 struct net_device *upper_dev); 4667 int netdev_adjacent_change_prepare(struct net_device *old_dev, 4668 struct net_device *new_dev, 4669 struct net_device *dev, 4670 struct netlink_ext_ack *extack); 4671 void netdev_adjacent_change_commit(struct net_device *old_dev, 4672 struct net_device *new_dev, 4673 struct net_device *dev); 4674 void netdev_adjacent_change_abort(struct net_device *old_dev, 4675 struct net_device *new_dev, 4676 struct net_device *dev); 4677 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4678 void *netdev_lower_dev_get_private(struct net_device *dev, 4679 struct net_device *lower_dev); 4680 void netdev_lower_state_changed(struct net_device *lower_dev, 4681 void *lower_state_info); 4682 4683 /* RSS keys are 40 or 52 bytes long */ 4684 #define NETDEV_RSS_KEY_LEN 52 4685 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4686 void netdev_rss_key_fill(void *buffer, size_t len); 4687 4688 int skb_checksum_help(struct sk_buff *skb); 4689 int skb_crc32c_csum_help(struct sk_buff *skb); 4690 int skb_csum_hwoffload_help(struct sk_buff *skb, 4691 const netdev_features_t features); 4692 4693 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 4694 netdev_features_t features, bool tx_path); 4695 struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, 4696 netdev_features_t features, __be16 type); 4697 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 4698 netdev_features_t features); 4699 4700 struct netdev_bonding_info { 4701 ifslave slave; 4702 ifbond master; 4703 }; 4704 4705 struct netdev_notifier_bonding_info { 4706 struct netdev_notifier_info info; /* must be first */ 4707 struct netdev_bonding_info bonding_info; 4708 }; 4709 4710 void netdev_bonding_info_change(struct net_device *dev, 4711 struct netdev_bonding_info *bonding_info); 4712 4713 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4714 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4715 #else 4716 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4717 const void *data) 4718 { 4719 } 4720 #endif 4721 4722 static inline 4723 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 4724 { 4725 return __skb_gso_segment(skb, features, true); 4726 } 4727 __be16 
skb_network_protocol(struct sk_buff *skb, int *depth); 4728 4729 static inline bool can_checksum_protocol(netdev_features_t features, 4730 __be16 protocol) 4731 { 4732 if (protocol == htons(ETH_P_FCOE)) 4733 return !!(features & NETIF_F_FCOE_CRC); 4734 4735 /* Assume this is an IP checksum (not SCTP CRC) */ 4736 4737 if (features & NETIF_F_HW_CSUM) { 4738 /* Can checksum everything */ 4739 return true; 4740 } 4741 4742 switch (protocol) { 4743 case htons(ETH_P_IP): 4744 return !!(features & NETIF_F_IP_CSUM); 4745 case htons(ETH_P_IPV6): 4746 return !!(features & NETIF_F_IPV6_CSUM); 4747 default: 4748 return false; 4749 } 4750 } 4751 4752 #ifdef CONFIG_BUG 4753 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 4754 #else 4755 static inline void netdev_rx_csum_fault(struct net_device *dev, 4756 struct sk_buff *skb) 4757 { 4758 } 4759 #endif 4760 /* rx skb timestamps */ 4761 void net_enable_timestamp(void); 4762 void net_disable_timestamp(void); 4763 4764 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4765 struct sk_buff *skb, struct net_device *dev, 4766 bool more) 4767 { 4768 __this_cpu_write(softnet_data.xmit.more, more); 4769 return ops->ndo_start_xmit(skb, dev); 4770 } 4771 4772 static inline bool netdev_xmit_more(void) 4773 { 4774 return __this_cpu_read(softnet_data.xmit.more); 4775 } 4776 4777 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 4778 struct netdev_queue *txq, bool more) 4779 { 4780 const struct net_device_ops *ops = dev->netdev_ops; 4781 netdev_tx_t rc; 4782 4783 rc = __netdev_start_xmit(ops, skb, dev, more); 4784 if (rc == NETDEV_TX_OK) 4785 txq_trans_update(txq); 4786 4787 return rc; 4788 } 4789 4790 int netdev_class_create_file_ns(const struct class_attribute *class_attr, 4791 const void *ns); 4792 void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 4793 const void *ns); 4794 4795 extern const struct kobj_ns_type_operations net_ns_type_operations; 4796 4797 const char *netdev_drivername(const struct net_device *dev); 4798 4799 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 4800 netdev_features_t f2) 4801 { 4802 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 4803 if (f1 & NETIF_F_HW_CSUM) 4804 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4805 else 4806 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4807 } 4808 4809 return f1 & f2; 4810 } 4811 4812 static inline netdev_features_t netdev_get_wanted_features( 4813 struct net_device *dev) 4814 { 4815 return (dev->features & ~dev->hw_features) | dev->wanted_features; 4816 } 4817 netdev_features_t netdev_increment_features(netdev_features_t all, 4818 netdev_features_t one, netdev_features_t mask); 4819 4820 /* Allow TSO being used on stacked device : 4821 * Performing the GSO segmentation before last device 4822 * is a performance improvement. 
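 *
 * For instance (a sketch), a stacked device folding a lower device's
 * feature set in while keeping TSO available:
 *
 *	features = netdev_add_tso_features(features, lower_dev->features);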
4823 */ 4824 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 4825 netdev_features_t mask) 4826 { 4827 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 4828 } 4829 4830 int __netdev_update_features(struct net_device *dev); 4831 void netdev_update_features(struct net_device *dev); 4832 void netdev_change_features(struct net_device *dev); 4833 4834 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 4835 struct net_device *dev); 4836 4837 netdev_features_t passthru_features_check(struct sk_buff *skb, 4838 struct net_device *dev, 4839 netdev_features_t features); 4840 netdev_features_t netif_skb_features(struct sk_buff *skb); 4841 4842 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4843 { 4844 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 4845 4846 /* check flags correspondence */ 4847 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4848 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 4849 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 4850 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 4851 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 4852 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 4853 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 4854 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 4855 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 4856 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 4857 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 4858 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4859 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4860 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 4861 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 4862 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 4863 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 4864 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 4865 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 4866 4867 return (features & feature) == feature; 4868 } 4869 4870 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 4871 { 4872 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 4873 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 4874 } 4875 4876 static inline bool netif_needs_gso(struct sk_buff *skb, 4877 netdev_features_t features) 4878 { 4879 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 4880 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 4881 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 4882 } 4883 4884 void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 4885 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 4886 void netif_inherit_tso_max(struct net_device *to, 4887 const struct net_device *from); 4888 4889 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, 4890 int pulled_hlen, u16 mac_offset, 4891 int mac_len) 4892 { 4893 skb->protocol = protocol; 4894 skb->encapsulation = 1; 4895 skb_push(skb, pulled_hlen); 4896 skb_reset_transport_header(skb); 4897 
skb->mac_header = mac_offset; 4898 skb->network_header = skb->mac_header + mac_len; 4899 skb->mac_len = mac_len; 4900 } 4901 4902 static inline bool netif_is_macsec(const struct net_device *dev) 4903 { 4904 return dev->priv_flags & IFF_MACSEC; 4905 } 4906 4907 static inline bool netif_is_macvlan(const struct net_device *dev) 4908 { 4909 return dev->priv_flags & IFF_MACVLAN; 4910 } 4911 4912 static inline bool netif_is_macvlan_port(const struct net_device *dev) 4913 { 4914 return dev->priv_flags & IFF_MACVLAN_PORT; 4915 } 4916 4917 static inline bool netif_is_bond_master(const struct net_device *dev) 4918 { 4919 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 4920 } 4921 4922 static inline bool netif_is_bond_slave(const struct net_device *dev) 4923 { 4924 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 4925 } 4926 4927 static inline bool netif_supports_nofcs(struct net_device *dev) 4928 { 4929 return dev->priv_flags & IFF_SUPP_NOFCS; 4930 } 4931 4932 static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 4933 { 4934 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 4935 } 4936 4937 static inline bool netif_is_l3_master(const struct net_device *dev) 4938 { 4939 return dev->priv_flags & IFF_L3MDEV_MASTER; 4940 } 4941 4942 static inline bool netif_is_l3_slave(const struct net_device *dev) 4943 { 4944 return dev->priv_flags & IFF_L3MDEV_SLAVE; 4945 } 4946 4947 static inline bool netif_is_bridge_master(const struct net_device *dev) 4948 { 4949 return dev->priv_flags & IFF_EBRIDGE; 4950 } 4951 4952 static inline bool netif_is_bridge_port(const struct net_device *dev) 4953 { 4954 return dev->priv_flags & IFF_BRIDGE_PORT; 4955 } 4956 4957 static inline bool netif_is_ovs_master(const struct net_device *dev) 4958 { 4959 return dev->priv_flags & IFF_OPENVSWITCH; 4960 } 4961 4962 static inline bool netif_is_ovs_port(const struct net_device *dev) 4963 { 4964 return dev->priv_flags & IFF_OVS_DATAPATH; 4965 } 4966 4967 static inline bool netif_is_any_bridge_port(const struct net_device *dev) 4968 { 4969 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); 4970 } 4971 4972 static inline bool netif_is_team_master(const struct net_device *dev) 4973 { 4974 return dev->priv_flags & IFF_TEAM; 4975 } 4976 4977 static inline bool netif_is_team_port(const struct net_device *dev) 4978 { 4979 return dev->priv_flags & IFF_TEAM_PORT; 4980 } 4981 4982 static inline bool netif_is_lag_master(const struct net_device *dev) 4983 { 4984 return netif_is_bond_master(dev) || netif_is_team_master(dev); 4985 } 4986 4987 static inline bool netif_is_lag_port(const struct net_device *dev) 4988 { 4989 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 4990 } 4991 4992 static inline bool netif_is_rxfh_configured(const struct net_device *dev) 4993 { 4994 return dev->priv_flags & IFF_RXFH_CONFIGURED; 4995 } 4996 4997 static inline bool netif_is_failover(const struct net_device *dev) 4998 { 4999 return dev->priv_flags & IFF_FAILOVER; 5000 } 5001 5002 static inline bool netif_is_failover_slave(const struct net_device *dev) 5003 { 5004 return dev->priv_flags & IFF_FAILOVER_SLAVE; 5005 } 5006 5007 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 5008 static inline void netif_keep_dst(struct net_device *dev) 5009 { 5010 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 5011 } 5012 5013 /* return true if dev can't cope with mtu frames that need vlan tag insertion */ 5014 static inline bool netif_reduces_vlan_mtu(struct 
/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __section(".data.once") __print_once;	\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
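/*
 * Typical driver usage of the helpers above (a sketch only; the ring index
 * and the FOO_FW_MIN_VERSION string are made-up examples):
 *
 *	netdev_err(dev, "DMA ring %d stalled\n", ring);
 *	netdev_warn_once(dev, "firmware older than %s, expect reduced features\n",
 *			 FOO_FW_MIN_VERSION);
 *
 * The *_once variants emit a given message at most once per boot, which
 * keeps a condition hit repeatedly in a hot path from flooding the log.
 */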
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)
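/*
 * The netif_* variants gate output on a per-driver message-type mask,
 * conventionally a msg_enable field that ethtool can adjust. A sketch,
 * assuming a private struct with such a field (struct foo_priv and the
 * variable names are illustrative):
 *
 *	struct foo_priv {
 *		u32 msg_enable;		(bits from the NETIF_MSG_* enum)
 *	};
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	netif_info(priv, link, dev, "link up, %d Mbps\n", speed);
 *
 * netif_msg_init() and the netif_msg_##type() tests used above come from the
 * netif_msg infrastructure earlier in this header.
 */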
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16. Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

extern struct net_device *blackhole_netdev;

#endif	/* _LINUX_NETDEVICE_H */