1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Definitions for the Interfaces handler. 8 * 9 * Version: @(#)dev.h 1.0.10 08/12/93 10 * 11 * Authors: Ross Biro 12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 14 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> 15 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 16 * Bjorn Ekwall. <bj0rn@blox.se> 17 * Pekka Riikonen <priikone@poseidon.pspt.fi> 18 * 19 * Moved to /usr/include/linux for NET3 20 */ 21 #ifndef _LINUX_NETDEVICE_H 22 #define _LINUX_NETDEVICE_H 23 24 #include <linux/timer.h> 25 #include <linux/bug.h> 26 #include <linux/delay.h> 27 #include <linux/atomic.h> 28 #include <linux/prefetch.h> 29 #include <asm/cache.h> 30 #include <asm/byteorder.h> 31 #include <asm/local.h> 32 33 #include <linux/percpu.h> 34 #include <linux/rculist.h> 35 #include <linux/workqueue.h> 36 #include <linux/dynamic_queue_limits.h> 37 38 #include <net/net_namespace.h> 39 #ifdef CONFIG_DCB 40 #include <net/dcbnl.h> 41 #endif 42 #include <net/netprio_cgroup.h> 43 44 #include <linux/netdev_features.h> 45 #include <linux/neighbour.h> 46 #include <uapi/linux/netdevice.h> 47 #include <uapi/linux/if_bonding.h> 48 #include <uapi/linux/pkt_cls.h> 49 #include <uapi/linux/netdev.h> 50 #include <linux/hashtable.h> 51 #include <linux/rbtree.h> 52 #include <net/net_trackers.h> 53 #include <net/net_debug.h> 54 #include <net/dropreason-core.h> 55 56 struct netpoll_info; 57 struct device; 58 struct ethtool_ops; 59 struct kernel_hwtstamp_config; 60 struct phy_device; 61 struct dsa_port; 62 struct ip_tunnel_parm; 63 struct macsec_context; 64 struct macsec_ops; 65 struct netdev_name_node; 66 struct sd_flow_limit; 67 struct sfp_bus; 68 /* 802.11 specific */ 69 struct wireless_dev; 70 /* 802.15.4 specific */ 71 struct wpan_dev; 72 struct mpls_dev; 73 /* UDP Tunnel offloads */ 74 struct udp_tunnel_info; 75 struct udp_tunnel_nic_info; 76 struct udp_tunnel_nic; 77 struct bpf_prog; 78 struct xdp_buff; 79 struct xdp_frame; 80 struct xdp_metadata_ops; 81 struct xdp_md; 82 83 typedef u32 xdp_features_t; 84 85 void synchronize_net(void); 86 void netdev_set_default_ethtool_ops(struct net_device *dev, 87 const struct ethtool_ops *ops); 88 void netdev_sw_irq_coalesce_default_on(struct net_device *dev); 89 90 /* Backlog congestion levels */ 91 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 92 #define NET_RX_DROP 1 /* packet dropped */ 93 94 #define MAX_NEST_DEV 8 95 96 /* 97 * Transmit return codes: transmit return codes originate from three different 98 * namespaces: 99 * 100 * - qdisc return codes 101 * - driver transmit return codes 102 * - errno values 103 * 104 * Drivers are allowed to return any one of those in their hard_start_xmit() 105 * function. Real network devices commonly used with qdiscs should only return 106 * the driver transmit return codes though - when qdiscs are used, the actual 107 * transmission happens asynchronously, so the value is not propagated to 108 * higher layers. Virtual network devices transmit synchronously; in this case 109 * the driver transmit return codes are consumed by dev_queue_xmit(), and all 110 * others are propagated to higher layers. 111 */ 112 113 /* qdisc ->enqueue() return codes. */ 114 #define NET_XMIT_SUCCESS 0x00 115 #define NET_XMIT_DROP 0x01 /* skb dropped */ 116 #define NET_XMIT_CN 0x02 /* congestion notification */ 117 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 118 119 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 120 * indicates that the device will soon be dropping packets, or already drops 121 * some packets of the same priority; prompting us to send less aggressively. */ 122 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 123 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 124 125 /* Driver transmit return codes */ 126 #define NETDEV_TX_MASK 0xf0 127 128 enum netdev_tx { 129 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 130 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 131 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 132 }; 133 typedef enum netdev_tx netdev_tx_t; 134 135 /* 136 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 137 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 138 */ 139 static inline bool dev_xmit_complete(int rc) 140 { 141 /* 142 * Positive cases with an skb consumed by a driver: 143 * - successful transmission (rc == NETDEV_TX_OK) 144 * - error while transmitting (rc < 0) 145 * - error while queueing to a different device (rc & NET_XMIT_MASK) 146 */ 147 if (likely(rc < NET_XMIT_MASK)) 148 return true; 149 150 return false; 151 } 152 153 /* 154 * Compute the worst-case header length according to the protocols 155 * used. 156 */ 157 158 #if defined(CONFIG_HYPERV_NET) 159 # define LL_MAX_HEADER 128 160 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 161 # if defined(CONFIG_MAC80211_MESH) 162 # define LL_MAX_HEADER 128 163 # else 164 # define LL_MAX_HEADER 96 165 # endif 166 #else 167 # define LL_MAX_HEADER 32 168 #endif 169 170 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ 171 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) 172 #define MAX_HEADER LL_MAX_HEADER 173 #else 174 #define MAX_HEADER (LL_MAX_HEADER + 48) 175 #endif 176 177 /* 178 * Old network device statistics. Fields are native words 179 * (unsigned long) so they can be read and written atomically. 180 */ 181 182 #define NET_DEV_STAT(FIELD) \ 183 union { \ 184 unsigned long FIELD; \ 185 atomic_long_t __##FIELD; \ 186 } 187 188 struct net_device_stats { 189 NET_DEV_STAT(rx_packets); 190 NET_DEV_STAT(tx_packets); 191 NET_DEV_STAT(rx_bytes); 192 NET_DEV_STAT(tx_bytes); 193 NET_DEV_STAT(rx_errors); 194 NET_DEV_STAT(tx_errors); 195 NET_DEV_STAT(rx_dropped); 196 NET_DEV_STAT(tx_dropped); 197 NET_DEV_STAT(multicast); 198 NET_DEV_STAT(collisions); 199 NET_DEV_STAT(rx_length_errors); 200 NET_DEV_STAT(rx_over_errors); 201 NET_DEV_STAT(rx_crc_errors); 202 NET_DEV_STAT(rx_frame_errors); 203 NET_DEV_STAT(rx_fifo_errors); 204 NET_DEV_STAT(rx_missed_errors); 205 NET_DEV_STAT(tx_aborted_errors); 206 NET_DEV_STAT(tx_carrier_errors); 207 NET_DEV_STAT(tx_fifo_errors); 208 NET_DEV_STAT(tx_heartbeat_errors); 209 NET_DEV_STAT(tx_window_errors); 210 NET_DEV_STAT(rx_compressed); 211 NET_DEV_STAT(tx_compressed); 212 }; 213 #undef NET_DEV_STAT 214 215 /* per-cpu stats, allocated on demand. 216 * Try to fit them in a single cache line, for dev_get_stats() sake. 217 */ 218 struct net_device_core_stats { 219 unsigned long rx_dropped; 220 unsigned long tx_dropped; 221 unsigned long rx_nohandler; 222 unsigned long rx_otherhost_dropped; 223 } __aligned(4 * sizeof(unsigned long)); 224 225 #include <linux/cache.h> 226 #include <linux/skbuff.h> 227 228 #ifdef CONFIG_RPS 229 #include <linux/static_key.h> 230 extern struct static_key_false rps_needed; 231 extern struct static_key_false rfs_needed; 232 #endif 233 234 struct neighbour; 235 struct neigh_parms; 236 struct sk_buff; 237 238 struct netdev_hw_addr { 239 struct list_head list; 240 struct rb_node node; 241 unsigned char addr[MAX_ADDR_LEN]; 242 unsigned char type; 243 #define NETDEV_HW_ADDR_T_LAN 1 244 #define NETDEV_HW_ADDR_T_SAN 2 245 #define NETDEV_HW_ADDR_T_UNICAST 3 246 #define NETDEV_HW_ADDR_T_MULTICAST 4 247 bool global_use; 248 int sync_cnt; 249 int refcount; 250 int synced; 251 struct rcu_head rcu_head; 252 }; 253 254 struct netdev_hw_addr_list { 255 struct list_head list; 256 int count; 257 258 /* Auxiliary tree for faster lookup on addition and deletion */ 259 struct rb_root tree; 260 }; 261 262 #define netdev_hw_addr_list_count(l) ((l)->count) 263 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 264 #define netdev_hw_addr_list_for_each(ha, l) \ 265 list_for_each_entry(ha, &(l)->list, list) 266 267 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 268 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 269 #define netdev_for_each_uc_addr(ha, dev) \ 270 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 271 #define netdev_for_each_synced_uc_addr(_ha, _dev) \ 272 netdev_for_each_uc_addr((_ha), (_dev)) \ 273 if ((_ha)->sync_cnt) 274 275 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 276 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 277 #define netdev_for_each_mc_addr(ha, dev) \ 278 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 279 #define netdev_for_each_synced_mc_addr(_ha, _dev) \ 280 netdev_for_each_mc_addr((_ha), (_dev)) \ 281 if ((_ha)->sync_cnt) 282 283 struct hh_cache { 284 unsigned int hh_len; 285 seqlock_t hh_lock; 286 287 /* cached hardware header; allow for machine alignment needs. */ 288 #define HH_DATA_MOD 16 289 #define HH_DATA_OFF(__len) \ 290 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 291 #define HH_DATA_ALIGN(__len) \ 292 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 293 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 294 }; 295 296 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. 297 * Alternative is: 298 * dev->hard_header_len ? (dev->hard_header_len + 299 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 300 * 301 * We could use other alignment values, but we must maintain the 302 * relationship HH alignment <= LL alignment. 303 */ 304 #define LL_RESERVED_SPACE(dev) \ 305 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ 306 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 307 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 308 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ 309 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 310 311 struct header_ops { 312 int (*create) (struct sk_buff *skb, struct net_device *dev, 313 unsigned short type, const void *daddr, 314 const void *saddr, unsigned int len); 315 int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 316 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 317 void (*cache_update)(struct hh_cache *hh, 318 const struct net_device *dev, 319 const unsigned char *haddr); 320 bool (*validate)(const char *ll_header, unsigned int len); 321 __be16 (*parse_protocol)(const struct sk_buff *skb); 322 }; 323 324 /* These flag bits are private to the generic network queueing 325 * layer; they may not be explicitly referenced by any other 326 * code. 327 */ 328 329 enum netdev_state_t { 330 __LINK_STATE_START, 331 __LINK_STATE_PRESENT, 332 __LINK_STATE_NOCARRIER, 333 __LINK_STATE_LINKWATCH_PENDING, 334 __LINK_STATE_DORMANT, 335 __LINK_STATE_TESTING, 336 }; 337 338 struct gro_list { 339 struct list_head list; 340 int count; 341 }; 342 343 /* 344 * size of gro hash buckets, must less than bit number of 345 * napi_struct::gro_bitmask 346 */ 347 #define GRO_HASH_BUCKETS 8 348 349 /* 350 * Structure for NAPI scheduling similar to tasklet but with weighting 351 */ 352 struct napi_struct { 353 /* The poll_list must only be managed by the entity which 354 * changes the state of the NAPI_STATE_SCHED bit. This means 355 * whoever atomically sets that bit can add this napi_struct 356 * to the per-CPU poll_list, and whoever clears that bit 357 * can remove from the list right before clearing the bit. 358 */ 359 struct list_head poll_list; 360 361 unsigned long state; 362 int weight; 363 int defer_hard_irqs_count; 364 unsigned long gro_bitmask; 365 int (*poll)(struct napi_struct *, int); 366 #ifdef CONFIG_NETPOLL 367 /* CPU actively polling if netpoll is configured */ 368 int poll_owner; 369 #endif 370 /* CPU on which NAPI has been scheduled for processing */ 371 int list_owner; 372 struct net_device *dev; 373 struct gro_list gro_hash[GRO_HASH_BUCKETS]; 374 struct sk_buff *skb; 375 struct list_head rx_list; /* Pending GRO_NORMAL skbs */ 376 int rx_count; /* length of rx_list */ 377 unsigned int napi_id; 378 struct hrtimer timer; 379 struct task_struct *thread; 380 /* control-path-only fields follow */ 381 struct list_head dev_list; 382 struct hlist_node napi_hash_node; 383 }; 384 385 enum { 386 NAPI_STATE_SCHED, /* Poll is scheduled */ 387 NAPI_STATE_MISSED, /* reschedule a napi */ 388 NAPI_STATE_DISABLE, /* Disable pending */ 389 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 390 NAPI_STATE_LISTED, /* NAPI added to system lists */ 391 NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ 392 NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ 393 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ 394 NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ 395 NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ 396 }; 397 398 enum { 399 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), 400 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), 401 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), 402 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), 403 NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), 404 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), 405 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), 406 NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), 407 NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), 408 NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), 409 }; 410 411 enum gro_result { 412 GRO_MERGED, 413 GRO_MERGED_FREE, 414 GRO_HELD, 415 GRO_NORMAL, 416 GRO_CONSUMED, 417 }; 418 typedef enum gro_result gro_result_t; 419 420 /* 421 * enum rx_handler_result - Possible return values for rx_handlers. 422 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it 423 * further. 424 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in 425 * case skb->dev was changed by rx_handler. 426 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. 427 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. 428 * 429 * rx_handlers are functions called from inside __netif_receive_skb(), to do 430 * special processing of the skb, prior to delivery to protocol handlers. 431 * 432 * Currently, a net_device can only have a single rx_handler registered. Trying 433 * to register a second rx_handler will return -EBUSY. 434 * 435 * To register a rx_handler on a net_device, use netdev_rx_handler_register(). 436 * To unregister a rx_handler on a net_device, use 437 * netdev_rx_handler_unregister(). 438 * 439 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to 440 * do with the skb. 441 * 442 * If the rx_handler consumed the skb in some way, it should return 443 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for 444 * the skb to be delivered in some other way. 445 * 446 * If the rx_handler changed skb->dev, to divert the skb to another 447 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the 448 * new device will be called if it exists. 449 * 450 * If the rx_handler decides the skb should be ignored, it should return 451 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that 452 * are registered on exact device (ptype->dev == skb->dev). 453 * 454 * If the rx_handler didn't change skb->dev, but wants the skb to be normally 455 * delivered, it should return RX_HANDLER_PASS. 456 * 457 * A device without a registered rx_handler will behave as if rx_handler 458 * returned RX_HANDLER_PASS. 459 */ 460 461 enum rx_handler_result { 462 RX_HANDLER_CONSUMED, 463 RX_HANDLER_ANOTHER, 464 RX_HANDLER_EXACT, 465 RX_HANDLER_PASS, 466 }; 467 typedef enum rx_handler_result rx_handler_result_t; 468 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 469 470 void __napi_schedule(struct napi_struct *n); 471 void __napi_schedule_irqoff(struct napi_struct *n); 472 473 static inline bool napi_disable_pending(struct napi_struct *n) 474 { 475 return test_bit(NAPI_STATE_DISABLE, &n->state); 476 } 477 478 static inline bool napi_prefer_busy_poll(struct napi_struct *n) 479 { 480 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); 481 } 482 483 bool napi_schedule_prep(struct napi_struct *n); 484 485 /** 486 * napi_schedule - schedule NAPI poll 487 * @n: NAPI context 488 * 489 * Schedule NAPI poll routine to be called if it is not already 490 * running. 491 */ 492 static inline void napi_schedule(struct napi_struct *n) 493 { 494 if (napi_schedule_prep(n)) 495 __napi_schedule(n); 496 } 497 498 /** 499 * napi_schedule_irqoff - schedule NAPI poll 500 * @n: NAPI context 501 * 502 * Variant of napi_schedule(), assuming hard irqs are masked. 503 */ 504 static inline void napi_schedule_irqoff(struct napi_struct *n) 505 { 506 if (napi_schedule_prep(n)) 507 __napi_schedule_irqoff(n); 508 } 509 510 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ 511 static inline bool napi_reschedule(struct napi_struct *napi) 512 { 513 if (napi_schedule_prep(napi)) { 514 __napi_schedule(napi); 515 return true; 516 } 517 return false; 518 } 519 520 /** 521 * napi_complete_done - NAPI processing complete 522 * @n: NAPI context 523 * @work_done: number of packets processed 524 * 525 * Mark NAPI processing as complete. Should only be called if poll budget 526 * has not been completely consumed. 527 * Prefer over napi_complete(). 528 * Return false if device should avoid rearming interrupts. 529 */ 530 bool napi_complete_done(struct napi_struct *n, int work_done); 531 532 static inline bool napi_complete(struct napi_struct *n) 533 { 534 return napi_complete_done(n, 0); 535 } 536 537 int dev_set_threaded(struct net_device *dev, bool threaded); 538 539 /** 540 * napi_disable - prevent NAPI from scheduling 541 * @n: NAPI context 542 * 543 * Stop NAPI from being scheduled on this context. 544 * Waits till any outstanding processing completes. 545 */ 546 void napi_disable(struct napi_struct *n); 547 548 void napi_enable(struct napi_struct *n); 549 550 /** 551 * napi_synchronize - wait until NAPI is not running 552 * @n: NAPI context 553 * 554 * Wait until NAPI is done being scheduled on this context. 555 * Waits till any outstanding processing completes but 556 * does not disable future activations. 557 */ 558 static inline void napi_synchronize(const struct napi_struct *n) 559 { 560 if (IS_ENABLED(CONFIG_SMP)) 561 while (test_bit(NAPI_STATE_SCHED, &n->state)) 562 msleep(1); 563 else 564 barrier(); 565 } 566 567 /** 568 * napi_if_scheduled_mark_missed - if napi is running, set the 569 * NAPIF_STATE_MISSED 570 * @n: NAPI context 571 * 572 * If napi is running, set the NAPIF_STATE_MISSED, and return true if 573 * NAPI is scheduled. 574 **/ 575 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) 576 { 577 unsigned long val, new; 578 579 val = READ_ONCE(n->state); 580 do { 581 if (val & NAPIF_STATE_DISABLE) 582 return true; 583 584 if (!(val & NAPIF_STATE_SCHED)) 585 return false; 586 587 new = val | NAPIF_STATE_MISSED; 588 } while (!try_cmpxchg(&n->state, &val, new)); 589 590 return true; 591 } 592 593 enum netdev_queue_state_t { 594 __QUEUE_STATE_DRV_XOFF, 595 __QUEUE_STATE_STACK_XOFF, 596 __QUEUE_STATE_FROZEN, 597 }; 598 599 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) 600 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) 601 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) 602 603 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) 604 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ 605 QUEUE_STATE_FROZEN) 606 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ 607 QUEUE_STATE_FROZEN) 608 609 /* 610 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The 611 * netif_tx_* functions below are used to manipulate this flag. The 612 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit 613 * queue independently. The netif_xmit_*stopped functions below are called 614 * to check if the queue has been stopped by the driver or stack (either 615 * of the XOFF bits are set in the state). Drivers should not need to call 616 * netif_xmit*stopped functions, they should only be using netif_tx_*. 617 */ 618 619 struct netdev_queue { 620 /* 621 * read-mostly part 622 */ 623 struct net_device *dev; 624 netdevice_tracker dev_tracker; 625 626 struct Qdisc __rcu *qdisc; 627 struct Qdisc __rcu *qdisc_sleeping; 628 #ifdef CONFIG_SYSFS 629 struct kobject kobj; 630 #endif 631 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 632 int numa_node; 633 #endif 634 unsigned long tx_maxrate; 635 /* 636 * Number of TX timeouts for this queue 637 * (/sys/class/net/DEV/Q/trans_timeout) 638 */ 639 atomic_long_t trans_timeout; 640 641 /* Subordinate device that the queue has been assigned to */ 642 struct net_device *sb_dev; 643 #ifdef CONFIG_XDP_SOCKETS 644 struct xsk_buff_pool *pool; 645 #endif 646 /* 647 * write-mostly part 648 */ 649 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 650 int xmit_lock_owner; 651 /* 652 * Time (in jiffies) of last Tx 653 */ 654 unsigned long trans_start; 655 656 unsigned long state; 657 658 #ifdef CONFIG_BQL 659 struct dql dql; 660 #endif 661 } ____cacheline_aligned_in_smp; 662 663 extern int sysctl_fb_tunnels_only_for_init_net; 664 extern int sysctl_devconf_inherit_init_net; 665 666 /* 667 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns 668 * == 1 : For initns only 669 * == 2 : For none. 670 */ 671 static inline bool net_has_fallback_tunnels(const struct net *net) 672 { 673 #if IS_ENABLED(CONFIG_SYSCTL) 674 int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); 675 676 return !fb_tunnels_only_for_init_net || 677 (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); 678 #else 679 return true; 680 #endif 681 } 682 683 static inline int net_inherit_devconf(void) 684 { 685 #if IS_ENABLED(CONFIG_SYSCTL) 686 return READ_ONCE(sysctl_devconf_inherit_init_net); 687 #else 688 return 0; 689 #endif 690 } 691 692 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 693 { 694 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 695 return q->numa_node; 696 #else 697 return NUMA_NO_NODE; 698 #endif 699 } 700 701 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) 702 { 703 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 704 q->numa_node = node; 705 #endif 706 } 707 708 #ifdef CONFIG_RPS 709 /* 710 * This structure holds an RPS map which can be of variable length. The 711 * map is an array of CPUs. 712 */ 713 struct rps_map { 714 unsigned int len; 715 struct rcu_head rcu; 716 u16 cpus[]; 717 }; 718 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) 719 720 /* 721 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the 722 * tail pointer for that CPU's input queue at the time of last enqueue, and 723 * a hardware filter index. 724 */ 725 struct rps_dev_flow { 726 u16 cpu; 727 u16 filter; 728 unsigned int last_qtail; 729 }; 730 #define RPS_NO_FILTER 0xffff 731 732 /* 733 * The rps_dev_flow_table structure contains a table of flow mappings. 734 */ 735 struct rps_dev_flow_table { 736 unsigned int mask; 737 struct rcu_head rcu; 738 struct rps_dev_flow flows[]; 739 }; 740 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ 741 ((_num) * sizeof(struct rps_dev_flow))) 742 743 /* 744 * The rps_sock_flow_table contains mappings of flows to the last CPU 745 * on which they were processed by the application (set in recvmsg). 746 * Each entry is a 32bit value. Upper part is the high-order bits 747 * of flow hash, lower part is CPU number. 748 * rps_cpu_mask is used to partition the space, depending on number of 749 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 750 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, 751 * meaning we use 32-6=26 bits for the hash. 752 */ 753 struct rps_sock_flow_table { 754 u32 mask; 755 756 u32 ents[] ____cacheline_aligned_in_smp; 757 }; 758 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) 759 760 #define RPS_NO_CPU 0xffff 761 762 extern u32 rps_cpu_mask; 763 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 764 765 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 766 u32 hash) 767 { 768 if (table && hash) { 769 unsigned int index = hash & table->mask; 770 u32 val = hash & ~rps_cpu_mask; 771 772 /* We only give a hint, preemption can change CPU under us */ 773 val |= raw_smp_processor_id(); 774 775 /* The following WRITE_ONCE() is paired with the READ_ONCE() 776 * here, and another one in get_rps_cpu(). 777 */ 778 if (READ_ONCE(table->ents[index]) != val) 779 WRITE_ONCE(table->ents[index], val); 780 } 781 } 782 783 #ifdef CONFIG_RFS_ACCEL 784 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, 785 u16 filter_id); 786 #endif 787 #endif /* CONFIG_RPS */ 788 789 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ 790 enum xps_map_type { 791 XPS_CPUS = 0, 792 XPS_RXQS, 793 XPS_MAPS_MAX, 794 }; 795 796 #ifdef CONFIG_XPS 797 /* 798 * This structure holds an XPS map which can be of variable length. The 799 * map is an array of queues. 800 */ 801 struct xps_map { 802 unsigned int len; 803 unsigned int alloc_len; 804 struct rcu_head rcu; 805 u16 queues[]; 806 }; 807 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) 808 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ 809 - sizeof(struct xps_map)) / sizeof(u16)) 810 811 /* 812 * This structure holds all XPS maps for device. Maps are indexed by CPU. 813 * 814 * We keep track of the number of cpus/rxqs used when the struct is allocated, 815 * in nr_ids. This will help not accessing out-of-bound memory. 816 * 817 * We keep track of the number of traffic classes used when the struct is 818 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're 819 * not crossing its upper bound, as the original dev->num_tc can be updated in 820 * the meantime. 821 */ 822 struct xps_dev_maps { 823 struct rcu_head rcu; 824 unsigned int nr_ids; 825 s16 num_tc; 826 struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ 827 }; 828 829 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ 830 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) 831 832 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ 833 (_rxqs * (_tcs) * sizeof(struct xps_map *))) 834 835 #endif /* CONFIG_XPS */ 836 837 #define TC_MAX_QUEUE 16 838 #define TC_BITMASK 15 839 /* HW offloaded queuing disciplines txq count and offset maps */ 840 struct netdev_tc_txq { 841 u16 count; 842 u16 offset; 843 }; 844 845 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 846 /* 847 * This structure is to hold information about the device 848 * configured to run FCoE protocol stack. 849 */ 850 struct netdev_fcoe_hbainfo { 851 char manufacturer[64]; 852 char serial_number[64]; 853 char hardware_version[64]; 854 char driver_version[64]; 855 char optionrom_version[64]; 856 char firmware_version[64]; 857 char model[256]; 858 char model_description[256]; 859 }; 860 #endif 861 862 #define MAX_PHYS_ITEM_ID_LEN 32 863 864 /* This structure holds a unique identifier to identify some 865 * physical item (port for example) used by a netdevice. 866 */ 867 struct netdev_phys_item_id { 868 unsigned char id[MAX_PHYS_ITEM_ID_LEN]; 869 unsigned char id_len; 870 }; 871 872 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, 873 struct netdev_phys_item_id *b) 874 { 875 return a->id_len == b->id_len && 876 memcmp(a->id, b->id, a->id_len) == 0; 877 } 878 879 typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 880 struct sk_buff *skb, 881 struct net_device *sb_dev); 882 883 enum net_device_path_type { 884 DEV_PATH_ETHERNET = 0, 885 DEV_PATH_VLAN, 886 DEV_PATH_BRIDGE, 887 DEV_PATH_PPPOE, 888 DEV_PATH_DSA, 889 DEV_PATH_MTK_WDMA, 890 }; 891 892 struct net_device_path { 893 enum net_device_path_type type; 894 const struct net_device *dev; 895 union { 896 struct { 897 u16 id; 898 __be16 proto; 899 u8 h_dest[ETH_ALEN]; 900 } encap; 901 struct { 902 enum { 903 DEV_PATH_BR_VLAN_KEEP, 904 DEV_PATH_BR_VLAN_TAG, 905 DEV_PATH_BR_VLAN_UNTAG, 906 DEV_PATH_BR_VLAN_UNTAG_HW, 907 } vlan_mode; 908 u16 vlan_id; 909 __be16 vlan_proto; 910 } bridge; 911 struct { 912 int port; 913 u16 proto; 914 } dsa; 915 struct { 916 u8 wdma_idx; 917 u8 queue; 918 u16 wcid; 919 u8 bss; 920 } mtk_wdma; 921 }; 922 }; 923 924 #define NET_DEVICE_PATH_STACK_MAX 5 925 #define NET_DEVICE_PATH_VLAN_MAX 2 926 927 struct net_device_path_stack { 928 int num_paths; 929 struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; 930 }; 931 932 struct net_device_path_ctx { 933 const struct net_device *dev; 934 u8 daddr[ETH_ALEN]; 935 936 int num_vlans; 937 struct { 938 u16 id; 939 __be16 proto; 940 } vlan[NET_DEVICE_PATH_VLAN_MAX]; 941 }; 942 943 enum tc_setup_type { 944 TC_QUERY_CAPS, 945 TC_SETUP_QDISC_MQPRIO, 946 TC_SETUP_CLSU32, 947 TC_SETUP_CLSFLOWER, 948 TC_SETUP_CLSMATCHALL, 949 TC_SETUP_CLSBPF, 950 TC_SETUP_BLOCK, 951 TC_SETUP_QDISC_CBS, 952 TC_SETUP_QDISC_RED, 953 TC_SETUP_QDISC_PRIO, 954 TC_SETUP_QDISC_MQ, 955 TC_SETUP_QDISC_ETF, 956 TC_SETUP_ROOT_QDISC, 957 TC_SETUP_QDISC_GRED, 958 TC_SETUP_QDISC_TAPRIO, 959 TC_SETUP_FT, 960 TC_SETUP_QDISC_ETS, 961 TC_SETUP_QDISC_TBF, 962 TC_SETUP_QDISC_FIFO, 963 TC_SETUP_QDISC_HTB, 964 TC_SETUP_ACT, 965 }; 966 967 /* These structures hold the attributes of bpf state that are being passed 968 * to the netdevice through the bpf op. 969 */ 970 enum bpf_netdev_command { 971 /* Set or clear a bpf program used in the earliest stages of packet 972 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee 973 * is responsible for calling bpf_prog_put on any old progs that are 974 * stored. In case of error, the callee need not release the new prog 975 * reference, but on success it takes ownership and must bpf_prog_put 976 * when it is no longer used. 977 */ 978 XDP_SETUP_PROG, 979 XDP_SETUP_PROG_HW, 980 /* BPF program for offload callbacks, invoked at program load time. */ 981 BPF_OFFLOAD_MAP_ALLOC, 982 BPF_OFFLOAD_MAP_FREE, 983 XDP_SETUP_XSK_POOL, 984 }; 985 986 struct bpf_prog_offload_ops; 987 struct netlink_ext_ack; 988 struct xdp_umem; 989 struct xdp_dev_bulk_queue; 990 struct bpf_xdp_link; 991 992 enum bpf_xdp_mode { 993 XDP_MODE_SKB = 0, 994 XDP_MODE_DRV = 1, 995 XDP_MODE_HW = 2, 996 __MAX_XDP_MODE 997 }; 998 999 struct bpf_xdp_entity { 1000 struct bpf_prog *prog; 1001 struct bpf_xdp_link *link; 1002 }; 1003 1004 struct netdev_bpf { 1005 enum bpf_netdev_command command; 1006 union { 1007 /* XDP_SETUP_PROG */ 1008 struct { 1009 u32 flags; 1010 struct bpf_prog *prog; 1011 struct netlink_ext_ack *extack; 1012 }; 1013 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ 1014 struct { 1015 struct bpf_offloaded_map *offmap; 1016 }; 1017 /* XDP_SETUP_XSK_POOL */ 1018 struct { 1019 struct xsk_buff_pool *pool; 1020 u16 queue_id; 1021 } xsk; 1022 }; 1023 }; 1024 1025 /* Flags for ndo_xsk_wakeup. */ 1026 #define XDP_WAKEUP_RX (1 << 0) 1027 #define XDP_WAKEUP_TX (1 << 1) 1028 1029 #ifdef CONFIG_XFRM_OFFLOAD 1030 struct xfrmdev_ops { 1031 int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); 1032 void (*xdo_dev_state_delete) (struct xfrm_state *x); 1033 void (*xdo_dev_state_free) (struct xfrm_state *x); 1034 bool (*xdo_dev_offload_ok) (struct sk_buff *skb, 1035 struct xfrm_state *x); 1036 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); 1037 void (*xdo_dev_state_update_curlft) (struct xfrm_state *x); 1038 int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); 1039 void (*xdo_dev_policy_delete) (struct xfrm_policy *x); 1040 void (*xdo_dev_policy_free) (struct xfrm_policy *x); 1041 }; 1042 #endif 1043 1044 struct dev_ifalias { 1045 struct rcu_head rcuhead; 1046 char ifalias[]; 1047 }; 1048 1049 struct devlink; 1050 struct tlsdev_ops; 1051 1052 struct netdev_net_notifier { 1053 struct list_head list; 1054 struct notifier_block *nb; 1055 }; 1056 1057 /* 1058 * This structure defines the management hooks for network devices. 1059 * The following hooks can be defined; unless noted otherwise, they are 1060 * optional and can be filled with a null pointer. 1061 * 1062 * int (*ndo_init)(struct net_device *dev); 1063 * This function is called once when a network device is registered. 1064 * The network device can use this for any late stage initialization 1065 * or semantic validation. It can fail with an error code which will 1066 * be propagated back to register_netdev. 1067 * 1068 * void (*ndo_uninit)(struct net_device *dev); 1069 * This function is called when device is unregistered or when registration 1070 * fails. It is not called if init fails. 1071 * 1072 * int (*ndo_open)(struct net_device *dev); 1073 * This function is called when a network device transitions to the up 1074 * state. 1075 * 1076 * int (*ndo_stop)(struct net_device *dev); 1077 * This function is called when a network device transitions to the down 1078 * state. 1079 * 1080 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1081 * struct net_device *dev); 1082 * Called when a packet needs to be transmitted. 1083 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop 1084 * the queue before that can happen; it's for obsolete devices and weird 1085 * corner cases, but the stack really does a non-trivial amount 1086 * of useless work if you return NETDEV_TX_BUSY. 1087 * Required; cannot be NULL. 1088 * 1089 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1090 * struct net_device *dev 1091 * netdev_features_t features); 1092 * Called by core transmit path to determine if device is capable of 1093 * performing offload operations on a given packet. This is to give 1094 * the device an opportunity to implement any restrictions that cannot 1095 * be otherwise expressed by feature flags. The check is called with 1096 * the set of features that the stack has calculated and it returns 1097 * those the driver believes to be appropriate. 1098 * 1099 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 1100 * struct net_device *sb_dev); 1101 * Called to decide which queue to use when device supports multiple 1102 * transmit queues. 1103 * 1104 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 1105 * This function is called to allow device receiver to make 1106 * changes to configuration when multicast or promiscuous is enabled. 1107 * 1108 * void (*ndo_set_rx_mode)(struct net_device *dev); 1109 * This function is called device changes address list filtering. 1110 * If driver handles unicast address filtering, it should set 1111 * IFF_UNICAST_FLT in its priv_flags. 1112 * 1113 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 1114 * This function is called when the Media Access Control address 1115 * needs to be changed. If this interface is not defined, the 1116 * MAC address can not be changed. 1117 * 1118 * int (*ndo_validate_addr)(struct net_device *dev); 1119 * Test if Media Access Control address is valid for the device. 1120 * 1121 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1122 * Old-style ioctl entry point. This is used internally by the 1123 * appletalk and ieee802154 subsystems but is no longer called by 1124 * the device ioctl handler. 1125 * 1126 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); 1127 * Used by the bonding driver for its device specific ioctls: 1128 * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, 1129 * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY 1130 * 1131 * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1132 * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, 1133 * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. 1134 * 1135 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 1136 * Used to set network devices bus interface parameters. This interface 1137 * is retained for legacy reasons; new devices should use the bus 1138 * interface (PCI) for low level management. 1139 * 1140 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 1141 * Called when a user wants to change the Maximum Transfer Unit 1142 * of a device. 1143 * 1144 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); 1145 * Callback used when the transmitter has not made any progress 1146 * for dev->watchdog ticks. 1147 * 1148 * void (*ndo_get_stats64)(struct net_device *dev, 1149 * struct rtnl_link_stats64 *storage); 1150 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1151 * Called when a user wants to get the network device usage 1152 * statistics. Drivers must do one of the following: 1153 * 1. Define @ndo_get_stats64 to fill in a zero-initialised 1154 * rtnl_link_stats64 structure passed by the caller. 1155 * 2. Define @ndo_get_stats to update a net_device_stats structure 1156 * (which should normally be dev->stats) and return a pointer to 1157 * it. The structure may be changed asynchronously only if each 1158 * field is written atomically. 1159 * 3. Update dev->stats asynchronously and atomically, and define 1160 * neither operation. 1161 * 1162 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) 1163 * Return true if this device supports offload stats of this attr_id. 1164 * 1165 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, 1166 * void *attr_data) 1167 * Get statistics for offload operations by attr_id. Write it into the 1168 * attr_data pointer. 1169 * 1170 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 1171 * If device supports VLAN filtering this function is called when a 1172 * VLAN id is registered. 1173 * 1174 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 1175 * If device supports VLAN filtering this function is called when a 1176 * VLAN id is unregistered. 1177 * 1178 * void (*ndo_poll_controller)(struct net_device *dev); 1179 * 1180 * SR-IOV management functions. 1181 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 1182 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, 1183 * u8 qos, __be16 proto); 1184 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, 1185 * int max_tx_rate); 1186 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 1187 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); 1188 * int (*ndo_get_vf_config)(struct net_device *dev, 1189 * int vf, struct ifla_vf_info *ivf); 1190 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); 1191 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 1192 * struct nlattr *port[]); 1193 * 1194 * Enable or disable the VF ability to query its RSS Redirection Table and 1195 * Hash Key. This is needed since on some devices VF share this information 1196 * with PF and querying it may introduce a theoretical security risk. 1197 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); 1198 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 1199 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, 1200 * void *type_data); 1201 * Called to setup any 'tc' scheduler, classifier or action on @dev. 1202 * This is always called from the stack with the rtnl lock held and netif 1203 * tx queues stopped. This allows the netdevice to perform queue 1204 * management safely. 1205 * 1206 * Fiber Channel over Ethernet (FCoE) offload functions. 1207 * int (*ndo_fcoe_enable)(struct net_device *dev); 1208 * Called when the FCoE protocol stack wants to start using LLD for FCoE 1209 * so the underlying device can perform whatever needed configuration or 1210 * initialization to support acceleration of FCoE traffic. 1211 * 1212 * int (*ndo_fcoe_disable)(struct net_device *dev); 1213 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 1214 * so the underlying device can perform whatever needed clean-ups to 1215 * stop supporting acceleration of FCoE traffic. 1216 * 1217 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 1218 * struct scatterlist *sgl, unsigned int sgc); 1219 * Called when the FCoE Initiator wants to initialize an I/O that 1220 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1221 * perform necessary setup and returns 1 to indicate the device is set up 1222 * successfully to perform DDP on this I/O, otherwise this returns 0. 1223 * 1224 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 1225 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 1226 * indicated by the FC exchange id 'xid', so the underlying device can 1227 * clean up and reuse resources for later DDP requests. 1228 * 1229 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 1230 * struct scatterlist *sgl, unsigned int sgc); 1231 * Called when the FCoE Target wants to initialize an I/O that 1232 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1233 * perform necessary setup and returns 1 to indicate the device is set up 1234 * successfully to perform DDP on this I/O, otherwise this returns 0. 1235 * 1236 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1237 * struct netdev_fcoe_hbainfo *hbainfo); 1238 * Called when the FCoE Protocol stack wants information on the underlying 1239 * device. This information is utilized by the FCoE protocol stack to 1240 * register attributes with Fiber Channel management service as per the 1241 * FC-GS Fabric Device Management Information(FDMI) specification. 1242 * 1243 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 1244 * Called when the underlying device wants to override default World Wide 1245 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 1246 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 1247 * protocol stack to use. 1248 * 1249 * RFS acceleration. 1250 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, 1251 * u16 rxq_index, u32 flow_id); 1252 * Set hardware filter for RFS. rxq_index is the target queue index; 1253 * flow_id is a flow ID to be passed to rps_may_expire_flow() later. 1254 * Return the filter ID on success, or a negative error code. 1255 * 1256 * Slave management functions (for bridge, bonding, etc). 1257 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); 1258 * Called to make another netdev an underling. 1259 * 1260 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); 1261 * Called to release previously enslaved netdev. 1262 * 1263 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, 1264 * struct sk_buff *skb, 1265 * bool all_slaves); 1266 * Get the xmit slave of master device. If all_slaves is true, function 1267 * assume all the slaves can transmit. 1268 * 1269 * Feature/offload setting functions. 1270 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1271 * netdev_features_t features); 1272 * Adjusts the requested feature flags according to device-specific 1273 * constraints, and returns the resulting flags. Must not modify 1274 * the device state. 1275 * 1276 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1277 * Called to update device configuration to new features. Passed 1278 * feature set might be less than what was returned by ndo_fix_features()). 1279 * Must return >0 or -errno if it changed dev->features itself. 1280 * 1281 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 1282 * struct net_device *dev, 1283 * const unsigned char *addr, u16 vid, u16 flags, 1284 * struct netlink_ext_ack *extack); 1285 * Adds an FDB entry to dev for addr. 1286 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 1287 * struct net_device *dev, 1288 * const unsigned char *addr, u16 vid) 1289 * Deletes the FDB entry from dev coresponding to addr. 1290 * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[], 1291 * struct net_device *dev, 1292 * u16 vid, 1293 * struct netlink_ext_ack *extack); 1294 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 1295 * struct net_device *dev, struct net_device *filter_dev, 1296 * int *idx) 1297 * Used to add FDB entries to dump requests. Implementers should add 1298 * entries to skb and update idx with the number of entries. 1299 * 1300 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[], 1301 * u16 nlmsg_flags, struct netlink_ext_ack *extack); 1302 * Adds an MDB entry to dev. 1303 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], 1304 * struct netlink_ext_ack *extack); 1305 * Deletes the MDB entry from dev. 1306 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, 1307 * struct netlink_callback *cb); 1308 * Dumps MDB entries from dev. The first argument (marker) in the netlink 1309 * callback is used by core rtnetlink code. 1310 * 1311 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, 1312 * u16 flags, struct netlink_ext_ack *extack) 1313 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 1314 * struct net_device *dev, u32 filter_mask, 1315 * int nlflags) 1316 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, 1317 * u16 flags); 1318 * 1319 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 1320 * Called to change device carrier. Soft-devices (like dummy, team, etc) 1321 * which do not represent real hardware may define this to allow their 1322 * userspace components to manage their virtual carrier state. Devices 1323 * that determine carrier state from physical hardware properties (eg 1324 * network cables) or protocol-dependent mechanisms (eg 1325 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 1326 * 1327 * int (*ndo_get_phys_port_id)(struct net_device *dev, 1328 * struct netdev_phys_item_id *ppid); 1329 * Called to get ID of physical port of this device. If driver does 1330 * not implement this, it is assumed that the hw is not able to have 1331 * multiple net devices on single physical port. 1332 * 1333 * int (*ndo_get_port_parent_id)(struct net_device *dev, 1334 * struct netdev_phys_item_id *ppid) 1335 * Called to get the parent ID of the physical port of this device. 1336 * 1337 * void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1338 * struct net_device *dev) 1339 * Called by upper layer devices to accelerate switching or other 1340 * station functionality into hardware. 'pdev is the lowerdev 1341 * to use for the offload and 'dev' is the net device that will 1342 * back the offload. Returns a pointer to the private structure 1343 * the upper layer will maintain. 1344 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) 1345 * Called by upper layer device to delete the station created 1346 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing 1347 * the station and priv is the structure returned by the add 1348 * operation. 1349 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1350 * int queue_index, u32 maxrate); 1351 * Called when a user wants to set a max-rate limitation of specific 1352 * TX queue. 1353 * int (*ndo_get_iflink)(const struct net_device *dev); 1354 * Called to get the iflink value of this device. 1355 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); 1356 * This function is used to get egress tunnel information for given skb. 1357 * This is useful for retrieving outer tunnel header parameters while 1358 * sampling packet. 1359 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); 1360 * This function is used to specify the headroom that the skb must 1361 * consider when allocation skb during packet reception. Setting 1362 * appropriate rx headroom value allows avoiding skb head copy on 1363 * forward. Setting a negative value resets the rx headroom to the 1364 * default value. 1365 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); 1366 * This function is used to set or query state related to XDP on the 1367 * netdevice and manage BPF offload. See definition of 1368 * enum bpf_netdev_command for details. 1369 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, 1370 * u32 flags); 1371 * This function is used to submit @n XDP packets for transmit on a 1372 * netdevice. Returns number of frames successfully transmitted, frames 1373 * that got dropped are freed/returned via xdp_return_frame(). 1374 * Returns negative number, means general error invoking ndo, meaning 1375 * no frames were xmit'ed and core-caller will free all frames. 1376 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1377 * struct xdp_buff *xdp); 1378 * Get the xmit slave of master device based on the xdp_buff. 1379 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); 1380 * This function is used to wake up the softirq, ksoftirqd or kthread 1381 * responsible for sending and/or receiving packets on a specific 1382 * queue id bound to an AF_XDP socket. The flags field specifies if 1383 * only RX, only Tx, or both should be woken up using the flags 1384 * XDP_WAKEUP_RX and XDP_WAKEUP_TX. 1385 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, 1386 * int cmd); 1387 * Add, change, delete or get information on an IPv4 tunnel. 1388 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); 1389 * If a device is paired with a peer device, return the peer instance. 1390 * The caller must be under RCU read context. 1391 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); 1392 * Get the forwarding path to reach the real device from the HW destination address 1393 * ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1394 * const struct skb_shared_hwtstamps *hwtstamps, 1395 * bool cycles); 1396 * Get hardware timestamp based on normal/adjustable time or free running 1397 * cycle counter. This function is required if physical clock supports a 1398 * free running cycle counter. 1399 * 1400 * int (*ndo_hwtstamp_get)(struct net_device *dev, 1401 * struct kernel_hwtstamp_config *kernel_config); 1402 * Get the currently configured hardware timestamping parameters for the 1403 * NIC device. 1404 * 1405 * int (*ndo_hwtstamp_set)(struct net_device *dev, 1406 * struct kernel_hwtstamp_config *kernel_config, 1407 * struct netlink_ext_ack *extack); 1408 * Change the hardware timestamping parameters for NIC device. 1409 */ 1410 struct net_device_ops { 1411 int (*ndo_init)(struct net_device *dev); 1412 void (*ndo_uninit)(struct net_device *dev); 1413 int (*ndo_open)(struct net_device *dev); 1414 int (*ndo_stop)(struct net_device *dev); 1415 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1416 struct net_device *dev); 1417 netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1418 struct net_device *dev, 1419 netdev_features_t features); 1420 u16 (*ndo_select_queue)(struct net_device *dev, 1421 struct sk_buff *skb, 1422 struct net_device *sb_dev); 1423 void (*ndo_change_rx_flags)(struct net_device *dev, 1424 int flags); 1425 void (*ndo_set_rx_mode)(struct net_device *dev); 1426 int (*ndo_set_mac_address)(struct net_device *dev, 1427 void *addr); 1428 int (*ndo_validate_addr)(struct net_device *dev); 1429 int (*ndo_do_ioctl)(struct net_device *dev, 1430 struct ifreq *ifr, int cmd); 1431 int (*ndo_eth_ioctl)(struct net_device *dev, 1432 struct ifreq *ifr, int cmd); 1433 int (*ndo_siocbond)(struct net_device *dev, 1434 struct ifreq *ifr, int cmd); 1435 int (*ndo_siocwandev)(struct net_device *dev, 1436 struct if_settings *ifs); 1437 int (*ndo_siocdevprivate)(struct net_device *dev, 1438 struct ifreq *ifr, 1439 void __user *data, int cmd); 1440 int (*ndo_set_config)(struct net_device *dev, 1441 struct ifmap *map); 1442 int (*ndo_change_mtu)(struct net_device *dev, 1443 int new_mtu); 1444 int (*ndo_neigh_setup)(struct net_device *dev, 1445 struct neigh_parms *); 1446 void (*ndo_tx_timeout) (struct net_device *dev, 1447 unsigned int txqueue); 1448 1449 void (*ndo_get_stats64)(struct net_device *dev, 1450 struct rtnl_link_stats64 *storage); 1451 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); 1452 int (*ndo_get_offload_stats)(int attr_id, 1453 const struct net_device *dev, 1454 void *attr_data); 1455 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1456 1457 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 1458 __be16 proto, u16 vid); 1459 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 1460 __be16 proto, u16 vid); 1461 #ifdef CONFIG_NET_POLL_CONTROLLER 1462 void (*ndo_poll_controller)(struct net_device *dev); 1463 int (*ndo_netpoll_setup)(struct net_device *dev, 1464 struct netpoll_info *info); 1465 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1466 #endif 1467 int (*ndo_set_vf_mac)(struct net_device *dev, 1468 int queue, u8 *mac); 1469 int (*ndo_set_vf_vlan)(struct net_device *dev, 1470 int queue, u16 vlan, 1471 u8 qos, __be16 proto); 1472 int (*ndo_set_vf_rate)(struct net_device *dev, 1473 int vf, int min_tx_rate, 1474 int max_tx_rate); 1475 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1476 int vf, bool setting); 1477 int (*ndo_set_vf_trust)(struct net_device *dev, 1478 int vf, bool setting); 1479 int (*ndo_get_vf_config)(struct net_device *dev, 1480 int vf, 1481 struct ifla_vf_info *ivf); 1482 int (*ndo_set_vf_link_state)(struct net_device *dev, 1483 int vf, int link_state); 1484 int (*ndo_get_vf_stats)(struct net_device *dev, 1485 int vf, 1486 struct ifla_vf_stats 1487 *vf_stats); 1488 int (*ndo_set_vf_port)(struct net_device *dev, 1489 int vf, 1490 struct nlattr *port[]); 1491 int (*ndo_get_vf_port)(struct net_device *dev, 1492 int vf, struct sk_buff *skb); 1493 int (*ndo_get_vf_guid)(struct net_device *dev, 1494 int vf, 1495 struct ifla_vf_guid *node_guid, 1496 struct ifla_vf_guid *port_guid); 1497 int (*ndo_set_vf_guid)(struct net_device *dev, 1498 int vf, u64 guid, 1499 int guid_type); 1500 int (*ndo_set_vf_rss_query_en)( 1501 struct net_device *dev, 1502 int vf, bool setting); 1503 int (*ndo_setup_tc)(struct net_device *dev, 1504 enum tc_setup_type type, 1505 void *type_data); 1506 #if IS_ENABLED(CONFIG_FCOE) 1507 int (*ndo_fcoe_enable)(struct net_device *dev); 1508 int (*ndo_fcoe_disable)(struct net_device *dev); 1509 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 1510 u16 xid, 1511 struct scatterlist *sgl, 1512 unsigned int sgc); 1513 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 1514 u16 xid); 1515 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 1516 u16 xid, 1517 struct scatterlist *sgl, 1518 unsigned int sgc); 1519 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1520 struct netdev_fcoe_hbainfo *hbainfo); 1521 #endif 1522 1523 #if IS_ENABLED(CONFIG_LIBFCOE) 1524 #define NETDEV_FCOE_WWNN 0 1525 #define NETDEV_FCOE_WWPN 1 1526 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 1527 u64 *wwn, int type); 1528 #endif 1529 1530 #ifdef CONFIG_RFS_ACCEL 1531 int (*ndo_rx_flow_steer)(struct net_device *dev, 1532 const struct sk_buff *skb, 1533 u16 rxq_index, 1534 u32 flow_id); 1535 #endif 1536 int (*ndo_add_slave)(struct net_device *dev, 1537 struct net_device *slave_dev, 1538 struct netlink_ext_ack *extack); 1539 int (*ndo_del_slave)(struct net_device *dev, 1540 struct net_device *slave_dev); 1541 struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, 1542 struct sk_buff *skb, 1543 bool all_slaves); 1544 struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, 1545 struct sock *sk); 1546 netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1547 netdev_features_t features); 1548 int (*ndo_set_features)(struct net_device *dev, 1549 netdev_features_t features); 1550 int (*ndo_neigh_construct)(struct net_device *dev, 1551 struct neighbour *n); 1552 void (*ndo_neigh_destroy)(struct net_device *dev, 1553 struct neighbour *n); 1554 1555 int (*ndo_fdb_add)(struct ndmsg *ndm, 1556 struct nlattr *tb[], 1557 struct net_device *dev, 1558 const unsigned char *addr, 1559 u16 vid, 1560 u16 flags, 1561 struct netlink_ext_ack *extack); 1562 int (*ndo_fdb_del)(struct ndmsg *ndm, 1563 struct nlattr *tb[], 1564 struct net_device *dev, 1565 const unsigned char *addr, 1566 u16 vid, struct netlink_ext_ack *extack); 1567 int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, 1568 struct nlattr *tb[], 1569 struct net_device *dev, 1570 u16 vid, 1571 struct netlink_ext_ack *extack); 1572 int (*ndo_fdb_dump)(struct sk_buff *skb, 1573 struct netlink_callback *cb, 1574 struct net_device *dev, 1575 struct net_device *filter_dev, 1576 int *idx); 1577 int (*ndo_fdb_get)(struct sk_buff *skb, 1578 struct nlattr *tb[], 1579 struct net_device *dev, 1580 const unsigned char *addr, 1581 u16 vid, u32 portid, u32 seq, 1582 struct netlink_ext_ack *extack); 1583 int (*ndo_mdb_add)(struct net_device *dev, 1584 struct nlattr *tb[], 1585 u16 nlmsg_flags, 1586 struct netlink_ext_ack *extack); 1587 int (*ndo_mdb_del)(struct net_device *dev, 1588 struct nlattr *tb[], 1589 struct netlink_ext_ack *extack); 1590 int (*ndo_mdb_dump)(struct net_device *dev, 1591 struct sk_buff *skb, 1592 struct netlink_callback *cb); 1593 int (*ndo_bridge_setlink)(struct net_device *dev, 1594 struct nlmsghdr *nlh, 1595 u16 flags, 1596 struct netlink_ext_ack *extack); 1597 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1598 u32 pid, u32 seq, 1599 struct net_device *dev, 1600 u32 filter_mask, 1601 int nlflags); 1602 int (*ndo_bridge_dellink)(struct net_device *dev, 1603 struct nlmsghdr *nlh, 1604 u16 flags); 1605 int (*ndo_change_carrier)(struct net_device *dev, 1606 bool new_carrier); 1607 int (*ndo_get_phys_port_id)(struct net_device *dev, 1608 struct netdev_phys_item_id *ppid); 1609 int (*ndo_get_port_parent_id)(struct net_device *dev, 1610 struct netdev_phys_item_id *ppid); 1611 int (*ndo_get_phys_port_name)(struct net_device *dev, 1612 char *name, size_t len); 1613 void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1614 struct net_device *dev); 1615 void (*ndo_dfwd_del_station)(struct net_device *pdev, 1616 void *priv); 1617 1618 int (*ndo_set_tx_maxrate)(struct net_device *dev, 1619 int queue_index, 1620 u32 maxrate); 1621 int (*ndo_get_iflink)(const struct net_device *dev); 1622 int (*ndo_fill_metadata_dst)(struct net_device *dev, 1623 struct sk_buff *skb); 1624 void (*ndo_set_rx_headroom)(struct net_device *dev, 1625 int needed_headroom); 1626 int (*ndo_bpf)(struct net_device *dev, 1627 struct netdev_bpf *bpf); 1628 int (*ndo_xdp_xmit)(struct net_device *dev, int n, 1629 struct xdp_frame **xdp, 1630 u32 flags); 1631 struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1632 struct xdp_buff *xdp); 1633 int (*ndo_xsk_wakeup)(struct net_device *dev, 1634 u32 queue_id, u32 flags); 1635 int (*ndo_tunnel_ctl)(struct net_device *dev, 1636 struct ip_tunnel_parm *p, int cmd); 1637 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); 1638 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, 1639 struct net_device_path *path); 1640 ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1641 const struct skb_shared_hwtstamps *hwtstamps, 1642 bool cycles); 1643 int (*ndo_hwtstamp_get)(struct net_device *dev, 1644 struct kernel_hwtstamp_config *kernel_config); 1645 int (*ndo_hwtstamp_set)(struct net_device *dev, 1646 struct kernel_hwtstamp_config *kernel_config, 1647 struct netlink_ext_ack *extack); 1648 }; 1649 1650 /** 1651 * enum netdev_priv_flags - &struct net_device priv_flags 1652 * 1653 * These are the &struct net_device, they are only set internally 1654 * by drivers and used in the kernel. These flags are invisible to 1655 * userspace; this means that the order of these flags can change 1656 * during any kernel release. 1657 * 1658 * You should have a pretty good reason to be extending these flags. 1659 * 1660 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1661 * @IFF_EBRIDGE: Ethernet bridging device 1662 * @IFF_BONDING: bonding master or slave 1663 * @IFF_ISATAP: ISATAP interface (RFC4214) 1664 * @IFF_WAN_HDLC: WAN HDLC device 1665 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1666 * release skb->dst 1667 * @IFF_DONT_BRIDGE: disallow bridging this ether dev 1668 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time 1669 * @IFF_MACVLAN_PORT: device used as macvlan port 1670 * @IFF_BRIDGE_PORT: device used as bridge port 1671 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port 1672 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit 1673 * @IFF_UNICAST_FLT: Supports unicast filtering 1674 * @IFF_TEAM_PORT: device used as team port 1675 * @IFF_SUPP_NOFCS: device supports sending custom FCS 1676 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1677 * change when it's running 1678 * @IFF_MACVLAN: Macvlan device 1679 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account 1680 * underlying stacked devices 1681 * @IFF_L3MDEV_MASTER: device is an L3 master device 1682 * @IFF_NO_QUEUE: device can run without qdisc attached 1683 * @IFF_OPENVSWITCH: device is a Open vSwitch master 1684 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1685 * @IFF_TEAM: device is a team device 1686 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured 1687 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external 1688 * entity (i.e. the master device for bridged veth) 1689 * @IFF_MACSEC: device is a MACsec device 1690 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1691 * @IFF_FAILOVER: device is a failover master device 1692 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1693 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1694 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1695 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1696 * skb_headlen(skb) == 0 (data starts from frag0) 1697 * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN 1698 * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to 1699 * ndo_hwtstamp_set() for all timestamp requests regardless of source, 1700 * even if those aren't HWTSTAMP_SOURCE_NETDEV. 1701 */ 1702 enum netdev_priv_flags { 1703 IFF_802_1Q_VLAN = 1<<0, 1704 IFF_EBRIDGE = 1<<1, 1705 IFF_BONDING = 1<<2, 1706 IFF_ISATAP = 1<<3, 1707 IFF_WAN_HDLC = 1<<4, 1708 IFF_XMIT_DST_RELEASE = 1<<5, 1709 IFF_DONT_BRIDGE = 1<<6, 1710 IFF_DISABLE_NETPOLL = 1<<7, 1711 IFF_MACVLAN_PORT = 1<<8, 1712 IFF_BRIDGE_PORT = 1<<9, 1713 IFF_OVS_DATAPATH = 1<<10, 1714 IFF_TX_SKB_SHARING = 1<<11, 1715 IFF_UNICAST_FLT = 1<<12, 1716 IFF_TEAM_PORT = 1<<13, 1717 IFF_SUPP_NOFCS = 1<<14, 1718 IFF_LIVE_ADDR_CHANGE = 1<<15, 1719 IFF_MACVLAN = 1<<16, 1720 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1721 IFF_L3MDEV_MASTER = 1<<18, 1722 IFF_NO_QUEUE = 1<<19, 1723 IFF_OPENVSWITCH = 1<<20, 1724 IFF_L3MDEV_SLAVE = 1<<21, 1725 IFF_TEAM = 1<<22, 1726 IFF_RXFH_CONFIGURED = 1<<23, 1727 IFF_PHONY_HEADROOM = 1<<24, 1728 IFF_MACSEC = 1<<25, 1729 IFF_NO_RX_HANDLER = 1<<26, 1730 IFF_FAILOVER = 1<<27, 1731 IFF_FAILOVER_SLAVE = 1<<28, 1732 IFF_L3MDEV_RX_HANDLER = 1<<29, 1733 IFF_NO_ADDRCONF = BIT_ULL(30), 1734 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1735 IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), 1736 IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33), 1737 }; 1738 1739 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1740 #define IFF_EBRIDGE IFF_EBRIDGE 1741 #define IFF_BONDING IFF_BONDING 1742 #define IFF_ISATAP IFF_ISATAP 1743 #define IFF_WAN_HDLC IFF_WAN_HDLC 1744 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE 1745 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE 1746 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL 1747 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT 1748 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT 1749 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH 1750 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING 1751 #define IFF_UNICAST_FLT IFF_UNICAST_FLT 1752 #define IFF_TEAM_PORT IFF_TEAM_PORT 1753 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS 1754 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE 1755 #define IFF_MACVLAN IFF_MACVLAN 1756 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM 1757 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER 1758 #define IFF_NO_QUEUE IFF_NO_QUEUE 1759 #define IFF_OPENVSWITCH IFF_OPENVSWITCH 1760 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE 1761 #define IFF_TEAM IFF_TEAM 1762 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED 1763 #define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM 1764 #define IFF_MACSEC IFF_MACSEC 1765 #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER 1766 #define IFF_FAILOVER IFF_FAILOVER 1767 #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE 1768 #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER 1769 #define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR 1770 1771 /* Specifies the type of the struct net_device::ml_priv pointer */ 1772 enum netdev_ml_priv_type { 1773 ML_PRIV_NONE, 1774 ML_PRIV_CAN, 1775 }; 1776 1777 /** 1778 * struct net_device - The DEVICE structure. 1779 * 1780 * Actually, this whole structure is a big mistake. It mixes I/O 1781 * data with strictly "high-level" data, and it has to know about 1782 * almost every data structure used in the INET module. 1783 * 1784 * @name: This is the first field of the "visible" part of this structure 1785 * (i.e. as seen by users in the "Space.c" file). It is the name 1786 * of the interface. 1787 * 1788 * @name_node: Name hashlist node 1789 * @ifalias: SNMP alias 1790 * @mem_end: Shared memory end 1791 * @mem_start: Shared memory start 1792 * @base_addr: Device I/O address 1793 * @irq: Device IRQ number 1794 * 1795 * @state: Generic network queuing layer state, see netdev_state_t 1796 * @dev_list: The global list of network devices 1797 * @napi_list: List entry used for polling NAPI devices 1798 * @unreg_list: List entry when we are unregistering the 1799 * device; see the function unregister_netdev 1800 * @close_list: List entry used when we are closing the device 1801 * @ptype_all: Device-specific packet handlers for all protocols 1802 * @ptype_specific: Device-specific, protocol-specific packet handlers 1803 * 1804 * @adj_list: Directly linked devices, like slaves for bonding 1805 * @features: Currently active device features 1806 * @hw_features: User-changeable features 1807 * 1808 * @wanted_features: User-requested features 1809 * @vlan_features: Mask of features inheritable by VLAN devices 1810 * 1811 * @hw_enc_features: Mask of features inherited by encapsulating devices 1812 * This field indicates what encapsulation 1813 * offloads the hardware is capable of doing, 1814 * and drivers will need to set them appropriately. 1815 * 1816 * @mpls_features: Mask of features inheritable by MPLS 1817 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1818 * 1819 * @ifindex: interface index 1820 * @group: The group the device belongs to 1821 * 1822 * @stats: Statistics struct, which was left as a legacy, use 1823 * rtnl_link_stats64 instead 1824 * 1825 * @core_stats: core networking counters, 1826 * do not use this in drivers 1827 * @carrier_up_count: Number of times the carrier has been up 1828 * @carrier_down_count: Number of times the carrier has been down 1829 * 1830 * @wireless_handlers: List of functions to handle Wireless Extensions, 1831 * instead of ioctl, 1832 * see <net/iw_handler.h> for details. 1833 * @wireless_data: Instance data managed by the core of wireless extensions 1834 * 1835 * @netdev_ops: Includes several pointers to callbacks, 1836 * if one wants to override the ndo_*() functions 1837 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1838 * @ethtool_ops: Management operations 1839 * @l3mdev_ops: Layer 3 master device operations 1840 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1841 * discovery handling. Necessary for e.g. 6LoWPAN. 1842 * @xfrmdev_ops: Transformation offload operations 1843 * @tlsdev_ops: Transport Layer Security offload operations 1844 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1845 * of Layer 2 headers. 1846 * 1847 * @flags: Interface flags (a la BSD) 1848 * @xdp_features: XDP capability supported by the device 1849 * @priv_flags: Like 'flags' but invisible to userspace, 1850 * see if.h for the definitions 1851 * @gflags: Global flags ( kept as legacy ) 1852 * @padded: How much padding added by alloc_netdev() 1853 * @operstate: RFC2863 operstate 1854 * @link_mode: Mapping policy to operstate 1855 * @if_port: Selectable AUI, TP, ... 1856 * @dma: DMA channel 1857 * @mtu: Interface MTU value 1858 * @min_mtu: Interface Minimum MTU value 1859 * @max_mtu: Interface Maximum MTU value 1860 * @type: Interface hardware type 1861 * @hard_header_len: Maximum hardware header length. 1862 * @min_header_len: Minimum hardware header length 1863 * 1864 * @needed_headroom: Extra headroom the hardware may need, but not in all 1865 * cases can this be guaranteed 1866 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1867 * cases can this be guaranteed. Some cases also use 1868 * LL_MAX_HEADER instead to allocate the skb 1869 * 1870 * interface address info: 1871 * 1872 * @perm_addr: Permanent hw address 1873 * @addr_assign_type: Hw address assignment type 1874 * @addr_len: Hardware address length 1875 * @upper_level: Maximum depth level of upper devices. 1876 * @lower_level: Maximum depth level of lower devices. 1877 * @neigh_priv_len: Used in neigh_alloc() 1878 * @dev_id: Used to differentiate devices that share 1879 * the same link layer address 1880 * @dev_port: Used to differentiate devices that share 1881 * the same function 1882 * @addr_list_lock: XXX: need comments on this one 1883 * @name_assign_type: network interface name assignment type 1884 * @uc_promisc: Counter that indicates promiscuous mode 1885 * has been enabled due to the need to listen to 1886 * additional unicast addresses in a device that 1887 * does not implement ndo_set_rx_mode() 1888 * @uc: unicast mac addresses 1889 * @mc: multicast mac addresses 1890 * @dev_addrs: list of device hw addresses 1891 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1892 * @promiscuity: Number of times the NIC is told to work in 1893 * promiscuous mode; if it becomes 0 the NIC will 1894 * exit promiscuous mode 1895 * @allmulti: Counter, enables or disables allmulticast mode 1896 * 1897 * @vlan_info: VLAN info 1898 * @dsa_ptr: dsa specific data 1899 * @tipc_ptr: TIPC specific data 1900 * @atalk_ptr: AppleTalk link 1901 * @ip_ptr: IPv4 specific data 1902 * @ip6_ptr: IPv6 specific data 1903 * @ax25_ptr: AX.25 specific data 1904 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1905 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1906 * device struct 1907 * @mpls_ptr: mpls_dev struct pointer 1908 * @mctp_ptr: MCTP specific data 1909 * 1910 * @dev_addr: Hw address (before bcast, 1911 * because most packets are unicast) 1912 * 1913 * @_rx: Array of RX queues 1914 * @num_rx_queues: Number of RX queues 1915 * allocated at register_netdev() time 1916 * @real_num_rx_queues: Number of RX queues currently active in device 1917 * @xdp_prog: XDP sockets filter program pointer 1918 * @gro_flush_timeout: timeout for GRO layer in NAPI 1919 * @napi_defer_hard_irqs: If not zero, provides a counter that would 1920 * allow to avoid NIC hard IRQ, on busy queues. 1921 * 1922 * @rx_handler: handler for received packets 1923 * @rx_handler_data: XXX: need comments on this one 1924 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1925 * @ingress_queue: XXX: need comments on this one 1926 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1927 * @broadcast: hw bcast address 1928 * 1929 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1930 * indexed by RX queue number. Assigned by driver. 1931 * This must only be set if the ndo_rx_flow_steer 1932 * operation is defined 1933 * @index_hlist: Device index hash chain 1934 * 1935 * @_tx: Array of TX queues 1936 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1937 * @real_num_tx_queues: Number of TX queues currently active in device 1938 * @qdisc: Root qdisc from userspace point of view 1939 * @tx_queue_len: Max frames per queue allowed 1940 * @tx_global_lock: XXX: need comments on this one 1941 * @xdp_bulkq: XDP device bulk queue 1942 * @xps_maps: all CPUs/RXQs maps for XPS device 1943 * 1944 * @xps_maps: XXX: need comments on this one 1945 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1946 * @nf_hooks_egress: netfilter hooks executed for egress packets 1947 * @qdisc_hash: qdisc hash table 1948 * @watchdog_timeo: Represents the timeout that is used by 1949 * the watchdog (see dev_watchdog()) 1950 * @watchdog_timer: List of timers 1951 * 1952 * @proto_down_reason: reason a netdev interface is held down 1953 * @pcpu_refcnt: Number of references to this device 1954 * @dev_refcnt: Number of references to this device 1955 * @refcnt_tracker: Tracker directory for tracked references to this device 1956 * @todo_list: Delayed register/unregister 1957 * @link_watch_list: XXX: need comments on this one 1958 * 1959 * @reg_state: Register/unregister state machine 1960 * @dismantle: Device is going to be freed 1961 * @rtnl_link_state: This enum represents the phases of creating 1962 * a new link 1963 * 1964 * @needs_free_netdev: Should unregister perform free_netdev? 1965 * @priv_destructor: Called from unregister 1966 * @npinfo: XXX: need comments on this one 1967 * @nd_net: Network namespace this network device is inside 1968 * 1969 * @ml_priv: Mid-layer private 1970 * @ml_priv_type: Mid-layer private type 1971 * @lstats: Loopback statistics 1972 * @tstats: Tunnel statistics 1973 * @dstats: Dummy statistics 1974 * @vstats: Virtual ethernet statistics 1975 * 1976 * @garp_port: GARP 1977 * @mrp_port: MRP 1978 * 1979 * @dm_private: Drop monitor private 1980 * 1981 * @dev: Class/net/name entry 1982 * @sysfs_groups: Space for optional device, statistics and wireless 1983 * sysfs groups 1984 * 1985 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1986 * @rtnl_link_ops: Rtnl_link_ops 1987 * 1988 * @gso_max_size: Maximum size of generic segmentation offload 1989 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1990 * @gso_max_segs: Maximum number of segments that can be passed to the 1991 * NIC for GSO 1992 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 1993 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 1994 * for IPv4. 1995 * 1996 * @dcbnl_ops: Data Center Bridging netlink ops 1997 * @num_tc: Number of traffic classes in the net device 1998 * @tc_to_txq: XXX: need comments on this one 1999 * @prio_tc_map: XXX: need comments on this one 2000 * 2001 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 2002 * 2003 * @priomap: XXX: need comments on this one 2004 * @phydev: Physical device may attach itself 2005 * for hardware timestamping 2006 * @sfp_bus: attached &struct sfp_bus structure. 2007 * 2008 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 2009 * 2010 * @proto_down: protocol port state information can be sent to the 2011 * switch driver and used to set the phys state of the 2012 * switch port. 2013 * 2014 * @wol_enabled: Wake-on-LAN is enabled 2015 * 2016 * @threaded: napi threaded mode is enabled 2017 * 2018 * @net_notifier_list: List of per-net netdev notifier block 2019 * that follow this device when it is moved 2020 * to another network namespace. 2021 * 2022 * @macsec_ops: MACsec offloading ops 2023 * 2024 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2025 * offload capabilities of the device 2026 * @udp_tunnel_nic: UDP tunnel offload state 2027 * @xdp_state: stores info on attached XDP BPF programs 2028 * 2029 * @nested_level: Used as a parameter of spin_lock_nested() of 2030 * dev->addr_list_lock. 2031 * @unlink_list: As netif_addr_lock() can be called recursively, 2032 * keep a list of interfaces to be deleted. 2033 * @gro_max_size: Maximum size of aggregated packet in generic 2034 * receive offload (GRO) 2035 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2036 * receive offload (GRO), for IPv4. 2037 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2038 * zero copy driver 2039 * 2040 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2041 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2042 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2043 * @dev_registered_tracker: tracker for reference held while 2044 * registered 2045 * @offload_xstats_l3: L3 HW stats for this netdevice. 2046 * 2047 * @devlink_port: Pointer to related devlink port structure. 2048 * Assigned by a driver before netdev registration using 2049 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2050 * during the time netdevice is registered. 2051 * 2052 * FIXME: cleanup struct net_device such that network protocol info 2053 * moves out. 2054 */ 2055 2056 struct net_device { 2057 char name[IFNAMSIZ]; 2058 struct netdev_name_node *name_node; 2059 struct dev_ifalias __rcu *ifalias; 2060 /* 2061 * I/O specific fields 2062 * FIXME: Merge these and struct ifmap into one 2063 */ 2064 unsigned long mem_end; 2065 unsigned long mem_start; 2066 unsigned long base_addr; 2067 2068 /* 2069 * Some hardware also needs these fields (state,dev_list, 2070 * napi_list,unreg_list,close_list) but they are not 2071 * part of the usual set specified in Space.c. 2072 */ 2073 2074 unsigned long state; 2075 2076 struct list_head dev_list; 2077 struct list_head napi_list; 2078 struct list_head unreg_list; 2079 struct list_head close_list; 2080 struct list_head ptype_all; 2081 struct list_head ptype_specific; 2082 2083 struct { 2084 struct list_head upper; 2085 struct list_head lower; 2086 } adj_list; 2087 2088 /* Read-mostly cache-line for fast-path access */ 2089 unsigned int flags; 2090 xdp_features_t xdp_features; 2091 unsigned long long priv_flags; 2092 const struct net_device_ops *netdev_ops; 2093 const struct xdp_metadata_ops *xdp_metadata_ops; 2094 int ifindex; 2095 unsigned short gflags; 2096 unsigned short hard_header_len; 2097 2098 /* Note : dev->mtu is often read without holding a lock. 2099 * Writers usually hold RTNL. 2100 * It is recommended to use READ_ONCE() to annotate the reads, 2101 * and to use WRITE_ONCE() to annotate the writes. 2102 */ 2103 unsigned int mtu; 2104 unsigned short needed_headroom; 2105 unsigned short needed_tailroom; 2106 2107 netdev_features_t features; 2108 netdev_features_t hw_features; 2109 netdev_features_t wanted_features; 2110 netdev_features_t vlan_features; 2111 netdev_features_t hw_enc_features; 2112 netdev_features_t mpls_features; 2113 netdev_features_t gso_partial_features; 2114 2115 unsigned int min_mtu; 2116 unsigned int max_mtu; 2117 unsigned short type; 2118 unsigned char min_header_len; 2119 unsigned char name_assign_type; 2120 2121 int group; 2122 2123 struct net_device_stats stats; /* not used by modern drivers */ 2124 2125 struct net_device_core_stats __percpu *core_stats; 2126 2127 /* Stats to monitor link on/off, flapping */ 2128 atomic_t carrier_up_count; 2129 atomic_t carrier_down_count; 2130 2131 #ifdef CONFIG_WIRELESS_EXT 2132 const struct iw_handler_def *wireless_handlers; 2133 struct iw_public_data *wireless_data; 2134 #endif 2135 const struct ethtool_ops *ethtool_ops; 2136 #ifdef CONFIG_NET_L3_MASTER_DEV 2137 const struct l3mdev_ops *l3mdev_ops; 2138 #endif 2139 #if IS_ENABLED(CONFIG_IPV6) 2140 const struct ndisc_ops *ndisc_ops; 2141 #endif 2142 2143 #ifdef CONFIG_XFRM_OFFLOAD 2144 const struct xfrmdev_ops *xfrmdev_ops; 2145 #endif 2146 2147 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2148 const struct tlsdev_ops *tlsdev_ops; 2149 #endif 2150 2151 const struct header_ops *header_ops; 2152 2153 unsigned char operstate; 2154 unsigned char link_mode; 2155 2156 unsigned char if_port; 2157 unsigned char dma; 2158 2159 /* Interface address info. */ 2160 unsigned char perm_addr[MAX_ADDR_LEN]; 2161 unsigned char addr_assign_type; 2162 unsigned char addr_len; 2163 unsigned char upper_level; 2164 unsigned char lower_level; 2165 2166 unsigned short neigh_priv_len; 2167 unsigned short dev_id; 2168 unsigned short dev_port; 2169 unsigned short padded; 2170 2171 spinlock_t addr_list_lock; 2172 int irq; 2173 2174 struct netdev_hw_addr_list uc; 2175 struct netdev_hw_addr_list mc; 2176 struct netdev_hw_addr_list dev_addrs; 2177 2178 #ifdef CONFIG_SYSFS 2179 struct kset *queues_kset; 2180 #endif 2181 #ifdef CONFIG_LOCKDEP 2182 struct list_head unlink_list; 2183 #endif 2184 unsigned int promiscuity; 2185 unsigned int allmulti; 2186 bool uc_promisc; 2187 #ifdef CONFIG_LOCKDEP 2188 unsigned char nested_level; 2189 #endif 2190 2191 2192 /* Protocol-specific pointers */ 2193 2194 struct in_device __rcu *ip_ptr; 2195 struct inet6_dev __rcu *ip6_ptr; 2196 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2197 struct vlan_info __rcu *vlan_info; 2198 #endif 2199 #if IS_ENABLED(CONFIG_NET_DSA) 2200 struct dsa_port *dsa_ptr; 2201 #endif 2202 #if IS_ENABLED(CONFIG_TIPC) 2203 struct tipc_bearer __rcu *tipc_ptr; 2204 #endif 2205 #if IS_ENABLED(CONFIG_ATALK) 2206 void *atalk_ptr; 2207 #endif 2208 #if IS_ENABLED(CONFIG_AX25) 2209 void *ax25_ptr; 2210 #endif 2211 #if IS_ENABLED(CONFIG_CFG80211) 2212 struct wireless_dev *ieee80211_ptr; 2213 #endif 2214 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2215 struct wpan_dev *ieee802154_ptr; 2216 #endif 2217 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2218 struct mpls_dev __rcu *mpls_ptr; 2219 #endif 2220 #if IS_ENABLED(CONFIG_MCTP) 2221 struct mctp_dev __rcu *mctp_ptr; 2222 #endif 2223 2224 /* 2225 * Cache lines mostly used on receive path (including eth_type_trans()) 2226 */ 2227 /* Interface address info used in eth_type_trans() */ 2228 const unsigned char *dev_addr; 2229 2230 struct netdev_rx_queue *_rx; 2231 unsigned int num_rx_queues; 2232 unsigned int real_num_rx_queues; 2233 2234 struct bpf_prog __rcu *xdp_prog; 2235 unsigned long gro_flush_timeout; 2236 int napi_defer_hard_irqs; 2237 #define GRO_LEGACY_MAX_SIZE 65536u 2238 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2239 * and shinfo->gso_segs is a 16bit field. 2240 */ 2241 #define GRO_MAX_SIZE (8 * 65535u) 2242 unsigned int gro_max_size; 2243 unsigned int gro_ipv4_max_size; 2244 unsigned int xdp_zc_max_segs; 2245 rx_handler_func_t __rcu *rx_handler; 2246 void __rcu *rx_handler_data; 2247 #ifdef CONFIG_NET_XGRESS 2248 struct bpf_mprog_entry __rcu *tcx_ingress; 2249 #endif 2250 struct netdev_queue __rcu *ingress_queue; 2251 #ifdef CONFIG_NETFILTER_INGRESS 2252 struct nf_hook_entries __rcu *nf_hooks_ingress; 2253 #endif 2254 2255 unsigned char broadcast[MAX_ADDR_LEN]; 2256 #ifdef CONFIG_RFS_ACCEL 2257 struct cpu_rmap *rx_cpu_rmap; 2258 #endif 2259 struct hlist_node index_hlist; 2260 2261 /* 2262 * Cache lines mostly used on transmit path 2263 */ 2264 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 2265 unsigned int num_tx_queues; 2266 unsigned int real_num_tx_queues; 2267 struct Qdisc __rcu *qdisc; 2268 unsigned int tx_queue_len; 2269 spinlock_t tx_global_lock; 2270 2271 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2272 2273 #ifdef CONFIG_XPS 2274 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2275 #endif 2276 #ifdef CONFIG_NET_XGRESS 2277 struct bpf_mprog_entry __rcu *tcx_egress; 2278 #endif 2279 #ifdef CONFIG_NETFILTER_EGRESS 2280 struct nf_hook_entries __rcu *nf_hooks_egress; 2281 #endif 2282 2283 #ifdef CONFIG_NET_SCHED 2284 DECLARE_HASHTABLE (qdisc_hash, 4); 2285 #endif 2286 /* These may be needed for future network-power-down code. */ 2287 struct timer_list watchdog_timer; 2288 int watchdog_timeo; 2289 2290 u32 proto_down_reason; 2291 2292 struct list_head todo_list; 2293 2294 #ifdef CONFIG_PCPU_DEV_REFCNT 2295 int __percpu *pcpu_refcnt; 2296 #else 2297 refcount_t dev_refcnt; 2298 #endif 2299 struct ref_tracker_dir refcnt_tracker; 2300 2301 struct list_head link_watch_list; 2302 2303 enum { NETREG_UNINITIALIZED=0, 2304 NETREG_REGISTERED, /* completed register_netdevice */ 2305 NETREG_UNREGISTERING, /* called unregister_netdevice */ 2306 NETREG_UNREGISTERED, /* completed unregister todo */ 2307 NETREG_RELEASED, /* called free_netdev */ 2308 NETREG_DUMMY, /* dummy device for NAPI poll */ 2309 } reg_state:8; 2310 2311 bool dismantle; 2312 2313 enum { 2314 RTNL_LINK_INITIALIZED, 2315 RTNL_LINK_INITIALIZING, 2316 } rtnl_link_state:16; 2317 2318 bool needs_free_netdev; 2319 void (*priv_destructor)(struct net_device *dev); 2320 2321 #ifdef CONFIG_NETPOLL 2322 struct netpoll_info __rcu *npinfo; 2323 #endif 2324 2325 possible_net_t nd_net; 2326 2327 /* mid-layer private */ 2328 void *ml_priv; 2329 enum netdev_ml_priv_type ml_priv_type; 2330 2331 union { 2332 struct pcpu_lstats __percpu *lstats; 2333 struct pcpu_sw_netstats __percpu *tstats; 2334 struct pcpu_dstats __percpu *dstats; 2335 }; 2336 2337 #if IS_ENABLED(CONFIG_GARP) 2338 struct garp_port __rcu *garp_port; 2339 #endif 2340 #if IS_ENABLED(CONFIG_MRP) 2341 struct mrp_port __rcu *mrp_port; 2342 #endif 2343 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2344 struct dm_hw_stat_delta __rcu *dm_private; 2345 #endif 2346 struct device dev; 2347 const struct attribute_group *sysfs_groups[4]; 2348 const struct attribute_group *sysfs_rx_queue_group; 2349 2350 const struct rtnl_link_ops *rtnl_link_ops; 2351 2352 /* for setting kernel sock attribute on TCP connection setup */ 2353 #define GSO_MAX_SEGS 65535u 2354 #define GSO_LEGACY_MAX_SIZE 65536u 2355 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2356 * and shinfo->gso_segs is a 16bit field. 2357 */ 2358 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2359 2360 unsigned int gso_max_size; 2361 #define TSO_LEGACY_MAX_SIZE 65536 2362 #define TSO_MAX_SIZE UINT_MAX 2363 unsigned int tso_max_size; 2364 u16 gso_max_segs; 2365 #define TSO_MAX_SEGS U16_MAX 2366 u16 tso_max_segs; 2367 unsigned int gso_ipv4_max_size; 2368 2369 #ifdef CONFIG_DCB 2370 const struct dcbnl_rtnl_ops *dcbnl_ops; 2371 #endif 2372 s16 num_tc; 2373 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2374 u8 prio_tc_map[TC_BITMASK + 1]; 2375 2376 #if IS_ENABLED(CONFIG_FCOE) 2377 unsigned int fcoe_ddp_xid; 2378 #endif 2379 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2380 struct netprio_map __rcu *priomap; 2381 #endif 2382 struct phy_device *phydev; 2383 struct sfp_bus *sfp_bus; 2384 struct lock_class_key *qdisc_tx_busylock; 2385 bool proto_down; 2386 unsigned wol_enabled:1; 2387 unsigned threaded:1; 2388 2389 struct list_head net_notifier_list; 2390 2391 #if IS_ENABLED(CONFIG_MACSEC) 2392 /* MACsec management functions */ 2393 const struct macsec_ops *macsec_ops; 2394 #endif 2395 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2396 struct udp_tunnel_nic *udp_tunnel_nic; 2397 2398 /* protected by rtnl_lock */ 2399 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2400 2401 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2402 netdevice_tracker linkwatch_dev_tracker; 2403 netdevice_tracker watchdog_dev_tracker; 2404 netdevice_tracker dev_registered_tracker; 2405 struct rtnl_hw_stats64 *offload_xstats_l3; 2406 2407 struct devlink_port *devlink_port; 2408 }; 2409 #define to_net_dev(d) container_of(d, struct net_device, dev) 2410 2411 /* 2412 * Driver should use this to assign devlink port instance to a netdevice 2413 * before it registers the netdevice. Therefore devlink_port is static 2414 * during the netdev lifetime after it is registered. 2415 */ 2416 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2417 ({ \ 2418 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2419 ((dev)->devlink_port = (port)); \ 2420 }) 2421 2422 static inline bool netif_elide_gro(const struct net_device *dev) 2423 { 2424 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2425 return true; 2426 return false; 2427 } 2428 2429 #define NETDEV_ALIGN 32 2430 2431 static inline 2432 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2433 { 2434 return dev->prio_tc_map[prio & TC_BITMASK]; 2435 } 2436 2437 static inline 2438 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2439 { 2440 if (tc >= dev->num_tc) 2441 return -EINVAL; 2442 2443 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2444 return 0; 2445 } 2446 2447 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2448 void netdev_reset_tc(struct net_device *dev); 2449 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2450 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2451 2452 static inline 2453 int netdev_get_num_tc(struct net_device *dev) 2454 { 2455 return dev->num_tc; 2456 } 2457 2458 static inline void net_prefetch(void *p) 2459 { 2460 prefetch(p); 2461 #if L1_CACHE_BYTES < 128 2462 prefetch((u8 *)p + L1_CACHE_BYTES); 2463 #endif 2464 } 2465 2466 static inline void net_prefetchw(void *p) 2467 { 2468 prefetchw(p); 2469 #if L1_CACHE_BYTES < 128 2470 prefetchw((u8 *)p + L1_CACHE_BYTES); 2471 #endif 2472 } 2473 2474 void netdev_unbind_sb_channel(struct net_device *dev, 2475 struct net_device *sb_dev); 2476 int netdev_bind_sb_channel_queue(struct net_device *dev, 2477 struct net_device *sb_dev, 2478 u8 tc, u16 count, u16 offset); 2479 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2480 static inline int netdev_get_sb_channel(struct net_device *dev) 2481 { 2482 return max_t(int, -dev->num_tc, 0); 2483 } 2484 2485 static inline 2486 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2487 unsigned int index) 2488 { 2489 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2490 return &dev->_tx[index]; 2491 } 2492 2493 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2494 const struct sk_buff *skb) 2495 { 2496 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2497 } 2498 2499 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2500 void (*f)(struct net_device *, 2501 struct netdev_queue *, 2502 void *), 2503 void *arg) 2504 { 2505 unsigned int i; 2506 2507 for (i = 0; i < dev->num_tx_queues; i++) 2508 f(dev, &dev->_tx[i], arg); 2509 } 2510 2511 #define netdev_lockdep_set_classes(dev) \ 2512 { \ 2513 static struct lock_class_key qdisc_tx_busylock_key; \ 2514 static struct lock_class_key qdisc_xmit_lock_key; \ 2515 static struct lock_class_key dev_addr_list_lock_key; \ 2516 unsigned int i; \ 2517 \ 2518 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2519 lockdep_set_class(&(dev)->addr_list_lock, \ 2520 &dev_addr_list_lock_key); \ 2521 for (i = 0; i < (dev)->num_tx_queues; i++) \ 2522 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2523 &qdisc_xmit_lock_key); \ 2524 } 2525 2526 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 2527 struct net_device *sb_dev); 2528 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 2529 struct sk_buff *skb, 2530 struct net_device *sb_dev); 2531 2532 /* returns the headroom that the master device needs to take in account 2533 * when forwarding to this dev 2534 */ 2535 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) 2536 { 2537 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; 2538 } 2539 2540 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) 2541 { 2542 if (dev->netdev_ops->ndo_set_rx_headroom) 2543 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); 2544 } 2545 2546 /* set the device rx headroom to the dev's default */ 2547 static inline void netdev_reset_rx_headroom(struct net_device *dev) 2548 { 2549 netdev_set_rx_headroom(dev, -1); 2550 } 2551 2552 static inline void *netdev_get_ml_priv(struct net_device *dev, 2553 enum netdev_ml_priv_type type) 2554 { 2555 if (dev->ml_priv_type != type) 2556 return NULL; 2557 2558 return dev->ml_priv; 2559 } 2560 2561 static inline void netdev_set_ml_priv(struct net_device *dev, 2562 void *ml_priv, 2563 enum netdev_ml_priv_type type) 2564 { 2565 WARN(dev->ml_priv_type && dev->ml_priv_type != type, 2566 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", 2567 dev->ml_priv_type, type); 2568 WARN(!dev->ml_priv_type && dev->ml_priv, 2569 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); 2570 2571 dev->ml_priv = ml_priv; 2572 dev->ml_priv_type = type; 2573 } 2574 2575 /* 2576 * Net namespace inlines 2577 */ 2578 static inline 2579 struct net *dev_net(const struct net_device *dev) 2580 { 2581 return read_pnet(&dev->nd_net); 2582 } 2583 2584 static inline 2585 void dev_net_set(struct net_device *dev, struct net *net) 2586 { 2587 write_pnet(&dev->nd_net, net); 2588 } 2589 2590 /** 2591 * netdev_priv - access network device private data 2592 * @dev: network device 2593 * 2594 * Get network device private data 2595 */ 2596 static inline void *netdev_priv(const struct net_device *dev) 2597 { 2598 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); 2599 } 2600 2601 /* Set the sysfs physical device reference for the network logical device 2602 * if set prior to registration will cause a symlink during initialization. 2603 */ 2604 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 2605 2606 /* Set the sysfs device type for the network logical device to allow 2607 * fine-grained identification of different network device types. For 2608 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 2609 */ 2610 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2611 2612 /* Default NAPI poll() weight 2613 * Device drivers are strongly advised to not use bigger value 2614 */ 2615 #define NAPI_POLL_WEIGHT 64 2616 2617 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 2618 int (*poll)(struct napi_struct *, int), int weight); 2619 2620 /** 2621 * netif_napi_add() - initialize a NAPI context 2622 * @dev: network device 2623 * @napi: NAPI context 2624 * @poll: polling function 2625 * 2626 * netif_napi_add() must be used to initialize a NAPI context prior to calling 2627 * *any* of the other NAPI-related functions. 2628 */ 2629 static inline void 2630 netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2631 int (*poll)(struct napi_struct *, int)) 2632 { 2633 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2634 } 2635 2636 static inline void 2637 netif_napi_add_tx_weight(struct net_device *dev, 2638 struct napi_struct *napi, 2639 int (*poll)(struct napi_struct *, int), 2640 int weight) 2641 { 2642 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); 2643 netif_napi_add_weight(dev, napi, poll, weight); 2644 } 2645 2646 /** 2647 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only 2648 * @dev: network device 2649 * @napi: NAPI context 2650 * @poll: polling function 2651 * 2652 * This variant of netif_napi_add() should be used from drivers using NAPI 2653 * to exclusively poll a TX queue. 2654 * This will avoid we add it into napi_hash[], thus polluting this hash table. 2655 */ 2656 static inline void netif_napi_add_tx(struct net_device *dev, 2657 struct napi_struct *napi, 2658 int (*poll)(struct napi_struct *, int)) 2659 { 2660 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2661 } 2662 2663 /** 2664 * __netif_napi_del - remove a NAPI context 2665 * @napi: NAPI context 2666 * 2667 * Warning: caller must observe RCU grace period before freeing memory 2668 * containing @napi. Drivers might want to call this helper to combine 2669 * all the needed RCU grace periods into a single one. 2670 */ 2671 void __netif_napi_del(struct napi_struct *napi); 2672 2673 /** 2674 * netif_napi_del - remove a NAPI context 2675 * @napi: NAPI context 2676 * 2677 * netif_napi_del() removes a NAPI context from the network device NAPI list 2678 */ 2679 static inline void netif_napi_del(struct napi_struct *napi) 2680 { 2681 __netif_napi_del(napi); 2682 synchronize_net(); 2683 } 2684 2685 struct packet_type { 2686 __be16 type; /* This is really htons(ether_type). */ 2687 bool ignore_outgoing; 2688 struct net_device *dev; /* NULL is wildcarded here */ 2689 netdevice_tracker dev_tracker; 2690 int (*func) (struct sk_buff *, 2691 struct net_device *, 2692 struct packet_type *, 2693 struct net_device *); 2694 void (*list_func) (struct list_head *, 2695 struct packet_type *, 2696 struct net_device *); 2697 bool (*id_match)(struct packet_type *ptype, 2698 struct sock *sk); 2699 struct net *af_packet_net; 2700 void *af_packet_priv; 2701 struct list_head list; 2702 }; 2703 2704 struct offload_callbacks { 2705 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2706 netdev_features_t features); 2707 struct sk_buff *(*gro_receive)(struct list_head *head, 2708 struct sk_buff *skb); 2709 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2710 }; 2711 2712 struct packet_offload { 2713 __be16 type; /* This is really htons(ether_type). */ 2714 u16 priority; 2715 struct offload_callbacks callbacks; 2716 struct list_head list; 2717 }; 2718 2719 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2720 struct pcpu_sw_netstats { 2721 u64_stats_t rx_packets; 2722 u64_stats_t rx_bytes; 2723 u64_stats_t tx_packets; 2724 u64_stats_t tx_bytes; 2725 struct u64_stats_sync syncp; 2726 } __aligned(4 * sizeof(u64)); 2727 2728 struct pcpu_lstats { 2729 u64_stats_t packets; 2730 u64_stats_t bytes; 2731 struct u64_stats_sync syncp; 2732 } __aligned(2 * sizeof(u64)); 2733 2734 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2735 2736 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2737 { 2738 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2739 2740 u64_stats_update_begin(&tstats->syncp); 2741 u64_stats_add(&tstats->rx_bytes, len); 2742 u64_stats_inc(&tstats->rx_packets); 2743 u64_stats_update_end(&tstats->syncp); 2744 } 2745 2746 static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2747 unsigned int packets, 2748 unsigned int len) 2749 { 2750 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2751 2752 u64_stats_update_begin(&tstats->syncp); 2753 u64_stats_add(&tstats->tx_bytes, len); 2754 u64_stats_add(&tstats->tx_packets, packets); 2755 u64_stats_update_end(&tstats->syncp); 2756 } 2757 2758 static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2759 { 2760 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2761 2762 u64_stats_update_begin(&lstats->syncp); 2763 u64_stats_add(&lstats->bytes, len); 2764 u64_stats_inc(&lstats->packets); 2765 u64_stats_update_end(&lstats->syncp); 2766 } 2767 2768 #define __netdev_alloc_pcpu_stats(type, gfp) \ 2769 ({ \ 2770 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 2771 if (pcpu_stats) { \ 2772 int __cpu; \ 2773 for_each_possible_cpu(__cpu) { \ 2774 typeof(type) *stat; \ 2775 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2776 u64_stats_init(&stat->syncp); \ 2777 } \ 2778 } \ 2779 pcpu_stats; \ 2780 }) 2781 2782 #define netdev_alloc_pcpu_stats(type) \ 2783 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2784 2785 #define devm_netdev_alloc_pcpu_stats(dev, type) \ 2786 ({ \ 2787 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 2788 if (pcpu_stats) { \ 2789 int __cpu; \ 2790 for_each_possible_cpu(__cpu) { \ 2791 typeof(type) *stat; \ 2792 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2793 u64_stats_init(&stat->syncp); \ 2794 } \ 2795 } \ 2796 pcpu_stats; \ 2797 }) 2798 2799 enum netdev_lag_tx_type { 2800 NETDEV_LAG_TX_TYPE_UNKNOWN, 2801 NETDEV_LAG_TX_TYPE_RANDOM, 2802 NETDEV_LAG_TX_TYPE_BROADCAST, 2803 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 2804 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 2805 NETDEV_LAG_TX_TYPE_HASH, 2806 }; 2807 2808 enum netdev_lag_hash { 2809 NETDEV_LAG_HASH_NONE, 2810 NETDEV_LAG_HASH_L2, 2811 NETDEV_LAG_HASH_L34, 2812 NETDEV_LAG_HASH_L23, 2813 NETDEV_LAG_HASH_E23, 2814 NETDEV_LAG_HASH_E34, 2815 NETDEV_LAG_HASH_VLAN_SRCMAC, 2816 NETDEV_LAG_HASH_UNKNOWN, 2817 }; 2818 2819 struct netdev_lag_upper_info { 2820 enum netdev_lag_tx_type tx_type; 2821 enum netdev_lag_hash hash_type; 2822 }; 2823 2824 struct netdev_lag_lower_state_info { 2825 u8 link_up : 1, 2826 tx_enabled : 1; 2827 }; 2828 2829 #include <linux/notifier.h> 2830 2831 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 2832 * and the rtnetlink notification exclusion list in rtnetlink_event() when 2833 * adding new types. 2834 */ 2835 enum netdev_cmd { 2836 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2837 NETDEV_DOWN, 2838 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2839 detected a hardware crash and restarted 2840 - we can use this eg to kick tcp sessions 2841 once done */ 2842 NETDEV_CHANGE, /* Notify device state change */ 2843 NETDEV_REGISTER, 2844 NETDEV_UNREGISTER, 2845 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2846 NETDEV_CHANGEADDR, /* notify after the address change */ 2847 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2848 NETDEV_GOING_DOWN, 2849 NETDEV_CHANGENAME, 2850 NETDEV_FEAT_CHANGE, 2851 NETDEV_BONDING_FAILOVER, 2852 NETDEV_PRE_UP, 2853 NETDEV_PRE_TYPE_CHANGE, 2854 NETDEV_POST_TYPE_CHANGE, 2855 NETDEV_POST_INIT, 2856 NETDEV_PRE_UNINIT, 2857 NETDEV_RELEASE, 2858 NETDEV_NOTIFY_PEERS, 2859 NETDEV_JOIN, 2860 NETDEV_CHANGEUPPER, 2861 NETDEV_RESEND_IGMP, 2862 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2863 NETDEV_CHANGEINFODATA, 2864 NETDEV_BONDING_INFO, 2865 NETDEV_PRECHANGEUPPER, 2866 NETDEV_CHANGELOWERSTATE, 2867 NETDEV_UDP_TUNNEL_PUSH_INFO, 2868 NETDEV_UDP_TUNNEL_DROP_INFO, 2869 NETDEV_CHANGE_TX_QUEUE_LEN, 2870 NETDEV_CVLAN_FILTER_PUSH_INFO, 2871 NETDEV_CVLAN_FILTER_DROP_INFO, 2872 NETDEV_SVLAN_FILTER_PUSH_INFO, 2873 NETDEV_SVLAN_FILTER_DROP_INFO, 2874 NETDEV_OFFLOAD_XSTATS_ENABLE, 2875 NETDEV_OFFLOAD_XSTATS_DISABLE, 2876 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 2877 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 2878 NETDEV_XDP_FEAT_CHANGE, 2879 }; 2880 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 2881 2882 int register_netdevice_notifier(struct notifier_block *nb); 2883 int unregister_netdevice_notifier(struct notifier_block *nb); 2884 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 2885 int unregister_netdevice_notifier_net(struct net *net, 2886 struct notifier_block *nb); 2887 int register_netdevice_notifier_dev_net(struct net_device *dev, 2888 struct notifier_block *nb, 2889 struct netdev_net_notifier *nn); 2890 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2891 struct notifier_block *nb, 2892 struct netdev_net_notifier *nn); 2893 2894 struct netdev_notifier_info { 2895 struct net_device *dev; 2896 struct netlink_ext_ack *extack; 2897 }; 2898 2899 struct netdev_notifier_info_ext { 2900 struct netdev_notifier_info info; /* must be first */ 2901 union { 2902 u32 mtu; 2903 } ext; 2904 }; 2905 2906 struct netdev_notifier_change_info { 2907 struct netdev_notifier_info info; /* must be first */ 2908 unsigned int flags_changed; 2909 }; 2910 2911 struct netdev_notifier_changeupper_info { 2912 struct netdev_notifier_info info; /* must be first */ 2913 struct net_device *upper_dev; /* new upper dev */ 2914 bool master; /* is upper dev master */ 2915 bool linking; /* is the notification for link or unlink */ 2916 void *upper_info; /* upper dev info */ 2917 }; 2918 2919 struct netdev_notifier_changelowerstate_info { 2920 struct netdev_notifier_info info; /* must be first */ 2921 void *lower_state_info; /* is lower dev state */ 2922 }; 2923 2924 struct netdev_notifier_pre_changeaddr_info { 2925 struct netdev_notifier_info info; /* must be first */ 2926 const unsigned char *dev_addr; 2927 }; 2928 2929 enum netdev_offload_xstats_type { 2930 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 2931 }; 2932 2933 struct netdev_notifier_offload_xstats_info { 2934 struct netdev_notifier_info info; /* must be first */ 2935 enum netdev_offload_xstats_type type; 2936 2937 union { 2938 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 2939 struct netdev_notifier_offload_xstats_rd *report_delta; 2940 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 2941 struct netdev_notifier_offload_xstats_ru *report_used; 2942 }; 2943 }; 2944 2945 int netdev_offload_xstats_enable(struct net_device *dev, 2946 enum netdev_offload_xstats_type type, 2947 struct netlink_ext_ack *extack); 2948 int netdev_offload_xstats_disable(struct net_device *dev, 2949 enum netdev_offload_xstats_type type); 2950 bool netdev_offload_xstats_enabled(const struct net_device *dev, 2951 enum netdev_offload_xstats_type type); 2952 int netdev_offload_xstats_get(struct net_device *dev, 2953 enum netdev_offload_xstats_type type, 2954 struct rtnl_hw_stats64 *stats, bool *used, 2955 struct netlink_ext_ack *extack); 2956 void 2957 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 2958 const struct rtnl_hw_stats64 *stats); 2959 void 2960 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 2961 void netdev_offload_xstats_push_delta(struct net_device *dev, 2962 enum netdev_offload_xstats_type type, 2963 const struct rtnl_hw_stats64 *stats); 2964 2965 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2966 struct net_device *dev) 2967 { 2968 info->dev = dev; 2969 info->extack = NULL; 2970 } 2971 2972 static inline struct net_device * 2973 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 2974 { 2975 return info->dev; 2976 } 2977 2978 static inline struct netlink_ext_ack * 2979 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 2980 { 2981 return info->extack; 2982 } 2983 2984 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 2985 int call_netdevice_notifiers_info(unsigned long val, 2986 struct netdev_notifier_info *info); 2987 2988 extern rwlock_t dev_base_lock; /* Device list lock */ 2989 2990 #define for_each_netdev(net, d) \ 2991 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 2992 #define for_each_netdev_reverse(net, d) \ 2993 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 2994 #define for_each_netdev_rcu(net, d) \ 2995 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 2996 #define for_each_netdev_safe(net, d, n) \ 2997 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 2998 #define for_each_netdev_continue(net, d) \ 2999 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3000 #define for_each_netdev_continue_reverse(net, d) \ 3001 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3002 dev_list) 3003 #define for_each_netdev_continue_rcu(net, d) \ 3004 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3005 #define for_each_netdev_in_bond_rcu(bond, slave) \ 3006 for_each_netdev_rcu(&init_net, slave) \ 3007 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3008 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3009 3010 #define for_each_netdev_dump(net, d, ifindex) \ 3011 xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex)) 3012 3013 static inline struct net_device *next_net_device(struct net_device *dev) 3014 { 3015 struct list_head *lh; 3016 struct net *net; 3017 3018 net = dev_net(dev); 3019 lh = dev->dev_list.next; 3020 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3021 } 3022 3023 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3024 { 3025 struct list_head *lh; 3026 struct net *net; 3027 3028 net = dev_net(dev); 3029 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3030 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3031 } 3032 3033 static inline struct net_device *first_net_device(struct net *net) 3034 { 3035 return list_empty(&net->dev_base_head) ? NULL : 3036 net_device_entry(net->dev_base_head.next); 3037 } 3038 3039 static inline struct net_device *first_net_device_rcu(struct net *net) 3040 { 3041 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3042 3043 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3044 } 3045 3046 int netdev_boot_setup_check(struct net_device *dev); 3047 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3048 const char *hwaddr); 3049 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3050 void dev_add_pack(struct packet_type *pt); 3051 void dev_remove_pack(struct packet_type *pt); 3052 void __dev_remove_pack(struct packet_type *pt); 3053 void dev_add_offload(struct packet_offload *po); 3054 void dev_remove_offload(struct packet_offload *po); 3055 3056 int dev_get_iflink(const struct net_device *dev); 3057 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3058 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3059 struct net_device_path_stack *stack); 3060 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3061 unsigned short mask); 3062 struct net_device *dev_get_by_name(struct net *net, const char *name); 3063 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3064 struct net_device *__dev_get_by_name(struct net *net, const char *name); 3065 bool netdev_name_in_use(struct net *net, const char *name); 3066 int dev_alloc_name(struct net_device *dev, const char *name); 3067 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3068 void dev_close(struct net_device *dev); 3069 void dev_close_many(struct list_head *head, bool unlink); 3070 void dev_disable_lro(struct net_device *dev); 3071 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3072 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3073 struct net_device *sb_dev); 3074 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3075 struct net_device *sb_dev); 3076 3077 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3078 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3079 3080 static inline int dev_queue_xmit(struct sk_buff *skb) 3081 { 3082 return __dev_queue_xmit(skb, NULL); 3083 } 3084 3085 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3086 struct net_device *sb_dev) 3087 { 3088 return __dev_queue_xmit(skb, sb_dev); 3089 } 3090 3091 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3092 { 3093 int ret; 3094 3095 ret = __dev_direct_xmit(skb, queue_id); 3096 if (!dev_xmit_complete(ret)) 3097 kfree_skb(skb); 3098 return ret; 3099 } 3100 3101 int register_netdevice(struct net_device *dev); 3102 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3103 void unregister_netdevice_many(struct list_head *head); 3104 static inline void unregister_netdevice(struct net_device *dev) 3105 { 3106 unregister_netdevice_queue(dev, NULL); 3107 } 3108 3109 int netdev_refcnt_read(const struct net_device *dev); 3110 void free_netdev(struct net_device *dev); 3111 void netdev_freemem(struct net_device *dev); 3112 int init_dummy_netdev(struct net_device *dev); 3113 3114 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3115 struct sk_buff *skb, 3116 bool all_slaves); 3117 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3118 struct sock *sk); 3119 struct net_device *dev_get_by_index(struct net *net, int ifindex); 3120 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3121 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3122 netdevice_tracker *tracker, gfp_t gfp); 3123 struct net_device *netdev_get_by_name(struct net *net, const char *name, 3124 netdevice_tracker *tracker, gfp_t gfp); 3125 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3126 struct net_device *dev_get_by_napi_id(unsigned int napi_id); 3127 3128 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3129 unsigned short type, 3130 const void *daddr, const void *saddr, 3131 unsigned int len) 3132 { 3133 if (!dev->header_ops || !dev->header_ops->create) 3134 return 0; 3135 3136 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3137 } 3138 3139 static inline int dev_parse_header(const struct sk_buff *skb, 3140 unsigned char *haddr) 3141 { 3142 const struct net_device *dev = skb->dev; 3143 3144 if (!dev->header_ops || !dev->header_ops->parse) 3145 return 0; 3146 return dev->header_ops->parse(skb, haddr); 3147 } 3148 3149 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3150 { 3151 const struct net_device *dev = skb->dev; 3152 3153 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3154 return 0; 3155 return dev->header_ops->parse_protocol(skb); 3156 } 3157 3158 /* ll_header must have at least hard_header_len allocated */ 3159 static inline bool dev_validate_header(const struct net_device *dev, 3160 char *ll_header, int len) 3161 { 3162 if (likely(len >= dev->hard_header_len)) 3163 return true; 3164 if (len < dev->min_header_len) 3165 return false; 3166 3167 if (capable(CAP_SYS_RAWIO)) { 3168 memset(ll_header + len, 0, dev->hard_header_len - len); 3169 return true; 3170 } 3171 3172 if (dev->header_ops && dev->header_ops->validate) 3173 return dev->header_ops->validate(ll_header, len); 3174 3175 return false; 3176 } 3177 3178 static inline bool dev_has_header(const struct net_device *dev) 3179 { 3180 return dev->header_ops && dev->header_ops->create; 3181 } 3182 3183 /* 3184 * Incoming packets are placed on per-CPU queues 3185 */ 3186 struct softnet_data { 3187 struct list_head poll_list; 3188 struct sk_buff_head process_queue; 3189 3190 /* stats */ 3191 unsigned int processed; 3192 unsigned int time_squeeze; 3193 #ifdef CONFIG_RPS 3194 struct softnet_data *rps_ipi_list; 3195 #endif 3196 3197 bool in_net_rx_action; 3198 bool in_napi_threaded_poll; 3199 3200 #ifdef CONFIG_NET_FLOW_LIMIT 3201 struct sd_flow_limit __rcu *flow_limit; 3202 #endif 3203 struct Qdisc *output_queue; 3204 struct Qdisc **output_queue_tailp; 3205 struct sk_buff *completion_queue; 3206 #ifdef CONFIG_XFRM_OFFLOAD 3207 struct sk_buff_head xfrm_backlog; 3208 #endif 3209 /* written and read only by owning cpu: */ 3210 struct { 3211 u16 recursion; 3212 u8 more; 3213 #ifdef CONFIG_NET_EGRESS 3214 u8 skip_txqueue; 3215 #endif 3216 } xmit; 3217 #ifdef CONFIG_RPS 3218 /* input_queue_head should be written by cpu owning this struct, 3219 * and only read by other cpus. Worth using a cache line. 3220 */ 3221 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3222 3223 /* Elements below can be accessed between CPUs for RPS/RFS */ 3224 call_single_data_t csd ____cacheline_aligned_in_smp; 3225 struct softnet_data *rps_ipi_next; 3226 unsigned int cpu; 3227 unsigned int input_queue_tail; 3228 #endif 3229 unsigned int received_rps; 3230 unsigned int dropped; 3231 struct sk_buff_head input_pkt_queue; 3232 struct napi_struct backlog; 3233 3234 /* Another possibly contended cache line */ 3235 spinlock_t defer_lock ____cacheline_aligned_in_smp; 3236 int defer_count; 3237 int defer_ipi_scheduled; 3238 struct sk_buff *defer_list; 3239 call_single_data_t defer_csd; 3240 }; 3241 3242 static inline void input_queue_head_incr(struct softnet_data *sd) 3243 { 3244 #ifdef CONFIG_RPS 3245 sd->input_queue_head++; 3246 #endif 3247 } 3248 3249 static inline void input_queue_tail_incr_save(struct softnet_data *sd, 3250 unsigned int *qtail) 3251 { 3252 #ifdef CONFIG_RPS 3253 *qtail = ++sd->input_queue_tail; 3254 #endif 3255 } 3256 3257 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3258 3259 static inline int dev_recursion_level(void) 3260 { 3261 return this_cpu_read(softnet_data.xmit.recursion); 3262 } 3263 3264 #define XMIT_RECURSION_LIMIT 8 3265 static inline bool dev_xmit_recursion(void) 3266 { 3267 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 3268 XMIT_RECURSION_LIMIT); 3269 } 3270 3271 static inline void dev_xmit_recursion_inc(void) 3272 { 3273 __this_cpu_inc(softnet_data.xmit.recursion); 3274 } 3275 3276 static inline void dev_xmit_recursion_dec(void) 3277 { 3278 __this_cpu_dec(softnet_data.xmit.recursion); 3279 } 3280 3281 void __netif_schedule(struct Qdisc *q); 3282 void netif_schedule_queue(struct netdev_queue *txq); 3283 3284 static inline void netif_tx_schedule_all(struct net_device *dev) 3285 { 3286 unsigned int i; 3287 3288 for (i = 0; i < dev->num_tx_queues; i++) 3289 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3290 } 3291 3292 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3293 { 3294 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3295 } 3296 3297 /** 3298 * netif_start_queue - allow transmit 3299 * @dev: network device 3300 * 3301 * Allow upper layers to call the device hard_start_xmit routine. 3302 */ 3303 static inline void netif_start_queue(struct net_device *dev) 3304 { 3305 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3306 } 3307 3308 static inline void netif_tx_start_all_queues(struct net_device *dev) 3309 { 3310 unsigned int i; 3311 3312 for (i = 0; i < dev->num_tx_queues; i++) { 3313 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3314 netif_tx_start_queue(txq); 3315 } 3316 } 3317 3318 void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3319 3320 /** 3321 * netif_wake_queue - restart transmit 3322 * @dev: network device 3323 * 3324 * Allow upper layers to call the device hard_start_xmit routine. 3325 * Used for flow control when transmit resources are available. 3326 */ 3327 static inline void netif_wake_queue(struct net_device *dev) 3328 { 3329 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 3330 } 3331 3332 static inline void netif_tx_wake_all_queues(struct net_device *dev) 3333 { 3334 unsigned int i; 3335 3336 for (i = 0; i < dev->num_tx_queues; i++) { 3337 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3338 netif_tx_wake_queue(txq); 3339 } 3340 } 3341 3342 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 3343 { 3344 /* Must be an atomic op see netif_txq_try_stop() */ 3345 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3346 } 3347 3348 /** 3349 * netif_stop_queue - stop transmitted packets 3350 * @dev: network device 3351 * 3352 * Stop upper layers calling the device hard_start_xmit routine. 3353 * Used for flow control when transmit resources are unavailable. 3354 */ 3355 static inline void netif_stop_queue(struct net_device *dev) 3356 { 3357 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 3358 } 3359 3360 void netif_tx_stop_all_queues(struct net_device *dev); 3361 3362 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 3363 { 3364 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3365 } 3366 3367 /** 3368 * netif_queue_stopped - test if transmit queue is flowblocked 3369 * @dev: network device 3370 * 3371 * Test if transmit queue on device is currently unable to send. 3372 */ 3373 static inline bool netif_queue_stopped(const struct net_device *dev) 3374 { 3375 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 3376 } 3377 3378 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 3379 { 3380 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 3381 } 3382 3383 static inline bool 3384 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 3385 { 3386 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 3387 } 3388 3389 static inline bool 3390 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) 3391 { 3392 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 3393 } 3394 3395 /** 3396 * netdev_queue_set_dql_min_limit - set dql minimum limit 3397 * @dev_queue: pointer to transmit queue 3398 * @min_limit: dql minimum limit 3399 * 3400 * Forces xmit_more() to return true until the minimum threshold 3401 * defined by @min_limit is reached (or until the tx queue is 3402 * empty). Warning: to be use with care, misuse will impact the 3403 * latency. 3404 */ 3405 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, 3406 unsigned int min_limit) 3407 { 3408 #ifdef CONFIG_BQL 3409 dev_queue->dql.min_limit = min_limit; 3410 #endif 3411 } 3412 3413 /** 3414 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write 3415 * @dev_queue: pointer to transmit queue 3416 * 3417 * BQL enabled drivers might use this helper in their ndo_start_xmit(), 3418 * to give appropriate hint to the CPU. 3419 */ 3420 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) 3421 { 3422 #ifdef CONFIG_BQL 3423 prefetchw(&dev_queue->dql.num_queued); 3424 #endif 3425 } 3426 3427 /** 3428 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write 3429 * @dev_queue: pointer to transmit queue 3430 * 3431 * BQL enabled drivers might use this helper in their TX completion path, 3432 * to give appropriate hint to the CPU. 3433 */ 3434 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3435 { 3436 #ifdef CONFIG_BQL 3437 prefetchw(&dev_queue->dql.limit); 3438 #endif 3439 } 3440 3441 /** 3442 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue 3443 * @dev_queue: network device queue 3444 * @bytes: number of bytes queued to the device queue 3445 * 3446 * Report the number of bytes queued for sending/completion to the network 3447 * device hardware queue. @bytes should be a good approximation and should 3448 * exactly match netdev_completed_queue() @bytes. 3449 * This is typically called once per packet, from ndo_start_xmit(). 3450 */ 3451 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3452 unsigned int bytes) 3453 { 3454 #ifdef CONFIG_BQL 3455 dql_queued(&dev_queue->dql, bytes); 3456 3457 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3458 return; 3459 3460 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3461 3462 /* 3463 * The XOFF flag must be set before checking the dql_avail below, 3464 * because in netdev_tx_completed_queue we update the dql_completed 3465 * before checking the XOFF flag. 3466 */ 3467 smp_mb(); 3468 3469 /* check again in case another CPU has just made room avail */ 3470 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3471 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3472 #endif 3473 } 3474 3475 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3476 * that they should not test BQL status themselves. 3477 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3478 * skb of a batch. 3479 * Returns true if the doorbell must be used to kick the NIC. 3480 */ 3481 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3482 unsigned int bytes, 3483 bool xmit_more) 3484 { 3485 if (xmit_more) { 3486 #ifdef CONFIG_BQL 3487 dql_queued(&dev_queue->dql, bytes); 3488 #endif 3489 return netif_tx_queue_stopped(dev_queue); 3490 } 3491 netdev_tx_sent_queue(dev_queue, bytes); 3492 return true; 3493 } 3494 3495 /** 3496 * netdev_sent_queue - report the number of bytes queued to hardware 3497 * @dev: network device 3498 * @bytes: number of bytes queued to the hardware device queue 3499 * 3500 * Report the number of bytes queued for sending/completion to the network 3501 * device hardware queue#0. @bytes should be a good approximation and should 3502 * exactly match netdev_completed_queue() @bytes. 3503 * This is typically called once per packet, from ndo_start_xmit(). 3504 */ 3505 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3506 { 3507 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3508 } 3509 3510 static inline bool __netdev_sent_queue(struct net_device *dev, 3511 unsigned int bytes, 3512 bool xmit_more) 3513 { 3514 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3515 xmit_more); 3516 } 3517 3518 /** 3519 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3520 * @dev_queue: network device queue 3521 * @pkts: number of packets (currently ignored) 3522 * @bytes: number of bytes dequeued from the device queue 3523 * 3524 * Must be called at most once per TX completion round (and not per 3525 * individual packet), so that BQL can adjust its limits appropriately. 3526 */ 3527 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3528 unsigned int pkts, unsigned int bytes) 3529 { 3530 #ifdef CONFIG_BQL 3531 if (unlikely(!bytes)) 3532 return; 3533 3534 dql_completed(&dev_queue->dql, bytes); 3535 3536 /* 3537 * Without the memory barrier there is a small possiblity that 3538 * netdev_tx_sent_queue will miss the update and cause the queue to 3539 * be stopped forever 3540 */ 3541 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ 3542 3543 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3544 return; 3545 3546 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3547 netif_schedule_queue(dev_queue); 3548 #endif 3549 } 3550 3551 /** 3552 * netdev_completed_queue - report bytes and packets completed by device 3553 * @dev: network device 3554 * @pkts: actual number of packets sent over the medium 3555 * @bytes: actual number of bytes sent over the medium 3556 * 3557 * Report the number of bytes and packets transmitted by the network device 3558 * hardware queue over the physical medium, @bytes must exactly match the 3559 * @bytes amount passed to netdev_sent_queue() 3560 */ 3561 static inline void netdev_completed_queue(struct net_device *dev, 3562 unsigned int pkts, unsigned int bytes) 3563 { 3564 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3565 } 3566 3567 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3568 { 3569 #ifdef CONFIG_BQL 3570 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3571 dql_reset(&q->dql); 3572 #endif 3573 } 3574 3575 /** 3576 * netdev_reset_queue - reset the packets and bytes count of a network device 3577 * @dev_queue: network device 3578 * 3579 * Reset the bytes and packet count of a network device and clear the 3580 * software flow control OFF bit for this network device 3581 */ 3582 static inline void netdev_reset_queue(struct net_device *dev_queue) 3583 { 3584 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 3585 } 3586 3587 /** 3588 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3589 * @dev: network device 3590 * @queue_index: given tx queue index 3591 * 3592 * Returns 0 if given tx queue index >= number of device tx queues, 3593 * otherwise returns the originally passed tx queue index. 3594 */ 3595 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3596 { 3597 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3598 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3599 dev->name, queue_index, 3600 dev->real_num_tx_queues); 3601 return 0; 3602 } 3603 3604 return queue_index; 3605 } 3606 3607 /** 3608 * netif_running - test if up 3609 * @dev: network device 3610 * 3611 * Test if the device has been brought up. 3612 */ 3613 static inline bool netif_running(const struct net_device *dev) 3614 { 3615 return test_bit(__LINK_STATE_START, &dev->state); 3616 } 3617 3618 /* 3619 * Routines to manage the subqueues on a device. We only need start, 3620 * stop, and a check if it's stopped. All other device management is 3621 * done at the overall netdevice level. 3622 * Also test the device if we're multiqueue. 3623 */ 3624 3625 /** 3626 * netif_start_subqueue - allow sending packets on subqueue 3627 * @dev: network device 3628 * @queue_index: sub queue index 3629 * 3630 * Start individual transmit queue of a device with multiple transmit queues. 3631 */ 3632 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3633 { 3634 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3635 3636 netif_tx_start_queue(txq); 3637 } 3638 3639 /** 3640 * netif_stop_subqueue - stop sending packets on subqueue 3641 * @dev: network device 3642 * @queue_index: sub queue index 3643 * 3644 * Stop individual transmit queue of a device with multiple transmit queues. 3645 */ 3646 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3647 { 3648 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3649 netif_tx_stop_queue(txq); 3650 } 3651 3652 /** 3653 * __netif_subqueue_stopped - test status of subqueue 3654 * @dev: network device 3655 * @queue_index: sub queue index 3656 * 3657 * Check individual transmit queue of a device with multiple transmit queues. 3658 */ 3659 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3660 u16 queue_index) 3661 { 3662 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3663 3664 return netif_tx_queue_stopped(txq); 3665 } 3666 3667 /** 3668 * netif_subqueue_stopped - test status of subqueue 3669 * @dev: network device 3670 * @skb: sub queue buffer pointer 3671 * 3672 * Check individual transmit queue of a device with multiple transmit queues. 3673 */ 3674 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3675 struct sk_buff *skb) 3676 { 3677 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3678 } 3679 3680 /** 3681 * netif_wake_subqueue - allow sending packets on subqueue 3682 * @dev: network device 3683 * @queue_index: sub queue index 3684 * 3685 * Resume individual transmit queue of a device with multiple transmit queues. 3686 */ 3687 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3688 { 3689 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3690 3691 netif_tx_wake_queue(txq); 3692 } 3693 3694 #ifdef CONFIG_XPS 3695 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3696 u16 index); 3697 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3698 u16 index, enum xps_map_type type); 3699 3700 /** 3701 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3702 * @j: CPU/Rx queue index 3703 * @mask: bitmask of all cpus/rx queues 3704 * @nr_bits: number of bits in the bitmask 3705 * 3706 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3707 */ 3708 static inline bool netif_attr_test_mask(unsigned long j, 3709 const unsigned long *mask, 3710 unsigned int nr_bits) 3711 { 3712 cpu_max_bits_warn(j, nr_bits); 3713 return test_bit(j, mask); 3714 } 3715 3716 /** 3717 * netif_attr_test_online - Test for online CPU/Rx queue 3718 * @j: CPU/Rx queue index 3719 * @online_mask: bitmask for CPUs/Rx queues that are online 3720 * @nr_bits: number of bits in the bitmask 3721 * 3722 * Returns true if a CPU/Rx queue is online. 3723 */ 3724 static inline bool netif_attr_test_online(unsigned long j, 3725 const unsigned long *online_mask, 3726 unsigned int nr_bits) 3727 { 3728 cpu_max_bits_warn(j, nr_bits); 3729 3730 if (online_mask) 3731 return test_bit(j, online_mask); 3732 3733 return (j < nr_bits); 3734 } 3735 3736 /** 3737 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3738 * @n: CPU/Rx queue index 3739 * @srcp: the cpumask/Rx queue mask pointer 3740 * @nr_bits: number of bits in the bitmask 3741 * 3742 * Returns >= nr_bits if no further CPUs/Rx queues set. 3743 */ 3744 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3745 unsigned int nr_bits) 3746 { 3747 /* -1 is a legal arg here. */ 3748 if (n != -1) 3749 cpu_max_bits_warn(n, nr_bits); 3750 3751 if (srcp) 3752 return find_next_bit(srcp, nr_bits, n + 1); 3753 3754 return n + 1; 3755 } 3756 3757 /** 3758 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3759 * @n: CPU/Rx queue index 3760 * @src1p: the first CPUs/Rx queues mask pointer 3761 * @src2p: the second CPUs/Rx queues mask pointer 3762 * @nr_bits: number of bits in the bitmask 3763 * 3764 * Returns >= nr_bits if no further CPUs/Rx queues set in both. 3765 */ 3766 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3767 const unsigned long *src2p, 3768 unsigned int nr_bits) 3769 { 3770 /* -1 is a legal arg here. */ 3771 if (n != -1) 3772 cpu_max_bits_warn(n, nr_bits); 3773 3774 if (src1p && src2p) 3775 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3776 else if (src1p) 3777 return find_next_bit(src1p, nr_bits, n + 1); 3778 else if (src2p) 3779 return find_next_bit(src2p, nr_bits, n + 1); 3780 3781 return n + 1; 3782 } 3783 #else 3784 static inline int netif_set_xps_queue(struct net_device *dev, 3785 const struct cpumask *mask, 3786 u16 index) 3787 { 3788 return 0; 3789 } 3790 3791 static inline int __netif_set_xps_queue(struct net_device *dev, 3792 const unsigned long *mask, 3793 u16 index, enum xps_map_type type) 3794 { 3795 return 0; 3796 } 3797 #endif 3798 3799 /** 3800 * netif_is_multiqueue - test if device has multiple transmit queues 3801 * @dev: network device 3802 * 3803 * Check if device has multiple transmit queues 3804 */ 3805 static inline bool netif_is_multiqueue(const struct net_device *dev) 3806 { 3807 return dev->num_tx_queues > 1; 3808 } 3809 3810 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3811 3812 #ifdef CONFIG_SYSFS 3813 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3814 #else 3815 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3816 unsigned int rxqs) 3817 { 3818 dev->real_num_rx_queues = rxqs; 3819 return 0; 3820 } 3821 #endif 3822 int netif_set_real_num_queues(struct net_device *dev, 3823 unsigned int txq, unsigned int rxq); 3824 3825 int netif_get_num_default_rss_queues(void); 3826 3827 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3828 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3829 3830 /* 3831 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3832 * interrupt context or with hardware interrupts being disabled. 3833 * (in_hardirq() || irqs_disabled()) 3834 * 3835 * We provide four helpers that can be used in following contexts : 3836 * 3837 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, 3838 * replacing kfree_skb(skb) 3839 * 3840 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. 3841 * Typically used in place of consume_skb(skb) in TX completion path 3842 * 3843 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, 3844 * replacing kfree_skb(skb) 3845 * 3846 * dev_consume_skb_any(skb) when caller doesn't know its current irq context, 3847 * and consumed a packet. Used in place of consume_skb(skb) 3848 */ 3849 static inline void dev_kfree_skb_irq(struct sk_buff *skb) 3850 { 3851 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3852 } 3853 3854 static inline void dev_consume_skb_irq(struct sk_buff *skb) 3855 { 3856 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); 3857 } 3858 3859 static inline void dev_kfree_skb_any(struct sk_buff *skb) 3860 { 3861 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3862 } 3863 3864 static inline void dev_consume_skb_any(struct sk_buff *skb) 3865 { 3866 dev_kfree_skb_any_reason(skb, SKB_CONSUMED); 3867 } 3868 3869 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 3870 struct bpf_prog *xdp_prog); 3871 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3872 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); 3873 int netif_rx(struct sk_buff *skb); 3874 int __netif_rx(struct sk_buff *skb); 3875 3876 int netif_receive_skb(struct sk_buff *skb); 3877 int netif_receive_skb_core(struct sk_buff *skb); 3878 void netif_receive_skb_list_internal(struct list_head *head); 3879 void netif_receive_skb_list(struct list_head *head); 3880 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3881 void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3882 struct sk_buff *napi_get_frags(struct napi_struct *napi); 3883 void napi_get_frags_check(struct napi_struct *napi); 3884 gro_result_t napi_gro_frags(struct napi_struct *napi); 3885 struct packet_offload *gro_find_receive_by_type(__be16 type); 3886 struct packet_offload *gro_find_complete_by_type(__be16 type); 3887 3888 static inline void napi_free_frags(struct napi_struct *napi) 3889 { 3890 kfree_skb(napi->skb); 3891 napi->skb = NULL; 3892 } 3893 3894 bool netdev_is_rx_handler_busy(struct net_device *dev); 3895 int netdev_rx_handler_register(struct net_device *dev, 3896 rx_handler_func_t *rx_handler, 3897 void *rx_handler_data); 3898 void netdev_rx_handler_unregister(struct net_device *dev); 3899 3900 bool dev_valid_name(const char *name); 3901 static inline bool is_socket_ioctl_cmd(unsigned int cmd) 3902 { 3903 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; 3904 } 3905 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); 3906 int put_user_ifreq(struct ifreq *ifr, void __user *arg); 3907 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 3908 void __user *data, bool *need_copyout); 3909 int dev_ifconf(struct net *net, struct ifconf __user *ifc); 3910 int generic_hwtstamp_get_lower(struct net_device *dev, 3911 struct kernel_hwtstamp_config *kernel_cfg); 3912 int generic_hwtstamp_set_lower(struct net_device *dev, 3913 struct kernel_hwtstamp_config *kernel_cfg, 3914 struct netlink_ext_ack *extack); 3915 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); 3916 unsigned int dev_get_flags(const struct net_device *); 3917 int __dev_change_flags(struct net_device *dev, unsigned int flags, 3918 struct netlink_ext_ack *extack); 3919 int dev_change_flags(struct net_device *dev, unsigned int flags, 3920 struct netlink_ext_ack *extack); 3921 int dev_set_alias(struct net_device *, const char *, size_t); 3922 int dev_get_alias(const struct net_device *, char *, size_t); 3923 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 3924 const char *pat, int new_ifindex); 3925 static inline 3926 int dev_change_net_namespace(struct net_device *dev, struct net *net, 3927 const char *pat) 3928 { 3929 return __dev_change_net_namespace(dev, net, pat, 0); 3930 } 3931 int __dev_set_mtu(struct net_device *, int); 3932 int dev_set_mtu(struct net_device *, int); 3933 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 3934 struct netlink_ext_ack *extack); 3935 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 3936 struct netlink_ext_ack *extack); 3937 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 3938 struct netlink_ext_ack *extack); 3939 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 3940 int dev_get_port_parent_id(struct net_device *dev, 3941 struct netdev_phys_item_id *ppid, bool recurse); 3942 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 3943 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 3944 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3945 struct netdev_queue *txq, int *ret); 3946 3947 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 3948 u8 dev_xdp_prog_count(struct net_device *dev); 3949 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 3950 3951 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3952 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3953 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 3954 bool is_skb_forwardable(const struct net_device *dev, 3955 const struct sk_buff *skb); 3956 3957 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 3958 const struct sk_buff *skb, 3959 const bool check_mtu) 3960 { 3961 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 3962 unsigned int len; 3963 3964 if (!(dev->flags & IFF_UP)) 3965 return false; 3966 3967 if (!check_mtu) 3968 return true; 3969 3970 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 3971 if (skb->len <= len) 3972 return true; 3973 3974 /* if TSO is enabled, we don't care about the length as the packet 3975 * could be forwarded without being segmented before 3976 */ 3977 if (skb_is_gso(skb)) 3978 return true; 3979 3980 return false; 3981 } 3982 3983 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); 3984 3985 static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) 3986 { 3987 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 3988 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 3989 3990 if (likely(p)) 3991 return p; 3992 3993 return netdev_core_stats_alloc(dev); 3994 } 3995 3996 #define DEV_CORE_STATS_INC(FIELD) \ 3997 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 3998 { \ 3999 struct net_device_core_stats __percpu *p; \ 4000 \ 4001 p = dev_core_stats(dev); \ 4002 if (p) \ 4003 this_cpu_inc(p->FIELD); \ 4004 } 4005 DEV_CORE_STATS_INC(rx_dropped) 4006 DEV_CORE_STATS_INC(tx_dropped) 4007 DEV_CORE_STATS_INC(rx_nohandler) 4008 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4009 4010 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4011 struct sk_buff *skb, 4012 const bool check_mtu) 4013 { 4014 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4015 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 4016 dev_core_stats_rx_dropped_inc(dev); 4017 kfree_skb(skb); 4018 return NET_RX_DROP; 4019 } 4020 4021 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 4022 skb->priority = 0; 4023 return 0; 4024 } 4025 4026 bool dev_nit_active(struct net_device *dev); 4027 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4028 4029 static inline void __dev_put(struct net_device *dev) 4030 { 4031 if (dev) { 4032 #ifdef CONFIG_PCPU_DEV_REFCNT 4033 this_cpu_dec(*dev->pcpu_refcnt); 4034 #else 4035 refcount_dec(&dev->dev_refcnt); 4036 #endif 4037 } 4038 } 4039 4040 static inline void __dev_hold(struct net_device *dev) 4041 { 4042 if (dev) { 4043 #ifdef CONFIG_PCPU_DEV_REFCNT 4044 this_cpu_inc(*dev->pcpu_refcnt); 4045 #else 4046 refcount_inc(&dev->dev_refcnt); 4047 #endif 4048 } 4049 } 4050 4051 static inline void __netdev_tracker_alloc(struct net_device *dev, 4052 netdevice_tracker *tracker, 4053 gfp_t gfp) 4054 { 4055 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4056 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 4057 #endif 4058 } 4059 4060 /* netdev_tracker_alloc() can upgrade a prior untracked reference 4061 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 4062 */ 4063 static inline void netdev_tracker_alloc(struct net_device *dev, 4064 netdevice_tracker *tracker, gfp_t gfp) 4065 { 4066 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4067 refcount_dec(&dev->refcnt_tracker.no_tracker); 4068 __netdev_tracker_alloc(dev, tracker, gfp); 4069 #endif 4070 } 4071 4072 static inline void netdev_tracker_free(struct net_device *dev, 4073 netdevice_tracker *tracker) 4074 { 4075 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4076 ref_tracker_free(&dev->refcnt_tracker, tracker); 4077 #endif 4078 } 4079 4080 static inline void netdev_hold(struct net_device *dev, 4081 netdevice_tracker *tracker, gfp_t gfp) 4082 { 4083 if (dev) { 4084 __dev_hold(dev); 4085 __netdev_tracker_alloc(dev, tracker, gfp); 4086 } 4087 } 4088 4089 static inline void netdev_put(struct net_device *dev, 4090 netdevice_tracker *tracker) 4091 { 4092 if (dev) { 4093 netdev_tracker_free(dev, tracker); 4094 __dev_put(dev); 4095 } 4096 } 4097 4098 /** 4099 * dev_hold - get reference to device 4100 * @dev: network device 4101 * 4102 * Hold reference to device to keep it from being freed. 4103 * Try using netdev_hold() instead. 4104 */ 4105 static inline void dev_hold(struct net_device *dev) 4106 { 4107 netdev_hold(dev, NULL, GFP_ATOMIC); 4108 } 4109 4110 /** 4111 * dev_put - release reference to device 4112 * @dev: network device 4113 * 4114 * Release reference to device to allow it to be freed. 4115 * Try using netdev_put() instead. 4116 */ 4117 static inline void dev_put(struct net_device *dev) 4118 { 4119 netdev_put(dev, NULL); 4120 } 4121 4122 static inline void netdev_ref_replace(struct net_device *odev, 4123 struct net_device *ndev, 4124 netdevice_tracker *tracker, 4125 gfp_t gfp) 4126 { 4127 if (odev) 4128 netdev_tracker_free(odev, tracker); 4129 4130 __dev_hold(ndev); 4131 __dev_put(odev); 4132 4133 if (ndev) 4134 __netdev_tracker_alloc(ndev, tracker, gfp); 4135 } 4136 4137 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 4138 * and _off may be called from IRQ context, but it is caller 4139 * who is responsible for serialization of these calls. 4140 * 4141 * The name carrier is inappropriate, these functions should really be 4142 * called netif_lowerlayer_*() because they represent the state of any 4143 * kind of lower layer not just hardware media. 4144 */ 4145 void linkwatch_fire_event(struct net_device *dev); 4146 4147 /** 4148 * netif_carrier_ok - test if carrier present 4149 * @dev: network device 4150 * 4151 * Check if carrier is present on device 4152 */ 4153 static inline bool netif_carrier_ok(const struct net_device *dev) 4154 { 4155 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 4156 } 4157 4158 unsigned long dev_trans_start(struct net_device *dev); 4159 4160 void __netdev_watchdog_up(struct net_device *dev); 4161 4162 void netif_carrier_on(struct net_device *dev); 4163 void netif_carrier_off(struct net_device *dev); 4164 void netif_carrier_event(struct net_device *dev); 4165 4166 /** 4167 * netif_dormant_on - mark device as dormant. 4168 * @dev: network device 4169 * 4170 * Mark device as dormant (as per RFC2863). 4171 * 4172 * The dormant state indicates that the relevant interface is not 4173 * actually in a condition to pass packets (i.e., it is not 'up') but is 4174 * in a "pending" state, waiting for some external event. For "on- 4175 * demand" interfaces, this new state identifies the situation where the 4176 * interface is waiting for events to place it in the up state. 4177 */ 4178 static inline void netif_dormant_on(struct net_device *dev) 4179 { 4180 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 4181 linkwatch_fire_event(dev); 4182 } 4183 4184 /** 4185 * netif_dormant_off - set device as not dormant. 4186 * @dev: network device 4187 * 4188 * Device is not in dormant state. 4189 */ 4190 static inline void netif_dormant_off(struct net_device *dev) 4191 { 4192 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 4193 linkwatch_fire_event(dev); 4194 } 4195 4196 /** 4197 * netif_dormant - test if device is dormant 4198 * @dev: network device 4199 * 4200 * Check if device is dormant. 4201 */ 4202 static inline bool netif_dormant(const struct net_device *dev) 4203 { 4204 return test_bit(__LINK_STATE_DORMANT, &dev->state); 4205 } 4206 4207 4208 /** 4209 * netif_testing_on - mark device as under test. 4210 * @dev: network device 4211 * 4212 * Mark device as under test (as per RFC2863). 4213 * 4214 * The testing state indicates that some test(s) must be performed on 4215 * the interface. After completion, of the test, the interface state 4216 * will change to up, dormant, or down, as appropriate. 4217 */ 4218 static inline void netif_testing_on(struct net_device *dev) 4219 { 4220 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) 4221 linkwatch_fire_event(dev); 4222 } 4223 4224 /** 4225 * netif_testing_off - set device as not under test. 4226 * @dev: network device 4227 * 4228 * Device is not in testing state. 4229 */ 4230 static inline void netif_testing_off(struct net_device *dev) 4231 { 4232 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4233 linkwatch_fire_event(dev); 4234 } 4235 4236 /** 4237 * netif_testing - test if device is under test 4238 * @dev: network device 4239 * 4240 * Check if device is under test 4241 */ 4242 static inline bool netif_testing(const struct net_device *dev) 4243 { 4244 return test_bit(__LINK_STATE_TESTING, &dev->state); 4245 } 4246 4247 4248 /** 4249 * netif_oper_up - test if device is operational 4250 * @dev: network device 4251 * 4252 * Check if carrier is operational 4253 */ 4254 static inline bool netif_oper_up(const struct net_device *dev) 4255 { 4256 return (dev->operstate == IF_OPER_UP || 4257 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 4258 } 4259 4260 /** 4261 * netif_device_present - is device available or removed 4262 * @dev: network device 4263 * 4264 * Check if device has not been removed from system. 4265 */ 4266 static inline bool netif_device_present(const struct net_device *dev) 4267 { 4268 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4269 } 4270 4271 void netif_device_detach(struct net_device *dev); 4272 4273 void netif_device_attach(struct net_device *dev); 4274 4275 /* 4276 * Network interface message level settings 4277 */ 4278 4279 enum { 4280 NETIF_MSG_DRV_BIT, 4281 NETIF_MSG_PROBE_BIT, 4282 NETIF_MSG_LINK_BIT, 4283 NETIF_MSG_TIMER_BIT, 4284 NETIF_MSG_IFDOWN_BIT, 4285 NETIF_MSG_IFUP_BIT, 4286 NETIF_MSG_RX_ERR_BIT, 4287 NETIF_MSG_TX_ERR_BIT, 4288 NETIF_MSG_TX_QUEUED_BIT, 4289 NETIF_MSG_INTR_BIT, 4290 NETIF_MSG_TX_DONE_BIT, 4291 NETIF_MSG_RX_STATUS_BIT, 4292 NETIF_MSG_PKTDATA_BIT, 4293 NETIF_MSG_HW_BIT, 4294 NETIF_MSG_WOL_BIT, 4295 4296 /* When you add a new bit above, update netif_msg_class_names array 4297 * in net/ethtool/common.c 4298 */ 4299 NETIF_MSG_CLASS_COUNT, 4300 }; 4301 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4302 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4303 4304 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4305 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4306 4307 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4308 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4309 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4310 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4311 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4312 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4313 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4314 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4315 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4316 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4317 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4318 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4319 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4320 #define NETIF_MSG_HW __NETIF_MSG(HW) 4321 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4322 4323 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4324 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4325 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4326 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4327 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4328 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4329 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4330 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4331 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4332 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4333 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4334 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4335 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4336 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4337 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4338 4339 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4340 { 4341 /* use default */ 4342 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4343 return default_msg_enable_bits; 4344 if (debug_value == 0) /* no output */ 4345 return 0; 4346 /* set low N bits */ 4347 return (1U << debug_value) - 1; 4348 } 4349 4350 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4351 { 4352 spin_lock(&txq->_xmit_lock); 4353 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4354 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4355 } 4356 4357 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4358 { 4359 __acquire(&txq->_xmit_lock); 4360 return true; 4361 } 4362 4363 static inline void __netif_tx_release(struct netdev_queue *txq) 4364 { 4365 __release(&txq->_xmit_lock); 4366 } 4367 4368 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4369 { 4370 spin_lock_bh(&txq->_xmit_lock); 4371 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4372 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4373 } 4374 4375 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4376 { 4377 bool ok = spin_trylock(&txq->_xmit_lock); 4378 4379 if (likely(ok)) { 4380 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4381 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4382 } 4383 return ok; 4384 } 4385 4386 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4387 { 4388 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4389 WRITE_ONCE(txq->xmit_lock_owner, -1); 4390 spin_unlock(&txq->_xmit_lock); 4391 } 4392 4393 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4394 { 4395 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4396 WRITE_ONCE(txq->xmit_lock_owner, -1); 4397 spin_unlock_bh(&txq->_xmit_lock); 4398 } 4399 4400 /* 4401 * txq->trans_start can be read locklessly from dev_watchdog() 4402 */ 4403 static inline void txq_trans_update(struct netdev_queue *txq) 4404 { 4405 if (txq->xmit_lock_owner != -1) 4406 WRITE_ONCE(txq->trans_start, jiffies); 4407 } 4408 4409 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4410 { 4411 unsigned long now = jiffies; 4412 4413 if (READ_ONCE(txq->trans_start) != now) 4414 WRITE_ONCE(txq->trans_start, now); 4415 } 4416 4417 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4418 static inline void netif_trans_update(struct net_device *dev) 4419 { 4420 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4421 4422 txq_trans_cond_update(txq); 4423 } 4424 4425 /** 4426 * netif_tx_lock - grab network device transmit lock 4427 * @dev: network device 4428 * 4429 * Get network device transmit lock 4430 */ 4431 void netif_tx_lock(struct net_device *dev); 4432 4433 static inline void netif_tx_lock_bh(struct net_device *dev) 4434 { 4435 local_bh_disable(); 4436 netif_tx_lock(dev); 4437 } 4438 4439 void netif_tx_unlock(struct net_device *dev); 4440 4441 static inline void netif_tx_unlock_bh(struct net_device *dev) 4442 { 4443 netif_tx_unlock(dev); 4444 local_bh_enable(); 4445 } 4446 4447 #define HARD_TX_LOCK(dev, txq, cpu) { \ 4448 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4449 __netif_tx_lock(txq, cpu); \ 4450 } else { \ 4451 __netif_tx_acquire(txq); \ 4452 } \ 4453 } 4454 4455 #define HARD_TX_TRYLOCK(dev, txq) \ 4456 (((dev->features & NETIF_F_LLTX) == 0) ? \ 4457 __netif_tx_trylock(txq) : \ 4458 __netif_tx_acquire(txq)) 4459 4460 #define HARD_TX_UNLOCK(dev, txq) { \ 4461 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4462 __netif_tx_unlock(txq); \ 4463 } else { \ 4464 __netif_tx_release(txq); \ 4465 } \ 4466 } 4467 4468 static inline void netif_tx_disable(struct net_device *dev) 4469 { 4470 unsigned int i; 4471 int cpu; 4472 4473 local_bh_disable(); 4474 cpu = smp_processor_id(); 4475 spin_lock(&dev->tx_global_lock); 4476 for (i = 0; i < dev->num_tx_queues; i++) { 4477 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4478 4479 __netif_tx_lock(txq, cpu); 4480 netif_tx_stop_queue(txq); 4481 __netif_tx_unlock(txq); 4482 } 4483 spin_unlock(&dev->tx_global_lock); 4484 local_bh_enable(); 4485 } 4486 4487 static inline void netif_addr_lock(struct net_device *dev) 4488 { 4489 unsigned char nest_level = 0; 4490 4491 #ifdef CONFIG_LOCKDEP 4492 nest_level = dev->nested_level; 4493 #endif 4494 spin_lock_nested(&dev->addr_list_lock, nest_level); 4495 } 4496 4497 static inline void netif_addr_lock_bh(struct net_device *dev) 4498 { 4499 unsigned char nest_level = 0; 4500 4501 #ifdef CONFIG_LOCKDEP 4502 nest_level = dev->nested_level; 4503 #endif 4504 local_bh_disable(); 4505 spin_lock_nested(&dev->addr_list_lock, nest_level); 4506 } 4507 4508 static inline void netif_addr_unlock(struct net_device *dev) 4509 { 4510 spin_unlock(&dev->addr_list_lock); 4511 } 4512 4513 static inline void netif_addr_unlock_bh(struct net_device *dev) 4514 { 4515 spin_unlock_bh(&dev->addr_list_lock); 4516 } 4517 4518 /* 4519 * dev_addrs walker. Should be used only for read access. Call with 4520 * rcu_read_lock held. 4521 */ 4522 #define for_each_dev_addr(dev, ha) \ 4523 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4524 4525 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4526 4527 void ether_setup(struct net_device *dev); 4528 4529 /* Support for loadable net-drivers */ 4530 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4531 unsigned char name_assign_type, 4532 void (*setup)(struct net_device *), 4533 unsigned int txqs, unsigned int rxqs); 4534 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4535 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4536 4537 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4538 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4539 count) 4540 4541 int register_netdev(struct net_device *dev); 4542 void unregister_netdev(struct net_device *dev); 4543 4544 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4545 4546 /* General hardware address lists handling functions */ 4547 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4548 struct netdev_hw_addr_list *from_list, int addr_len); 4549 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4550 struct netdev_hw_addr_list *from_list, int addr_len); 4551 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4552 struct net_device *dev, 4553 int (*sync)(struct net_device *, const unsigned char *), 4554 int (*unsync)(struct net_device *, 4555 const unsigned char *)); 4556 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4557 struct net_device *dev, 4558 int (*sync)(struct net_device *, 4559 const unsigned char *, int), 4560 int (*unsync)(struct net_device *, 4561 const unsigned char *, int)); 4562 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4563 struct net_device *dev, 4564 int (*unsync)(struct net_device *, 4565 const unsigned char *, int)); 4566 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4567 struct net_device *dev, 4568 int (*unsync)(struct net_device *, 4569 const unsigned char *)); 4570 void __hw_addr_init(struct netdev_hw_addr_list *list); 4571 4572 /* Functions used for device addresses handling */ 4573 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4574 const void *addr, size_t len); 4575 4576 static inline void 4577 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4578 { 4579 dev_addr_mod(dev, 0, addr, len); 4580 } 4581 4582 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4583 { 4584 __dev_addr_set(dev, addr, dev->addr_len); 4585 } 4586 4587 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4588 unsigned char addr_type); 4589 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4590 unsigned char addr_type); 4591 4592 /* Functions used for unicast addresses handling */ 4593 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4594 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4595 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4596 int dev_uc_sync(struct net_device *to, struct net_device *from); 4597 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4598 void dev_uc_unsync(struct net_device *to, struct net_device *from); 4599 void dev_uc_flush(struct net_device *dev); 4600 void dev_uc_init(struct net_device *dev); 4601 4602 /** 4603 * __dev_uc_sync - Synchonize device's unicast list 4604 * @dev: device to sync 4605 * @sync: function to call if address should be added 4606 * @unsync: function to call if address should be removed 4607 * 4608 * Add newly added addresses to the interface, and release 4609 * addresses that have been deleted. 4610 */ 4611 static inline int __dev_uc_sync(struct net_device *dev, 4612 int (*sync)(struct net_device *, 4613 const unsigned char *), 4614 int (*unsync)(struct net_device *, 4615 const unsigned char *)) 4616 { 4617 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4618 } 4619 4620 /** 4621 * __dev_uc_unsync - Remove synchronized addresses from device 4622 * @dev: device to sync 4623 * @unsync: function to call if address should be removed 4624 * 4625 * Remove all addresses that were added to the device by dev_uc_sync(). 4626 */ 4627 static inline void __dev_uc_unsync(struct net_device *dev, 4628 int (*unsync)(struct net_device *, 4629 const unsigned char *)) 4630 { 4631 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4632 } 4633 4634 /* Functions used for multicast addresses handling */ 4635 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4636 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4637 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4638 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4639 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4640 int dev_mc_sync(struct net_device *to, struct net_device *from); 4641 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4642 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4643 void dev_mc_flush(struct net_device *dev); 4644 void dev_mc_init(struct net_device *dev); 4645 4646 /** 4647 * __dev_mc_sync - Synchonize device's multicast list 4648 * @dev: device to sync 4649 * @sync: function to call if address should be added 4650 * @unsync: function to call if address should be removed 4651 * 4652 * Add newly added addresses to the interface, and release 4653 * addresses that have been deleted. 4654 */ 4655 static inline int __dev_mc_sync(struct net_device *dev, 4656 int (*sync)(struct net_device *, 4657 const unsigned char *), 4658 int (*unsync)(struct net_device *, 4659 const unsigned char *)) 4660 { 4661 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4662 } 4663 4664 /** 4665 * __dev_mc_unsync - Remove synchronized addresses from device 4666 * @dev: device to sync 4667 * @unsync: function to call if address should be removed 4668 * 4669 * Remove all addresses that were added to the device by dev_mc_sync(). 4670 */ 4671 static inline void __dev_mc_unsync(struct net_device *dev, 4672 int (*unsync)(struct net_device *, 4673 const unsigned char *)) 4674 { 4675 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4676 } 4677 4678 /* Functions used for secondary unicast and multicast support */ 4679 void dev_set_rx_mode(struct net_device *dev); 4680 int dev_set_promiscuity(struct net_device *dev, int inc); 4681 int dev_set_allmulti(struct net_device *dev, int inc); 4682 void netdev_state_change(struct net_device *dev); 4683 void __netdev_notify_peers(struct net_device *dev); 4684 void netdev_notify_peers(struct net_device *dev); 4685 void netdev_features_change(struct net_device *dev); 4686 /* Load a device via the kmod */ 4687 void dev_load(struct net *net, const char *name); 4688 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4689 struct rtnl_link_stats64 *storage); 4690 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4691 const struct net_device_stats *netdev_stats); 4692 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4693 const struct pcpu_sw_netstats __percpu *netstats); 4694 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4695 4696 extern int netdev_max_backlog; 4697 extern int dev_rx_weight; 4698 extern int dev_tx_weight; 4699 extern int gro_normal_batch; 4700 4701 enum { 4702 NESTED_SYNC_IMM_BIT, 4703 NESTED_SYNC_TODO_BIT, 4704 }; 4705 4706 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4707 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4708 4709 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4710 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4711 4712 struct netdev_nested_priv { 4713 unsigned char flags; 4714 void *data; 4715 }; 4716 4717 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4718 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4719 struct list_head **iter); 4720 4721 /* iterate through upper list, must be called under RCU read lock */ 4722 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4723 for (iter = &(dev)->adj_list.upper, \ 4724 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4725 updev; \ 4726 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4727 4728 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4729 int (*fn)(struct net_device *upper_dev, 4730 struct netdev_nested_priv *priv), 4731 struct netdev_nested_priv *priv); 4732 4733 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4734 struct net_device *upper_dev); 4735 4736 bool netdev_has_any_upper_dev(struct net_device *dev); 4737 4738 void *netdev_lower_get_next_private(struct net_device *dev, 4739 struct list_head **iter); 4740 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4741 struct list_head **iter); 4742 4743 #define netdev_for_each_lower_private(dev, priv, iter) \ 4744 for (iter = (dev)->adj_list.lower.next, \ 4745 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4746 priv; \ 4747 priv = netdev_lower_get_next_private(dev, &(iter))) 4748 4749 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4750 for (iter = &(dev)->adj_list.lower, \ 4751 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4752 priv; \ 4753 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4754 4755 void *netdev_lower_get_next(struct net_device *dev, 4756 struct list_head **iter); 4757 4758 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4759 for (iter = (dev)->adj_list.lower.next, \ 4760 ldev = netdev_lower_get_next(dev, &(iter)); \ 4761 ldev; \ 4762 ldev = netdev_lower_get_next(dev, &(iter))) 4763 4764 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4765 struct list_head **iter); 4766 int netdev_walk_all_lower_dev(struct net_device *dev, 4767 int (*fn)(struct net_device *lower_dev, 4768 struct netdev_nested_priv *priv), 4769 struct netdev_nested_priv *priv); 4770 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4771 int (*fn)(struct net_device *lower_dev, 4772 struct netdev_nested_priv *priv), 4773 struct netdev_nested_priv *priv); 4774 4775 void *netdev_adjacent_get_private(struct list_head *adj_list); 4776 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4777 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4778 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4779 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4780 struct netlink_ext_ack *extack); 4781 int netdev_master_upper_dev_link(struct net_device *dev, 4782 struct net_device *upper_dev, 4783 void *upper_priv, void *upper_info, 4784 struct netlink_ext_ack *extack); 4785 void netdev_upper_dev_unlink(struct net_device *dev, 4786 struct net_device *upper_dev); 4787 int netdev_adjacent_change_prepare(struct net_device *old_dev, 4788 struct net_device *new_dev, 4789 struct net_device *dev, 4790 struct netlink_ext_ack *extack); 4791 void netdev_adjacent_change_commit(struct net_device *old_dev, 4792 struct net_device *new_dev, 4793 struct net_device *dev); 4794 void netdev_adjacent_change_abort(struct net_device *old_dev, 4795 struct net_device *new_dev, 4796 struct net_device *dev); 4797 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4798 void *netdev_lower_dev_get_private(struct net_device *dev, 4799 struct net_device *lower_dev); 4800 void netdev_lower_state_changed(struct net_device *lower_dev, 4801 void *lower_state_info); 4802 4803 /* RSS keys are 40 or 52 bytes long */ 4804 #define NETDEV_RSS_KEY_LEN 52 4805 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4806 void netdev_rss_key_fill(void *buffer, size_t len); 4807 4808 int skb_checksum_help(struct sk_buff *skb); 4809 int skb_crc32c_csum_help(struct sk_buff *skb); 4810 int skb_csum_hwoffload_help(struct sk_buff *skb, 4811 const netdev_features_t features); 4812 4813 struct netdev_bonding_info { 4814 ifslave slave; 4815 ifbond master; 4816 }; 4817 4818 struct netdev_notifier_bonding_info { 4819 struct netdev_notifier_info info; /* must be first */ 4820 struct netdev_bonding_info bonding_info; 4821 }; 4822 4823 void netdev_bonding_info_change(struct net_device *dev, 4824 struct netdev_bonding_info *bonding_info); 4825 4826 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4827 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4828 #else 4829 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4830 const void *data) 4831 { 4832 } 4833 #endif 4834 4835 __be16 skb_network_protocol(struct sk_buff *skb, int *depth); 4836 4837 static inline bool can_checksum_protocol(netdev_features_t features, 4838 __be16 protocol) 4839 { 4840 if (protocol == htons(ETH_P_FCOE)) 4841 return !!(features & NETIF_F_FCOE_CRC); 4842 4843 /* Assume this is an IP checksum (not SCTP CRC) */ 4844 4845 if (features & NETIF_F_HW_CSUM) { 4846 /* Can checksum everything */ 4847 return true; 4848 } 4849 4850 switch (protocol) { 4851 case htons(ETH_P_IP): 4852 return !!(features & NETIF_F_IP_CSUM); 4853 case htons(ETH_P_IPV6): 4854 return !!(features & NETIF_F_IPV6_CSUM); 4855 default: 4856 return false; 4857 } 4858 } 4859 4860 #ifdef CONFIG_BUG 4861 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 4862 #else 4863 static inline void netdev_rx_csum_fault(struct net_device *dev, 4864 struct sk_buff *skb) 4865 { 4866 } 4867 #endif 4868 /* rx skb timestamps */ 4869 void net_enable_timestamp(void); 4870 void net_disable_timestamp(void); 4871 4872 static inline ktime_t netdev_get_tstamp(struct net_device *dev, 4873 const struct skb_shared_hwtstamps *hwtstamps, 4874 bool cycles) 4875 { 4876 const struct net_device_ops *ops = dev->netdev_ops; 4877 4878 if (ops->ndo_get_tstamp) 4879 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); 4880 4881 return hwtstamps->hwtstamp; 4882 } 4883 4884 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4885 struct sk_buff *skb, struct net_device *dev, 4886 bool more) 4887 { 4888 __this_cpu_write(softnet_data.xmit.more, more); 4889 return ops->ndo_start_xmit(skb, dev); 4890 } 4891 4892 static inline bool netdev_xmit_more(void) 4893 { 4894 return __this_cpu_read(softnet_data.xmit.more); 4895 } 4896 4897 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 4898 struct netdev_queue *txq, bool more) 4899 { 4900 const struct net_device_ops *ops = dev->netdev_ops; 4901 netdev_tx_t rc; 4902 4903 rc = __netdev_start_xmit(ops, skb, dev, more); 4904 if (rc == NETDEV_TX_OK) 4905 txq_trans_update(txq); 4906 4907 return rc; 4908 } 4909 4910 int netdev_class_create_file_ns(const struct class_attribute *class_attr, 4911 const void *ns); 4912 void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 4913 const void *ns); 4914 4915 extern const struct kobj_ns_type_operations net_ns_type_operations; 4916 4917 const char *netdev_drivername(const struct net_device *dev); 4918 4919 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 4920 netdev_features_t f2) 4921 { 4922 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 4923 if (f1 & NETIF_F_HW_CSUM) 4924 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4925 else 4926 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4927 } 4928 4929 return f1 & f2; 4930 } 4931 4932 static inline netdev_features_t netdev_get_wanted_features( 4933 struct net_device *dev) 4934 { 4935 return (dev->features & ~dev->hw_features) | dev->wanted_features; 4936 } 4937 netdev_features_t netdev_increment_features(netdev_features_t all, 4938 netdev_features_t one, netdev_features_t mask); 4939 4940 /* Allow TSO being used on stacked device : 4941 * Performing the GSO segmentation before last device 4942 * is a performance improvement. 4943 */ 4944 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 4945 netdev_features_t mask) 4946 { 4947 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 4948 } 4949 4950 int __netdev_update_features(struct net_device *dev); 4951 void netdev_update_features(struct net_device *dev); 4952 void netdev_change_features(struct net_device *dev); 4953 4954 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 4955 struct net_device *dev); 4956 4957 netdev_features_t passthru_features_check(struct sk_buff *skb, 4958 struct net_device *dev, 4959 netdev_features_t features); 4960 netdev_features_t netif_skb_features(struct sk_buff *skb); 4961 void skb_warn_bad_offload(const struct sk_buff *skb); 4962 4963 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4964 { 4965 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 4966 4967 /* check flags correspondence */ 4968 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4969 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 4970 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 4971 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 4972 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 4973 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 4974 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 4975 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 4976 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 4977 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 4978 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 4979 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4980 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 4981 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 4982 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 4983 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 4984 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 4985 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 4986 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 4987 4988 return (features & feature) == feature; 4989 } 4990 4991 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 4992 { 4993 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 4994 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 4995 } 4996 4997 static inline bool netif_needs_gso(struct sk_buff *skb, 4998 netdev_features_t features) 4999 { 5000 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 5001 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 5002 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 5003 } 5004 5005 void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 5006 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 5007 void netif_inherit_tso_max(struct net_device *to, 5008 const struct net_device *from); 5009 5010 static inline bool netif_is_macsec(const struct net_device *dev) 5011 { 5012 return dev->priv_flags & IFF_MACSEC; 5013 } 5014 5015 static inline bool netif_is_macvlan(const struct net_device *dev) 5016 { 5017 return dev->priv_flags & IFF_MACVLAN; 5018 } 5019 5020 static inline bool netif_is_macvlan_port(const struct net_device *dev) 5021 { 5022 return dev->priv_flags & IFF_MACVLAN_PORT; 5023 } 5024 5025 static inline bool netif_is_bond_master(const struct net_device *dev) 5026 { 5027 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 5028 } 5029 5030 static inline bool netif_is_bond_slave(const struct net_device *dev) 5031 { 5032 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 5033 } 5034 5035 static inline bool netif_supports_nofcs(struct net_device *dev) 5036 { 5037 return dev->priv_flags & IFF_SUPP_NOFCS; 5038 } 5039 5040 static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 5041 { 5042 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 5043 } 5044 5045 static inline bool netif_is_l3_master(const struct net_device *dev) 5046 { 5047 return dev->priv_flags & IFF_L3MDEV_MASTER; 5048 } 5049 5050 static inline bool netif_is_l3_slave(const struct net_device *dev) 5051 { 5052 return dev->priv_flags & IFF_L3MDEV_SLAVE; 5053 } 5054 5055 static inline int dev_sdif(const struct net_device *dev) 5056 { 5057 #ifdef CONFIG_NET_L3_MASTER_DEV 5058 if (netif_is_l3_slave(dev)) 5059 return dev->ifindex; 5060 #endif 5061 return 0; 5062 } 5063 5064 static inline bool netif_is_bridge_master(const struct net_device *dev) 5065 { 5066 return dev->priv_flags & IFF_EBRIDGE; 5067 } 5068 5069 static inline bool netif_is_bridge_port(const struct net_device *dev) 5070 { 5071 return dev->priv_flags & IFF_BRIDGE_PORT; 5072 } 5073 5074 static inline bool netif_is_ovs_master(const struct net_device *dev) 5075 { 5076 return dev->priv_flags & IFF_OPENVSWITCH; 5077 } 5078 5079 static inline bool netif_is_ovs_port(const struct net_device *dev) 5080 { 5081 return dev->priv_flags & IFF_OVS_DATAPATH; 5082 } 5083 5084 static inline bool netif_is_any_bridge_master(const struct net_device *dev) 5085 { 5086 return netif_is_bridge_master(dev) || netif_is_ovs_master(dev); 5087 } 5088 5089 static inline bool netif_is_any_bridge_port(const struct net_device *dev) 5090 { 5091 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); 5092 } 5093 5094 static inline bool netif_is_team_master(const struct net_device *dev) 5095 { 5096 return dev->priv_flags & IFF_TEAM; 5097 } 5098 5099 static inline bool netif_is_team_port(const struct net_device *dev) 5100 { 5101 return dev->priv_flags & IFF_TEAM_PORT; 5102 } 5103 5104 static inline bool netif_is_lag_master(const struct net_device *dev) 5105 { 5106 return netif_is_bond_master(dev) || netif_is_team_master(dev); 5107 } 5108 5109 static inline bool netif_is_lag_port(const struct net_device *dev) 5110 { 5111 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 5112 } 5113 5114 static inline bool netif_is_rxfh_configured(const struct net_device *dev) 5115 { 5116 return dev->priv_flags & IFF_RXFH_CONFIGURED; 5117 } 5118 5119 static inline bool netif_is_failover(const struct net_device *dev) 5120 { 5121 return dev->priv_flags & IFF_FAILOVER; 5122 } 5123 5124 static inline bool netif_is_failover_slave(const struct net_device *dev) 5125 { 5126 return dev->priv_flags & IFF_FAILOVER_SLAVE; 5127 } 5128 5129 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 5130 static inline void netif_keep_dst(struct net_device *dev) 5131 { 5132 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 5133 } 5134 5135 /* return true if dev can't cope with mtu frames that need vlan tag insertion */ 5136 static inline bool netif_reduces_vlan_mtu(struct net_device *dev) 5137 { 5138 /* TODO: reserve and use an additional IFF bit, if we get more users */ 5139 return netif_is_macsec(dev); 5140 } 5141 5142 extern struct pernet_operations __net_initdata loopback_net_ops; 5143 5144 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 5145 5146 /* netdev_printk helpers, similar to dev_printk */ 5147 5148 static inline const char *netdev_name(const struct net_device *dev) 5149 { 5150 if (!dev->name[0] || strchr(dev->name, '%')) 5151 return "(unnamed net_device)"; 5152 return dev->name; 5153 } 5154 5155 static inline const char *netdev_reg_state(const struct net_device *dev) 5156 { 5157 switch (dev->reg_state) { 5158 case NETREG_UNINITIALIZED: return " (uninitialized)"; 5159 case NETREG_REGISTERED: return ""; 5160 case NETREG_UNREGISTERING: return " (unregistering)"; 5161 case NETREG_UNREGISTERED: return " (unregistered)"; 5162 case NETREG_RELEASED: return " (released)"; 5163 case NETREG_DUMMY: return " (dummy)"; 5164 } 5165 5166 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); 5167 return " (unknown)"; 5168 } 5169 5170 #define MODULE_ALIAS_NETDEV(device) \ 5171 MODULE_ALIAS("netdev-" device) 5172 5173 /* 5174 * netdev_WARN() acts like dev_printk(), but with the key difference 5175 * of using a WARN/WARN_ON to get the message out, including the 5176 * file/line information and a backtrace. 5177 */ 5178 #define netdev_WARN(dev, format, args...) \ 5179 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5180 netdev_reg_state(dev), ##args) 5181 5182 #define netdev_WARN_ONCE(dev, format, args...) \ 5183 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5184 netdev_reg_state(dev), ##args) 5185 5186 /* 5187 * The list of packet types we will receive (as opposed to discard) 5188 * and the routines to invoke. 5189 * 5190 * Why 16. Because with 16 the only overlap we get on a hash of the 5191 * low nibble of the protocol value is RARP/SNAP/X.25. 5192 * 5193 * 0800 IP 5194 * 0001 802.3 5195 * 0002 AX.25 5196 * 0004 802.2 5197 * 8035 RARP 5198 * 0005 SNAP 5199 * 0805 X.25 5200 * 0806 ARP 5201 * 8137 IPX 5202 * 0009 Localtalk 5203 * 86DD IPv6 5204 */ 5205 #define PTYPE_HASH_SIZE (16) 5206 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 5207 5208 extern struct list_head ptype_all __read_mostly; 5209 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 5210 5211 extern struct net_device *blackhole_netdev; 5212 5213 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ 5214 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) 5215 #define DEV_STATS_ADD(DEV, FIELD, VAL) \ 5216 atomic_long_add((VAL), &(DEV)->stats.__##FIELD) 5217 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) 5218 5219 #endif /* _LINUX_NETDEVICE_H */ 5220