1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Definitions for the Interfaces handler. 8 * 9 * Version: @(#)dev.h 1.0.10 08/12/93 10 * 11 * Authors: Ross Biro 12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 14 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> 15 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 16 * Bjorn Ekwall. <bj0rn@blox.se> 17 * Pekka Riikonen <priikone@poseidon.pspt.fi> 18 * 19 * Moved to /usr/include/linux for NET3 20 */ 21 #ifndef _LINUX_NETDEVICE_H 22 #define _LINUX_NETDEVICE_H 23 24 #include <linux/timer.h> 25 #include <linux/bug.h> 26 #include <linux/delay.h> 27 #include <linux/atomic.h> 28 #include <linux/prefetch.h> 29 #include <asm/cache.h> 30 #include <asm/byteorder.h> 31 #include <asm/local.h> 32 33 #include <linux/percpu.h> 34 #include <linux/rculist.h> 35 #include <linux/workqueue.h> 36 #include <linux/dynamic_queue_limits.h> 37 38 #include <net/net_namespace.h> 39 #ifdef CONFIG_DCB 40 #include <net/dcbnl.h> 41 #endif 42 #include <net/netprio_cgroup.h> 43 #include <net/xdp.h> 44 45 #include <linux/netdev_features.h> 46 #include <linux/neighbour.h> 47 #include <uapi/linux/netdevice.h> 48 #include <uapi/linux/if_bonding.h> 49 #include <uapi/linux/pkt_cls.h> 50 #include <uapi/linux/netdev.h> 51 #include <linux/hashtable.h> 52 #include <linux/rbtree.h> 53 #include <net/net_trackers.h> 54 #include <net/net_debug.h> 55 #include <net/dropreason-core.h> 56 57 struct netpoll_info; 58 struct device; 59 struct ethtool_ops; 60 struct phy_device; 61 struct dsa_port; 62 struct ip_tunnel_parm; 63 struct macsec_context; 64 struct macsec_ops; 65 struct netdev_name_node; 66 struct sd_flow_limit; 67 struct sfp_bus; 68 /* 802.11 specific */ 69 struct wireless_dev; 70 /* 802.15.4 specific */ 71 struct wpan_dev; 72 struct mpls_dev; 73 /* UDP Tunnel offloads */ 74 struct udp_tunnel_info; 75 struct udp_tunnel_nic_info; 76 struct udp_tunnel_nic; 77 struct bpf_prog; 78 struct xdp_buff; 79 struct xdp_md; 80 81 void synchronize_net(void); 82 void netdev_set_default_ethtool_ops(struct net_device *dev, 83 const struct ethtool_ops *ops); 84 void netdev_sw_irq_coalesce_default_on(struct net_device *dev); 85 86 /* Backlog congestion levels */ 87 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 88 #define NET_RX_DROP 1 /* packet dropped */ 89 90 #define MAX_NEST_DEV 8 91 92 /* 93 * Transmit return codes: transmit return codes originate from three different 94 * namespaces: 95 * 96 * - qdisc return codes 97 * - driver transmit return codes 98 * - errno values 99 * 100 * Drivers are allowed to return any one of those in their hard_start_xmit() 101 * function. Real network devices commonly used with qdiscs should only return 102 * the driver transmit return codes though - when qdiscs are used, the actual 103 * transmission happens asynchronously, so the value is not propagated to 104 * higher layers. Virtual network devices transmit synchronously; in this case 105 * the driver transmit return codes are consumed by dev_queue_xmit(), and all 106 * others are propagated to higher layers. 107 */ 108 109 /* qdisc ->enqueue() return codes. */ 110 #define NET_XMIT_SUCCESS 0x00 111 #define NET_XMIT_DROP 0x01 /* skb dropped */ 112 #define NET_XMIT_CN 0x02 /* congestion notification */ 113 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 114 115 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 116 * indicates that the device will soon be dropping packets, or already drops 117 * some packets of the same priority; prompting us to send less aggressively. */ 118 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 119 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 120 121 /* Driver transmit return codes */ 122 #define NETDEV_TX_MASK 0xf0 123 124 enum netdev_tx { 125 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 126 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 127 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 128 }; 129 typedef enum netdev_tx netdev_tx_t; 130 131 /* 132 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 133 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 134 */ 135 static inline bool dev_xmit_complete(int rc) 136 { 137 /* 138 * Positive cases with an skb consumed by a driver: 139 * - successful transmission (rc == NETDEV_TX_OK) 140 * - error while transmitting (rc < 0) 141 * - error while queueing to a different device (rc & NET_XMIT_MASK) 142 */ 143 if (likely(rc < NET_XMIT_MASK)) 144 return true; 145 146 return false; 147 } 148 149 /* 150 * Compute the worst-case header length according to the protocols 151 * used. 152 */ 153 154 #if defined(CONFIG_HYPERV_NET) 155 # define LL_MAX_HEADER 128 156 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 157 # if defined(CONFIG_MAC80211_MESH) 158 # define LL_MAX_HEADER 128 159 # else 160 # define LL_MAX_HEADER 96 161 # endif 162 #else 163 # define LL_MAX_HEADER 32 164 #endif 165 166 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ 167 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) 168 #define MAX_HEADER LL_MAX_HEADER 169 #else 170 #define MAX_HEADER (LL_MAX_HEADER + 48) 171 #endif 172 173 /* 174 * Old network device statistics. Fields are native words 175 * (unsigned long) so they can be read and written atomically. 176 */ 177 178 #define NET_DEV_STAT(FIELD) \ 179 union { \ 180 unsigned long FIELD; \ 181 atomic_long_t __##FIELD; \ 182 } 183 184 struct net_device_stats { 185 NET_DEV_STAT(rx_packets); 186 NET_DEV_STAT(tx_packets); 187 NET_DEV_STAT(rx_bytes); 188 NET_DEV_STAT(tx_bytes); 189 NET_DEV_STAT(rx_errors); 190 NET_DEV_STAT(tx_errors); 191 NET_DEV_STAT(rx_dropped); 192 NET_DEV_STAT(tx_dropped); 193 NET_DEV_STAT(multicast); 194 NET_DEV_STAT(collisions); 195 NET_DEV_STAT(rx_length_errors); 196 NET_DEV_STAT(rx_over_errors); 197 NET_DEV_STAT(rx_crc_errors); 198 NET_DEV_STAT(rx_frame_errors); 199 NET_DEV_STAT(rx_fifo_errors); 200 NET_DEV_STAT(rx_missed_errors); 201 NET_DEV_STAT(tx_aborted_errors); 202 NET_DEV_STAT(tx_carrier_errors); 203 NET_DEV_STAT(tx_fifo_errors); 204 NET_DEV_STAT(tx_heartbeat_errors); 205 NET_DEV_STAT(tx_window_errors); 206 NET_DEV_STAT(rx_compressed); 207 NET_DEV_STAT(tx_compressed); 208 }; 209 #undef NET_DEV_STAT 210 211 /* per-cpu stats, allocated on demand. 212 * Try to fit them in a single cache line, for dev_get_stats() sake. 213 */ 214 struct net_device_core_stats { 215 unsigned long rx_dropped; 216 unsigned long tx_dropped; 217 unsigned long rx_nohandler; 218 unsigned long rx_otherhost_dropped; 219 } __aligned(4 * sizeof(unsigned long)); 220 221 #include <linux/cache.h> 222 #include <linux/skbuff.h> 223 224 #ifdef CONFIG_RPS 225 #include <linux/static_key.h> 226 extern struct static_key_false rps_needed; 227 extern struct static_key_false rfs_needed; 228 #endif 229 230 struct neighbour; 231 struct neigh_parms; 232 struct sk_buff; 233 234 struct netdev_hw_addr { 235 struct list_head list; 236 struct rb_node node; 237 unsigned char addr[MAX_ADDR_LEN]; 238 unsigned char type; 239 #define NETDEV_HW_ADDR_T_LAN 1 240 #define NETDEV_HW_ADDR_T_SAN 2 241 #define NETDEV_HW_ADDR_T_UNICAST 3 242 #define NETDEV_HW_ADDR_T_MULTICAST 4 243 bool global_use; 244 int sync_cnt; 245 int refcount; 246 int synced; 247 struct rcu_head rcu_head; 248 }; 249 250 struct netdev_hw_addr_list { 251 struct list_head list; 252 int count; 253 254 /* Auxiliary tree for faster lookup on addition and deletion */ 255 struct rb_root tree; 256 }; 257 258 #define netdev_hw_addr_list_count(l) ((l)->count) 259 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 260 #define netdev_hw_addr_list_for_each(ha, l) \ 261 list_for_each_entry(ha, &(l)->list, list) 262 263 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 264 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 265 #define netdev_for_each_uc_addr(ha, dev) \ 266 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 267 #define netdev_for_each_synced_uc_addr(_ha, _dev) \ 268 netdev_for_each_uc_addr((_ha), (_dev)) \ 269 if ((_ha)->sync_cnt) 270 271 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 272 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 273 #define netdev_for_each_mc_addr(ha, dev) \ 274 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 275 #define netdev_for_each_synced_mc_addr(_ha, _dev) \ 276 netdev_for_each_mc_addr((_ha), (_dev)) \ 277 if ((_ha)->sync_cnt) 278 279 struct hh_cache { 280 unsigned int hh_len; 281 seqlock_t hh_lock; 282 283 /* cached hardware header; allow for machine alignment needs. */ 284 #define HH_DATA_MOD 16 285 #define HH_DATA_OFF(__len) \ 286 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 287 #define HH_DATA_ALIGN(__len) \ 288 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 289 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 290 }; 291 292 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. 293 * Alternative is: 294 * dev->hard_header_len ? (dev->hard_header_len + 295 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 296 * 297 * We could use other alignment values, but we must maintain the 298 * relationship HH alignment <= LL alignment. 299 */ 300 #define LL_RESERVED_SPACE(dev) \ 301 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ 302 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 303 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 304 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ 305 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 306 307 struct header_ops { 308 int (*create) (struct sk_buff *skb, struct net_device *dev, 309 unsigned short type, const void *daddr, 310 const void *saddr, unsigned int len); 311 int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 312 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 313 void (*cache_update)(struct hh_cache *hh, 314 const struct net_device *dev, 315 const unsigned char *haddr); 316 bool (*validate)(const char *ll_header, unsigned int len); 317 __be16 (*parse_protocol)(const struct sk_buff *skb); 318 }; 319 320 /* These flag bits are private to the generic network queueing 321 * layer; they may not be explicitly referenced by any other 322 * code. 323 */ 324 325 enum netdev_state_t { 326 __LINK_STATE_START, 327 __LINK_STATE_PRESENT, 328 __LINK_STATE_NOCARRIER, 329 __LINK_STATE_LINKWATCH_PENDING, 330 __LINK_STATE_DORMANT, 331 __LINK_STATE_TESTING, 332 }; 333 334 struct gro_list { 335 struct list_head list; 336 int count; 337 }; 338 339 /* 340 * size of gro hash buckets, must less than bit number of 341 * napi_struct::gro_bitmask 342 */ 343 #define GRO_HASH_BUCKETS 8 344 345 /* 346 * Structure for NAPI scheduling similar to tasklet but with weighting 347 */ 348 struct napi_struct { 349 /* The poll_list must only be managed by the entity which 350 * changes the state of the NAPI_STATE_SCHED bit. This means 351 * whoever atomically sets that bit can add this napi_struct 352 * to the per-CPU poll_list, and whoever clears that bit 353 * can remove from the list right before clearing the bit. 354 */ 355 struct list_head poll_list; 356 357 unsigned long state; 358 int weight; 359 int defer_hard_irqs_count; 360 unsigned long gro_bitmask; 361 int (*poll)(struct napi_struct *, int); 362 #ifdef CONFIG_NETPOLL 363 /* CPU actively polling if netpoll is configured */ 364 int poll_owner; 365 #endif 366 /* CPU on which NAPI has been scheduled for processing */ 367 int list_owner; 368 struct net_device *dev; 369 struct gro_list gro_hash[GRO_HASH_BUCKETS]; 370 struct sk_buff *skb; 371 struct list_head rx_list; /* Pending GRO_NORMAL skbs */ 372 int rx_count; /* length of rx_list */ 373 unsigned int napi_id; 374 struct hrtimer timer; 375 struct task_struct *thread; 376 /* control-path-only fields follow */ 377 struct list_head dev_list; 378 struct hlist_node napi_hash_node; 379 }; 380 381 enum { 382 NAPI_STATE_SCHED, /* Poll is scheduled */ 383 NAPI_STATE_MISSED, /* reschedule a napi */ 384 NAPI_STATE_DISABLE, /* Disable pending */ 385 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 386 NAPI_STATE_LISTED, /* NAPI added to system lists */ 387 NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ 388 NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ 389 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ 390 NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ 391 NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ 392 }; 393 394 enum { 395 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), 396 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), 397 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), 398 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), 399 NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), 400 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), 401 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), 402 NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), 403 NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), 404 NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), 405 }; 406 407 enum gro_result { 408 GRO_MERGED, 409 GRO_MERGED_FREE, 410 GRO_HELD, 411 GRO_NORMAL, 412 GRO_CONSUMED, 413 }; 414 typedef enum gro_result gro_result_t; 415 416 /* 417 * enum rx_handler_result - Possible return values for rx_handlers. 418 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it 419 * further. 420 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in 421 * case skb->dev was changed by rx_handler. 422 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. 423 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. 424 * 425 * rx_handlers are functions called from inside __netif_receive_skb(), to do 426 * special processing of the skb, prior to delivery to protocol handlers. 427 * 428 * Currently, a net_device can only have a single rx_handler registered. Trying 429 * to register a second rx_handler will return -EBUSY. 430 * 431 * To register a rx_handler on a net_device, use netdev_rx_handler_register(). 432 * To unregister a rx_handler on a net_device, use 433 * netdev_rx_handler_unregister(). 434 * 435 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to 436 * do with the skb. 437 * 438 * If the rx_handler consumed the skb in some way, it should return 439 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for 440 * the skb to be delivered in some other way. 441 * 442 * If the rx_handler changed skb->dev, to divert the skb to another 443 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the 444 * new device will be called if it exists. 445 * 446 * If the rx_handler decides the skb should be ignored, it should return 447 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that 448 * are registered on exact device (ptype->dev == skb->dev). 449 * 450 * If the rx_handler didn't change skb->dev, but wants the skb to be normally 451 * delivered, it should return RX_HANDLER_PASS. 452 * 453 * A device without a registered rx_handler will behave as if rx_handler 454 * returned RX_HANDLER_PASS. 455 */ 456 457 enum rx_handler_result { 458 RX_HANDLER_CONSUMED, 459 RX_HANDLER_ANOTHER, 460 RX_HANDLER_EXACT, 461 RX_HANDLER_PASS, 462 }; 463 typedef enum rx_handler_result rx_handler_result_t; 464 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 465 466 void __napi_schedule(struct napi_struct *n); 467 void __napi_schedule_irqoff(struct napi_struct *n); 468 469 static inline bool napi_disable_pending(struct napi_struct *n) 470 { 471 return test_bit(NAPI_STATE_DISABLE, &n->state); 472 } 473 474 static inline bool napi_prefer_busy_poll(struct napi_struct *n) 475 { 476 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); 477 } 478 479 bool napi_schedule_prep(struct napi_struct *n); 480 481 /** 482 * napi_schedule - schedule NAPI poll 483 * @n: NAPI context 484 * 485 * Schedule NAPI poll routine to be called if it is not already 486 * running. 487 */ 488 static inline void napi_schedule(struct napi_struct *n) 489 { 490 if (napi_schedule_prep(n)) 491 __napi_schedule(n); 492 } 493 494 /** 495 * napi_schedule_irqoff - schedule NAPI poll 496 * @n: NAPI context 497 * 498 * Variant of napi_schedule(), assuming hard irqs are masked. 499 */ 500 static inline void napi_schedule_irqoff(struct napi_struct *n) 501 { 502 if (napi_schedule_prep(n)) 503 __napi_schedule_irqoff(n); 504 } 505 506 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ 507 static inline bool napi_reschedule(struct napi_struct *napi) 508 { 509 if (napi_schedule_prep(napi)) { 510 __napi_schedule(napi); 511 return true; 512 } 513 return false; 514 } 515 516 /** 517 * napi_complete_done - NAPI processing complete 518 * @n: NAPI context 519 * @work_done: number of packets processed 520 * 521 * Mark NAPI processing as complete. Should only be called if poll budget 522 * has not been completely consumed. 523 * Prefer over napi_complete(). 524 * Return false if device should avoid rearming interrupts. 525 */ 526 bool napi_complete_done(struct napi_struct *n, int work_done); 527 528 static inline bool napi_complete(struct napi_struct *n) 529 { 530 return napi_complete_done(n, 0); 531 } 532 533 int dev_set_threaded(struct net_device *dev, bool threaded); 534 535 /** 536 * napi_disable - prevent NAPI from scheduling 537 * @n: NAPI context 538 * 539 * Stop NAPI from being scheduled on this context. 540 * Waits till any outstanding processing completes. 541 */ 542 void napi_disable(struct napi_struct *n); 543 544 void napi_enable(struct napi_struct *n); 545 546 /** 547 * napi_synchronize - wait until NAPI is not running 548 * @n: NAPI context 549 * 550 * Wait until NAPI is done being scheduled on this context. 551 * Waits till any outstanding processing completes but 552 * does not disable future activations. 553 */ 554 static inline void napi_synchronize(const struct napi_struct *n) 555 { 556 if (IS_ENABLED(CONFIG_SMP)) 557 while (test_bit(NAPI_STATE_SCHED, &n->state)) 558 msleep(1); 559 else 560 barrier(); 561 } 562 563 /** 564 * napi_if_scheduled_mark_missed - if napi is running, set the 565 * NAPIF_STATE_MISSED 566 * @n: NAPI context 567 * 568 * If napi is running, set the NAPIF_STATE_MISSED, and return true if 569 * NAPI is scheduled. 570 **/ 571 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) 572 { 573 unsigned long val, new; 574 575 val = READ_ONCE(n->state); 576 do { 577 if (val & NAPIF_STATE_DISABLE) 578 return true; 579 580 if (!(val & NAPIF_STATE_SCHED)) 581 return false; 582 583 new = val | NAPIF_STATE_MISSED; 584 } while (!try_cmpxchg(&n->state, &val, new)); 585 586 return true; 587 } 588 589 enum netdev_queue_state_t { 590 __QUEUE_STATE_DRV_XOFF, 591 __QUEUE_STATE_STACK_XOFF, 592 __QUEUE_STATE_FROZEN, 593 }; 594 595 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) 596 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) 597 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) 598 599 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) 600 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ 601 QUEUE_STATE_FROZEN) 602 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ 603 QUEUE_STATE_FROZEN) 604 605 /* 606 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The 607 * netif_tx_* functions below are used to manipulate this flag. The 608 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit 609 * queue independently. The netif_xmit_*stopped functions below are called 610 * to check if the queue has been stopped by the driver or stack (either 611 * of the XOFF bits are set in the state). Drivers should not need to call 612 * netif_xmit*stopped functions, they should only be using netif_tx_*. 613 */ 614 615 struct netdev_queue { 616 /* 617 * read-mostly part 618 */ 619 struct net_device *dev; 620 netdevice_tracker dev_tracker; 621 622 struct Qdisc __rcu *qdisc; 623 struct Qdisc __rcu *qdisc_sleeping; 624 #ifdef CONFIG_SYSFS 625 struct kobject kobj; 626 #endif 627 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 628 int numa_node; 629 #endif 630 unsigned long tx_maxrate; 631 /* 632 * Number of TX timeouts for this queue 633 * (/sys/class/net/DEV/Q/trans_timeout) 634 */ 635 atomic_long_t trans_timeout; 636 637 /* Subordinate device that the queue has been assigned to */ 638 struct net_device *sb_dev; 639 #ifdef CONFIG_XDP_SOCKETS 640 struct xsk_buff_pool *pool; 641 #endif 642 /* 643 * write-mostly part 644 */ 645 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 646 int xmit_lock_owner; 647 /* 648 * Time (in jiffies) of last Tx 649 */ 650 unsigned long trans_start; 651 652 unsigned long state; 653 654 #ifdef CONFIG_BQL 655 struct dql dql; 656 #endif 657 } ____cacheline_aligned_in_smp; 658 659 extern int sysctl_fb_tunnels_only_for_init_net; 660 extern int sysctl_devconf_inherit_init_net; 661 662 /* 663 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns 664 * == 1 : For initns only 665 * == 2 : For none. 666 */ 667 static inline bool net_has_fallback_tunnels(const struct net *net) 668 { 669 #if IS_ENABLED(CONFIG_SYSCTL) 670 int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); 671 672 return !fb_tunnels_only_for_init_net || 673 (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); 674 #else 675 return true; 676 #endif 677 } 678 679 static inline int net_inherit_devconf(void) 680 { 681 #if IS_ENABLED(CONFIG_SYSCTL) 682 return READ_ONCE(sysctl_devconf_inherit_init_net); 683 #else 684 return 0; 685 #endif 686 } 687 688 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 689 { 690 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 691 return q->numa_node; 692 #else 693 return NUMA_NO_NODE; 694 #endif 695 } 696 697 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) 698 { 699 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 700 q->numa_node = node; 701 #endif 702 } 703 704 #ifdef CONFIG_RPS 705 /* 706 * This structure holds an RPS map which can be of variable length. The 707 * map is an array of CPUs. 708 */ 709 struct rps_map { 710 unsigned int len; 711 struct rcu_head rcu; 712 u16 cpus[]; 713 }; 714 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) 715 716 /* 717 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the 718 * tail pointer for that CPU's input queue at the time of last enqueue, and 719 * a hardware filter index. 720 */ 721 struct rps_dev_flow { 722 u16 cpu; 723 u16 filter; 724 unsigned int last_qtail; 725 }; 726 #define RPS_NO_FILTER 0xffff 727 728 /* 729 * The rps_dev_flow_table structure contains a table of flow mappings. 730 */ 731 struct rps_dev_flow_table { 732 unsigned int mask; 733 struct rcu_head rcu; 734 struct rps_dev_flow flows[]; 735 }; 736 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ 737 ((_num) * sizeof(struct rps_dev_flow))) 738 739 /* 740 * The rps_sock_flow_table contains mappings of flows to the last CPU 741 * on which they were processed by the application (set in recvmsg). 742 * Each entry is a 32bit value. Upper part is the high-order bits 743 * of flow hash, lower part is CPU number. 744 * rps_cpu_mask is used to partition the space, depending on number of 745 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 746 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, 747 * meaning we use 32-6=26 bits for the hash. 748 */ 749 struct rps_sock_flow_table { 750 u32 mask; 751 752 u32 ents[] ____cacheline_aligned_in_smp; 753 }; 754 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) 755 756 #define RPS_NO_CPU 0xffff 757 758 extern u32 rps_cpu_mask; 759 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 760 761 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 762 u32 hash) 763 { 764 if (table && hash) { 765 unsigned int index = hash & table->mask; 766 u32 val = hash & ~rps_cpu_mask; 767 768 /* We only give a hint, preemption can change CPU under us */ 769 val |= raw_smp_processor_id(); 770 771 /* The following WRITE_ONCE() is paired with the READ_ONCE() 772 * here, and another one in get_rps_cpu(). 773 */ 774 if (READ_ONCE(table->ents[index]) != val) 775 WRITE_ONCE(table->ents[index], val); 776 } 777 } 778 779 #ifdef CONFIG_RFS_ACCEL 780 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, 781 u16 filter_id); 782 #endif 783 #endif /* CONFIG_RPS */ 784 785 /* This structure contains an instance of an RX queue. */ 786 struct netdev_rx_queue { 787 struct xdp_rxq_info xdp_rxq; 788 #ifdef CONFIG_RPS 789 struct rps_map __rcu *rps_map; 790 struct rps_dev_flow_table __rcu *rps_flow_table; 791 #endif 792 struct kobject kobj; 793 struct net_device *dev; 794 netdevice_tracker dev_tracker; 795 796 #ifdef CONFIG_XDP_SOCKETS 797 struct xsk_buff_pool *pool; 798 #endif 799 } ____cacheline_aligned_in_smp; 800 801 /* 802 * RX queue sysfs structures and functions. 803 */ 804 struct rx_queue_attribute { 805 struct attribute attr; 806 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); 807 ssize_t (*store)(struct netdev_rx_queue *queue, 808 const char *buf, size_t len); 809 }; 810 811 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ 812 enum xps_map_type { 813 XPS_CPUS = 0, 814 XPS_RXQS, 815 XPS_MAPS_MAX, 816 }; 817 818 #ifdef CONFIG_XPS 819 /* 820 * This structure holds an XPS map which can be of variable length. The 821 * map is an array of queues. 822 */ 823 struct xps_map { 824 unsigned int len; 825 unsigned int alloc_len; 826 struct rcu_head rcu; 827 u16 queues[]; 828 }; 829 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) 830 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ 831 - sizeof(struct xps_map)) / sizeof(u16)) 832 833 /* 834 * This structure holds all XPS maps for device. Maps are indexed by CPU. 835 * 836 * We keep track of the number of cpus/rxqs used when the struct is allocated, 837 * in nr_ids. This will help not accessing out-of-bound memory. 838 * 839 * We keep track of the number of traffic classes used when the struct is 840 * allocated, in num_tc. This will be used to navigate the maps, to ensure we're 841 * not crossing its upper bound, as the original dev->num_tc can be updated in 842 * the meantime. 843 */ 844 struct xps_dev_maps { 845 struct rcu_head rcu; 846 unsigned int nr_ids; 847 s16 num_tc; 848 struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ 849 }; 850 851 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ 852 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) 853 854 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ 855 (_rxqs * (_tcs) * sizeof(struct xps_map *))) 856 857 #endif /* CONFIG_XPS */ 858 859 #define TC_MAX_QUEUE 16 860 #define TC_BITMASK 15 861 /* HW offloaded queuing disciplines txq count and offset maps */ 862 struct netdev_tc_txq { 863 u16 count; 864 u16 offset; 865 }; 866 867 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 868 /* 869 * This structure is to hold information about the device 870 * configured to run FCoE protocol stack. 871 */ 872 struct netdev_fcoe_hbainfo { 873 char manufacturer[64]; 874 char serial_number[64]; 875 char hardware_version[64]; 876 char driver_version[64]; 877 char optionrom_version[64]; 878 char firmware_version[64]; 879 char model[256]; 880 char model_description[256]; 881 }; 882 #endif 883 884 #define MAX_PHYS_ITEM_ID_LEN 32 885 886 /* This structure holds a unique identifier to identify some 887 * physical item (port for example) used by a netdevice. 888 */ 889 struct netdev_phys_item_id { 890 unsigned char id[MAX_PHYS_ITEM_ID_LEN]; 891 unsigned char id_len; 892 }; 893 894 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, 895 struct netdev_phys_item_id *b) 896 { 897 return a->id_len == b->id_len && 898 memcmp(a->id, b->id, a->id_len) == 0; 899 } 900 901 typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 902 struct sk_buff *skb, 903 struct net_device *sb_dev); 904 905 enum net_device_path_type { 906 DEV_PATH_ETHERNET = 0, 907 DEV_PATH_VLAN, 908 DEV_PATH_BRIDGE, 909 DEV_PATH_PPPOE, 910 DEV_PATH_DSA, 911 DEV_PATH_MTK_WDMA, 912 }; 913 914 struct net_device_path { 915 enum net_device_path_type type; 916 const struct net_device *dev; 917 union { 918 struct { 919 u16 id; 920 __be16 proto; 921 u8 h_dest[ETH_ALEN]; 922 } encap; 923 struct { 924 enum { 925 DEV_PATH_BR_VLAN_KEEP, 926 DEV_PATH_BR_VLAN_TAG, 927 DEV_PATH_BR_VLAN_UNTAG, 928 DEV_PATH_BR_VLAN_UNTAG_HW, 929 } vlan_mode; 930 u16 vlan_id; 931 __be16 vlan_proto; 932 } bridge; 933 struct { 934 int port; 935 u16 proto; 936 } dsa; 937 struct { 938 u8 wdma_idx; 939 u8 queue; 940 u16 wcid; 941 u8 bss; 942 } mtk_wdma; 943 }; 944 }; 945 946 #define NET_DEVICE_PATH_STACK_MAX 5 947 #define NET_DEVICE_PATH_VLAN_MAX 2 948 949 struct net_device_path_stack { 950 int num_paths; 951 struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; 952 }; 953 954 struct net_device_path_ctx { 955 const struct net_device *dev; 956 u8 daddr[ETH_ALEN]; 957 958 int num_vlans; 959 struct { 960 u16 id; 961 __be16 proto; 962 } vlan[NET_DEVICE_PATH_VLAN_MAX]; 963 }; 964 965 enum tc_setup_type { 966 TC_QUERY_CAPS, 967 TC_SETUP_QDISC_MQPRIO, 968 TC_SETUP_CLSU32, 969 TC_SETUP_CLSFLOWER, 970 TC_SETUP_CLSMATCHALL, 971 TC_SETUP_CLSBPF, 972 TC_SETUP_BLOCK, 973 TC_SETUP_QDISC_CBS, 974 TC_SETUP_QDISC_RED, 975 TC_SETUP_QDISC_PRIO, 976 TC_SETUP_QDISC_MQ, 977 TC_SETUP_QDISC_ETF, 978 TC_SETUP_ROOT_QDISC, 979 TC_SETUP_QDISC_GRED, 980 TC_SETUP_QDISC_TAPRIO, 981 TC_SETUP_FT, 982 TC_SETUP_QDISC_ETS, 983 TC_SETUP_QDISC_TBF, 984 TC_SETUP_QDISC_FIFO, 985 TC_SETUP_QDISC_HTB, 986 TC_SETUP_ACT, 987 }; 988 989 /* These structures hold the attributes of bpf state that are being passed 990 * to the netdevice through the bpf op. 991 */ 992 enum bpf_netdev_command { 993 /* Set or clear a bpf program used in the earliest stages of packet 994 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee 995 * is responsible for calling bpf_prog_put on any old progs that are 996 * stored. In case of error, the callee need not release the new prog 997 * reference, but on success it takes ownership and must bpf_prog_put 998 * when it is no longer used. 999 */ 1000 XDP_SETUP_PROG, 1001 XDP_SETUP_PROG_HW, 1002 /* BPF program for offload callbacks, invoked at program load time. */ 1003 BPF_OFFLOAD_MAP_ALLOC, 1004 BPF_OFFLOAD_MAP_FREE, 1005 XDP_SETUP_XSK_POOL, 1006 }; 1007 1008 struct bpf_prog_offload_ops; 1009 struct netlink_ext_ack; 1010 struct xdp_umem; 1011 struct xdp_dev_bulk_queue; 1012 struct bpf_xdp_link; 1013 1014 enum bpf_xdp_mode { 1015 XDP_MODE_SKB = 0, 1016 XDP_MODE_DRV = 1, 1017 XDP_MODE_HW = 2, 1018 __MAX_XDP_MODE 1019 }; 1020 1021 struct bpf_xdp_entity { 1022 struct bpf_prog *prog; 1023 struct bpf_xdp_link *link; 1024 }; 1025 1026 struct netdev_bpf { 1027 enum bpf_netdev_command command; 1028 union { 1029 /* XDP_SETUP_PROG */ 1030 struct { 1031 u32 flags; 1032 struct bpf_prog *prog; 1033 struct netlink_ext_ack *extack; 1034 }; 1035 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ 1036 struct { 1037 struct bpf_offloaded_map *offmap; 1038 }; 1039 /* XDP_SETUP_XSK_POOL */ 1040 struct { 1041 struct xsk_buff_pool *pool; 1042 u16 queue_id; 1043 } xsk; 1044 }; 1045 }; 1046 1047 /* Flags for ndo_xsk_wakeup. */ 1048 #define XDP_WAKEUP_RX (1 << 0) 1049 #define XDP_WAKEUP_TX (1 << 1) 1050 1051 #ifdef CONFIG_XFRM_OFFLOAD 1052 struct xfrmdev_ops { 1053 int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); 1054 void (*xdo_dev_state_delete) (struct xfrm_state *x); 1055 void (*xdo_dev_state_free) (struct xfrm_state *x); 1056 bool (*xdo_dev_offload_ok) (struct sk_buff *skb, 1057 struct xfrm_state *x); 1058 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); 1059 void (*xdo_dev_state_update_curlft) (struct xfrm_state *x); 1060 int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); 1061 void (*xdo_dev_policy_delete) (struct xfrm_policy *x); 1062 void (*xdo_dev_policy_free) (struct xfrm_policy *x); 1063 }; 1064 #endif 1065 1066 struct dev_ifalias { 1067 struct rcu_head rcuhead; 1068 char ifalias[]; 1069 }; 1070 1071 struct devlink; 1072 struct tlsdev_ops; 1073 1074 struct netdev_net_notifier { 1075 struct list_head list; 1076 struct notifier_block *nb; 1077 }; 1078 1079 /* 1080 * This structure defines the management hooks for network devices. 1081 * The following hooks can be defined; unless noted otherwise, they are 1082 * optional and can be filled with a null pointer. 1083 * 1084 * int (*ndo_init)(struct net_device *dev); 1085 * This function is called once when a network device is registered. 1086 * The network device can use this for any late stage initialization 1087 * or semantic validation. It can fail with an error code which will 1088 * be propagated back to register_netdev. 1089 * 1090 * void (*ndo_uninit)(struct net_device *dev); 1091 * This function is called when device is unregistered or when registration 1092 * fails. It is not called if init fails. 1093 * 1094 * int (*ndo_open)(struct net_device *dev); 1095 * This function is called when a network device transitions to the up 1096 * state. 1097 * 1098 * int (*ndo_stop)(struct net_device *dev); 1099 * This function is called when a network device transitions to the down 1100 * state. 1101 * 1102 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1103 * struct net_device *dev); 1104 * Called when a packet needs to be transmitted. 1105 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop 1106 * the queue before that can happen; it's for obsolete devices and weird 1107 * corner cases, but the stack really does a non-trivial amount 1108 * of useless work if you return NETDEV_TX_BUSY. 1109 * Required; cannot be NULL. 1110 * 1111 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1112 * struct net_device *dev 1113 * netdev_features_t features); 1114 * Called by core transmit path to determine if device is capable of 1115 * performing offload operations on a given packet. This is to give 1116 * the device an opportunity to implement any restrictions that cannot 1117 * be otherwise expressed by feature flags. The check is called with 1118 * the set of features that the stack has calculated and it returns 1119 * those the driver believes to be appropriate. 1120 * 1121 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 1122 * struct net_device *sb_dev); 1123 * Called to decide which queue to use when device supports multiple 1124 * transmit queues. 1125 * 1126 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 1127 * This function is called to allow device receiver to make 1128 * changes to configuration when multicast or promiscuous is enabled. 1129 * 1130 * void (*ndo_set_rx_mode)(struct net_device *dev); 1131 * This function is called device changes address list filtering. 1132 * If driver handles unicast address filtering, it should set 1133 * IFF_UNICAST_FLT in its priv_flags. 1134 * 1135 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 1136 * This function is called when the Media Access Control address 1137 * needs to be changed. If this interface is not defined, the 1138 * MAC address can not be changed. 1139 * 1140 * int (*ndo_validate_addr)(struct net_device *dev); 1141 * Test if Media Access Control address is valid for the device. 1142 * 1143 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1144 * Old-style ioctl entry point. This is used internally by the 1145 * appletalk and ieee802154 subsystems but is no longer called by 1146 * the device ioctl handler. 1147 * 1148 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); 1149 * Used by the bonding driver for its device specific ioctls: 1150 * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, 1151 * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY 1152 * 1153 * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 1154 * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, 1155 * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. 1156 * 1157 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 1158 * Used to set network devices bus interface parameters. This interface 1159 * is retained for legacy reasons; new devices should use the bus 1160 * interface (PCI) for low level management. 1161 * 1162 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 1163 * Called when a user wants to change the Maximum Transfer Unit 1164 * of a device. 1165 * 1166 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); 1167 * Callback used when the transmitter has not made any progress 1168 * for dev->watchdog ticks. 1169 * 1170 * void (*ndo_get_stats64)(struct net_device *dev, 1171 * struct rtnl_link_stats64 *storage); 1172 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1173 * Called when a user wants to get the network device usage 1174 * statistics. Drivers must do one of the following: 1175 * 1. Define @ndo_get_stats64 to fill in a zero-initialised 1176 * rtnl_link_stats64 structure passed by the caller. 1177 * 2. Define @ndo_get_stats to update a net_device_stats structure 1178 * (which should normally be dev->stats) and return a pointer to 1179 * it. The structure may be changed asynchronously only if each 1180 * field is written atomically. 1181 * 3. Update dev->stats asynchronously and atomically, and define 1182 * neither operation. 1183 * 1184 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) 1185 * Return true if this device supports offload stats of this attr_id. 1186 * 1187 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, 1188 * void *attr_data) 1189 * Get statistics for offload operations by attr_id. Write it into the 1190 * attr_data pointer. 1191 * 1192 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 1193 * If device supports VLAN filtering this function is called when a 1194 * VLAN id is registered. 1195 * 1196 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 1197 * If device supports VLAN filtering this function is called when a 1198 * VLAN id is unregistered. 1199 * 1200 * void (*ndo_poll_controller)(struct net_device *dev); 1201 * 1202 * SR-IOV management functions. 1203 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 1204 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, 1205 * u8 qos, __be16 proto); 1206 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, 1207 * int max_tx_rate); 1208 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 1209 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); 1210 * int (*ndo_get_vf_config)(struct net_device *dev, 1211 * int vf, struct ifla_vf_info *ivf); 1212 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); 1213 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 1214 * struct nlattr *port[]); 1215 * 1216 * Enable or disable the VF ability to query its RSS Redirection Table and 1217 * Hash Key. This is needed since on some devices VF share this information 1218 * with PF and querying it may introduce a theoretical security risk. 1219 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); 1220 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 1221 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, 1222 * void *type_data); 1223 * Called to setup any 'tc' scheduler, classifier or action on @dev. 1224 * This is always called from the stack with the rtnl lock held and netif 1225 * tx queues stopped. This allows the netdevice to perform queue 1226 * management safely. 1227 * 1228 * Fiber Channel over Ethernet (FCoE) offload functions. 1229 * int (*ndo_fcoe_enable)(struct net_device *dev); 1230 * Called when the FCoE protocol stack wants to start using LLD for FCoE 1231 * so the underlying device can perform whatever needed configuration or 1232 * initialization to support acceleration of FCoE traffic. 1233 * 1234 * int (*ndo_fcoe_disable)(struct net_device *dev); 1235 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 1236 * so the underlying device can perform whatever needed clean-ups to 1237 * stop supporting acceleration of FCoE traffic. 1238 * 1239 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 1240 * struct scatterlist *sgl, unsigned int sgc); 1241 * Called when the FCoE Initiator wants to initialize an I/O that 1242 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1243 * perform necessary setup and returns 1 to indicate the device is set up 1244 * successfully to perform DDP on this I/O, otherwise this returns 0. 1245 * 1246 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 1247 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 1248 * indicated by the FC exchange id 'xid', so the underlying device can 1249 * clean up and reuse resources for later DDP requests. 1250 * 1251 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 1252 * struct scatterlist *sgl, unsigned int sgc); 1253 * Called when the FCoE Target wants to initialize an I/O that 1254 * is a possible candidate for Direct Data Placement (DDP). The LLD can 1255 * perform necessary setup and returns 1 to indicate the device is set up 1256 * successfully to perform DDP on this I/O, otherwise this returns 0. 1257 * 1258 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1259 * struct netdev_fcoe_hbainfo *hbainfo); 1260 * Called when the FCoE Protocol stack wants information on the underlying 1261 * device. This information is utilized by the FCoE protocol stack to 1262 * register attributes with Fiber Channel management service as per the 1263 * FC-GS Fabric Device Management Information(FDMI) specification. 1264 * 1265 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 1266 * Called when the underlying device wants to override default World Wide 1267 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 1268 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 1269 * protocol stack to use. 1270 * 1271 * RFS acceleration. 1272 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, 1273 * u16 rxq_index, u32 flow_id); 1274 * Set hardware filter for RFS. rxq_index is the target queue index; 1275 * flow_id is a flow ID to be passed to rps_may_expire_flow() later. 1276 * Return the filter ID on success, or a negative error code. 1277 * 1278 * Slave management functions (for bridge, bonding, etc). 1279 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); 1280 * Called to make another netdev an underling. 1281 * 1282 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); 1283 * Called to release previously enslaved netdev. 1284 * 1285 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, 1286 * struct sk_buff *skb, 1287 * bool all_slaves); 1288 * Get the xmit slave of master device. If all_slaves is true, function 1289 * assume all the slaves can transmit. 1290 * 1291 * Feature/offload setting functions. 1292 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1293 * netdev_features_t features); 1294 * Adjusts the requested feature flags according to device-specific 1295 * constraints, and returns the resulting flags. Must not modify 1296 * the device state. 1297 * 1298 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1299 * Called to update device configuration to new features. Passed 1300 * feature set might be less than what was returned by ndo_fix_features()). 1301 * Must return >0 or -errno if it changed dev->features itself. 1302 * 1303 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 1304 * struct net_device *dev, 1305 * const unsigned char *addr, u16 vid, u16 flags, 1306 * struct netlink_ext_ack *extack); 1307 * Adds an FDB entry to dev for addr. 1308 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 1309 * struct net_device *dev, 1310 * const unsigned char *addr, u16 vid) 1311 * Deletes the FDB entry from dev coresponding to addr. 1312 * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[], 1313 * struct net_device *dev, 1314 * u16 vid, 1315 * struct netlink_ext_ack *extack); 1316 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 1317 * struct net_device *dev, struct net_device *filter_dev, 1318 * int *idx) 1319 * Used to add FDB entries to dump requests. Implementers should add 1320 * entries to skb and update idx with the number of entries. 1321 * 1322 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[], 1323 * u16 nlmsg_flags, struct netlink_ext_ack *extack); 1324 * Adds an MDB entry to dev. 1325 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], 1326 * struct netlink_ext_ack *extack); 1327 * Deletes the MDB entry from dev. 1328 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, 1329 * struct netlink_callback *cb); 1330 * Dumps MDB entries from dev. The first argument (marker) in the netlink 1331 * callback is used by core rtnetlink code. 1332 * 1333 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, 1334 * u16 flags, struct netlink_ext_ack *extack) 1335 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 1336 * struct net_device *dev, u32 filter_mask, 1337 * int nlflags) 1338 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, 1339 * u16 flags); 1340 * 1341 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 1342 * Called to change device carrier. Soft-devices (like dummy, team, etc) 1343 * which do not represent real hardware may define this to allow their 1344 * userspace components to manage their virtual carrier state. Devices 1345 * that determine carrier state from physical hardware properties (eg 1346 * network cables) or protocol-dependent mechanisms (eg 1347 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 1348 * 1349 * int (*ndo_get_phys_port_id)(struct net_device *dev, 1350 * struct netdev_phys_item_id *ppid); 1351 * Called to get ID of physical port of this device. If driver does 1352 * not implement this, it is assumed that the hw is not able to have 1353 * multiple net devices on single physical port. 1354 * 1355 * int (*ndo_get_port_parent_id)(struct net_device *dev, 1356 * struct netdev_phys_item_id *ppid) 1357 * Called to get the parent ID of the physical port of this device. 1358 * 1359 * void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1360 * struct net_device *dev) 1361 * Called by upper layer devices to accelerate switching or other 1362 * station functionality into hardware. 'pdev is the lowerdev 1363 * to use for the offload and 'dev' is the net device that will 1364 * back the offload. Returns a pointer to the private structure 1365 * the upper layer will maintain. 1366 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) 1367 * Called by upper layer device to delete the station created 1368 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing 1369 * the station and priv is the structure returned by the add 1370 * operation. 1371 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1372 * int queue_index, u32 maxrate); 1373 * Called when a user wants to set a max-rate limitation of specific 1374 * TX queue. 1375 * int (*ndo_get_iflink)(const struct net_device *dev); 1376 * Called to get the iflink value of this device. 1377 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); 1378 * This function is used to get egress tunnel information for given skb. 1379 * This is useful for retrieving outer tunnel header parameters while 1380 * sampling packet. 1381 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); 1382 * This function is used to specify the headroom that the skb must 1383 * consider when allocation skb during packet reception. Setting 1384 * appropriate rx headroom value allows avoiding skb head copy on 1385 * forward. Setting a negative value resets the rx headroom to the 1386 * default value. 1387 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); 1388 * This function is used to set or query state related to XDP on the 1389 * netdevice and manage BPF offload. See definition of 1390 * enum bpf_netdev_command for details. 1391 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, 1392 * u32 flags); 1393 * This function is used to submit @n XDP packets for transmit on a 1394 * netdevice. Returns number of frames successfully transmitted, frames 1395 * that got dropped are freed/returned via xdp_return_frame(). 1396 * Returns negative number, means general error invoking ndo, meaning 1397 * no frames were xmit'ed and core-caller will free all frames. 1398 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1399 * struct xdp_buff *xdp); 1400 * Get the xmit slave of master device based on the xdp_buff. 1401 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); 1402 * This function is used to wake up the softirq, ksoftirqd or kthread 1403 * responsible for sending and/or receiving packets on a specific 1404 * queue id bound to an AF_XDP socket. The flags field specifies if 1405 * only RX, only Tx, or both should be woken up using the flags 1406 * XDP_WAKEUP_RX and XDP_WAKEUP_TX. 1407 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, 1408 * int cmd); 1409 * Add, change, delete or get information on an IPv4 tunnel. 1410 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); 1411 * If a device is paired with a peer device, return the peer instance. 1412 * The caller must be under RCU read context. 1413 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); 1414 * Get the forwarding path to reach the real device from the HW destination address 1415 * ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1416 * const struct skb_shared_hwtstamps *hwtstamps, 1417 * bool cycles); 1418 * Get hardware timestamp based on normal/adjustable time or free running 1419 * cycle counter. This function is required if physical clock supports a 1420 * free running cycle counter. 1421 */ 1422 struct net_device_ops { 1423 int (*ndo_init)(struct net_device *dev); 1424 void (*ndo_uninit)(struct net_device *dev); 1425 int (*ndo_open)(struct net_device *dev); 1426 int (*ndo_stop)(struct net_device *dev); 1427 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 1428 struct net_device *dev); 1429 netdev_features_t (*ndo_features_check)(struct sk_buff *skb, 1430 struct net_device *dev, 1431 netdev_features_t features); 1432 u16 (*ndo_select_queue)(struct net_device *dev, 1433 struct sk_buff *skb, 1434 struct net_device *sb_dev); 1435 void (*ndo_change_rx_flags)(struct net_device *dev, 1436 int flags); 1437 void (*ndo_set_rx_mode)(struct net_device *dev); 1438 int (*ndo_set_mac_address)(struct net_device *dev, 1439 void *addr); 1440 int (*ndo_validate_addr)(struct net_device *dev); 1441 int (*ndo_do_ioctl)(struct net_device *dev, 1442 struct ifreq *ifr, int cmd); 1443 int (*ndo_eth_ioctl)(struct net_device *dev, 1444 struct ifreq *ifr, int cmd); 1445 int (*ndo_siocbond)(struct net_device *dev, 1446 struct ifreq *ifr, int cmd); 1447 int (*ndo_siocwandev)(struct net_device *dev, 1448 struct if_settings *ifs); 1449 int (*ndo_siocdevprivate)(struct net_device *dev, 1450 struct ifreq *ifr, 1451 void __user *data, int cmd); 1452 int (*ndo_set_config)(struct net_device *dev, 1453 struct ifmap *map); 1454 int (*ndo_change_mtu)(struct net_device *dev, 1455 int new_mtu); 1456 int (*ndo_neigh_setup)(struct net_device *dev, 1457 struct neigh_parms *); 1458 void (*ndo_tx_timeout) (struct net_device *dev, 1459 unsigned int txqueue); 1460 1461 void (*ndo_get_stats64)(struct net_device *dev, 1462 struct rtnl_link_stats64 *storage); 1463 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); 1464 int (*ndo_get_offload_stats)(int attr_id, 1465 const struct net_device *dev, 1466 void *attr_data); 1467 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1468 1469 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 1470 __be16 proto, u16 vid); 1471 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 1472 __be16 proto, u16 vid); 1473 #ifdef CONFIG_NET_POLL_CONTROLLER 1474 void (*ndo_poll_controller)(struct net_device *dev); 1475 int (*ndo_netpoll_setup)(struct net_device *dev, 1476 struct netpoll_info *info); 1477 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1478 #endif 1479 int (*ndo_set_vf_mac)(struct net_device *dev, 1480 int queue, u8 *mac); 1481 int (*ndo_set_vf_vlan)(struct net_device *dev, 1482 int queue, u16 vlan, 1483 u8 qos, __be16 proto); 1484 int (*ndo_set_vf_rate)(struct net_device *dev, 1485 int vf, int min_tx_rate, 1486 int max_tx_rate); 1487 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1488 int vf, bool setting); 1489 int (*ndo_set_vf_trust)(struct net_device *dev, 1490 int vf, bool setting); 1491 int (*ndo_get_vf_config)(struct net_device *dev, 1492 int vf, 1493 struct ifla_vf_info *ivf); 1494 int (*ndo_set_vf_link_state)(struct net_device *dev, 1495 int vf, int link_state); 1496 int (*ndo_get_vf_stats)(struct net_device *dev, 1497 int vf, 1498 struct ifla_vf_stats 1499 *vf_stats); 1500 int (*ndo_set_vf_port)(struct net_device *dev, 1501 int vf, 1502 struct nlattr *port[]); 1503 int (*ndo_get_vf_port)(struct net_device *dev, 1504 int vf, struct sk_buff *skb); 1505 int (*ndo_get_vf_guid)(struct net_device *dev, 1506 int vf, 1507 struct ifla_vf_guid *node_guid, 1508 struct ifla_vf_guid *port_guid); 1509 int (*ndo_set_vf_guid)(struct net_device *dev, 1510 int vf, u64 guid, 1511 int guid_type); 1512 int (*ndo_set_vf_rss_query_en)( 1513 struct net_device *dev, 1514 int vf, bool setting); 1515 int (*ndo_setup_tc)(struct net_device *dev, 1516 enum tc_setup_type type, 1517 void *type_data); 1518 #if IS_ENABLED(CONFIG_FCOE) 1519 int (*ndo_fcoe_enable)(struct net_device *dev); 1520 int (*ndo_fcoe_disable)(struct net_device *dev); 1521 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 1522 u16 xid, 1523 struct scatterlist *sgl, 1524 unsigned int sgc); 1525 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 1526 u16 xid); 1527 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 1528 u16 xid, 1529 struct scatterlist *sgl, 1530 unsigned int sgc); 1531 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1532 struct netdev_fcoe_hbainfo *hbainfo); 1533 #endif 1534 1535 #if IS_ENABLED(CONFIG_LIBFCOE) 1536 #define NETDEV_FCOE_WWNN 0 1537 #define NETDEV_FCOE_WWPN 1 1538 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 1539 u64 *wwn, int type); 1540 #endif 1541 1542 #ifdef CONFIG_RFS_ACCEL 1543 int (*ndo_rx_flow_steer)(struct net_device *dev, 1544 const struct sk_buff *skb, 1545 u16 rxq_index, 1546 u32 flow_id); 1547 #endif 1548 int (*ndo_add_slave)(struct net_device *dev, 1549 struct net_device *slave_dev, 1550 struct netlink_ext_ack *extack); 1551 int (*ndo_del_slave)(struct net_device *dev, 1552 struct net_device *slave_dev); 1553 struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, 1554 struct sk_buff *skb, 1555 bool all_slaves); 1556 struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, 1557 struct sock *sk); 1558 netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1559 netdev_features_t features); 1560 int (*ndo_set_features)(struct net_device *dev, 1561 netdev_features_t features); 1562 int (*ndo_neigh_construct)(struct net_device *dev, 1563 struct neighbour *n); 1564 void (*ndo_neigh_destroy)(struct net_device *dev, 1565 struct neighbour *n); 1566 1567 int (*ndo_fdb_add)(struct ndmsg *ndm, 1568 struct nlattr *tb[], 1569 struct net_device *dev, 1570 const unsigned char *addr, 1571 u16 vid, 1572 u16 flags, 1573 struct netlink_ext_ack *extack); 1574 int (*ndo_fdb_del)(struct ndmsg *ndm, 1575 struct nlattr *tb[], 1576 struct net_device *dev, 1577 const unsigned char *addr, 1578 u16 vid, struct netlink_ext_ack *extack); 1579 int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, 1580 struct nlattr *tb[], 1581 struct net_device *dev, 1582 u16 vid, 1583 struct netlink_ext_ack *extack); 1584 int (*ndo_fdb_dump)(struct sk_buff *skb, 1585 struct netlink_callback *cb, 1586 struct net_device *dev, 1587 struct net_device *filter_dev, 1588 int *idx); 1589 int (*ndo_fdb_get)(struct sk_buff *skb, 1590 struct nlattr *tb[], 1591 struct net_device *dev, 1592 const unsigned char *addr, 1593 u16 vid, u32 portid, u32 seq, 1594 struct netlink_ext_ack *extack); 1595 int (*ndo_mdb_add)(struct net_device *dev, 1596 struct nlattr *tb[], 1597 u16 nlmsg_flags, 1598 struct netlink_ext_ack *extack); 1599 int (*ndo_mdb_del)(struct net_device *dev, 1600 struct nlattr *tb[], 1601 struct netlink_ext_ack *extack); 1602 int (*ndo_mdb_dump)(struct net_device *dev, 1603 struct sk_buff *skb, 1604 struct netlink_callback *cb); 1605 int (*ndo_bridge_setlink)(struct net_device *dev, 1606 struct nlmsghdr *nlh, 1607 u16 flags, 1608 struct netlink_ext_ack *extack); 1609 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1610 u32 pid, u32 seq, 1611 struct net_device *dev, 1612 u32 filter_mask, 1613 int nlflags); 1614 int (*ndo_bridge_dellink)(struct net_device *dev, 1615 struct nlmsghdr *nlh, 1616 u16 flags); 1617 int (*ndo_change_carrier)(struct net_device *dev, 1618 bool new_carrier); 1619 int (*ndo_get_phys_port_id)(struct net_device *dev, 1620 struct netdev_phys_item_id *ppid); 1621 int (*ndo_get_port_parent_id)(struct net_device *dev, 1622 struct netdev_phys_item_id *ppid); 1623 int (*ndo_get_phys_port_name)(struct net_device *dev, 1624 char *name, size_t len); 1625 void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1626 struct net_device *dev); 1627 void (*ndo_dfwd_del_station)(struct net_device *pdev, 1628 void *priv); 1629 1630 int (*ndo_set_tx_maxrate)(struct net_device *dev, 1631 int queue_index, 1632 u32 maxrate); 1633 int (*ndo_get_iflink)(const struct net_device *dev); 1634 int (*ndo_fill_metadata_dst)(struct net_device *dev, 1635 struct sk_buff *skb); 1636 void (*ndo_set_rx_headroom)(struct net_device *dev, 1637 int needed_headroom); 1638 int (*ndo_bpf)(struct net_device *dev, 1639 struct netdev_bpf *bpf); 1640 int (*ndo_xdp_xmit)(struct net_device *dev, int n, 1641 struct xdp_frame **xdp, 1642 u32 flags); 1643 struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, 1644 struct xdp_buff *xdp); 1645 int (*ndo_xsk_wakeup)(struct net_device *dev, 1646 u32 queue_id, u32 flags); 1647 int (*ndo_tunnel_ctl)(struct net_device *dev, 1648 struct ip_tunnel_parm *p, int cmd); 1649 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); 1650 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, 1651 struct net_device_path *path); 1652 ktime_t (*ndo_get_tstamp)(struct net_device *dev, 1653 const struct skb_shared_hwtstamps *hwtstamps, 1654 bool cycles); 1655 }; 1656 1657 struct xdp_metadata_ops { 1658 int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp); 1659 int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash, 1660 enum xdp_rss_hash_type *rss_type); 1661 }; 1662 1663 /** 1664 * enum netdev_priv_flags - &struct net_device priv_flags 1665 * 1666 * These are the &struct net_device, they are only set internally 1667 * by drivers and used in the kernel. These flags are invisible to 1668 * userspace; this means that the order of these flags can change 1669 * during any kernel release. 1670 * 1671 * You should have a pretty good reason to be extending these flags. 1672 * 1673 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1674 * @IFF_EBRIDGE: Ethernet bridging device 1675 * @IFF_BONDING: bonding master or slave 1676 * @IFF_ISATAP: ISATAP interface (RFC4214) 1677 * @IFF_WAN_HDLC: WAN HDLC device 1678 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1679 * release skb->dst 1680 * @IFF_DONT_BRIDGE: disallow bridging this ether dev 1681 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time 1682 * @IFF_MACVLAN_PORT: device used as macvlan port 1683 * @IFF_BRIDGE_PORT: device used as bridge port 1684 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port 1685 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit 1686 * @IFF_UNICAST_FLT: Supports unicast filtering 1687 * @IFF_TEAM_PORT: device used as team port 1688 * @IFF_SUPP_NOFCS: device supports sending custom FCS 1689 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1690 * change when it's running 1691 * @IFF_MACVLAN: Macvlan device 1692 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account 1693 * underlying stacked devices 1694 * @IFF_L3MDEV_MASTER: device is an L3 master device 1695 * @IFF_NO_QUEUE: device can run without qdisc attached 1696 * @IFF_OPENVSWITCH: device is a Open vSwitch master 1697 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1698 * @IFF_TEAM: device is a team device 1699 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured 1700 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external 1701 * entity (i.e. the master device for bridged veth) 1702 * @IFF_MACSEC: device is a MACsec device 1703 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1704 * @IFF_FAILOVER: device is a failover master device 1705 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1706 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device 1707 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf 1708 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with 1709 * skb_headlen(skb) == 0 (data starts from frag0) 1710 * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN 1711 */ 1712 enum netdev_priv_flags { 1713 IFF_802_1Q_VLAN = 1<<0, 1714 IFF_EBRIDGE = 1<<1, 1715 IFF_BONDING = 1<<2, 1716 IFF_ISATAP = 1<<3, 1717 IFF_WAN_HDLC = 1<<4, 1718 IFF_XMIT_DST_RELEASE = 1<<5, 1719 IFF_DONT_BRIDGE = 1<<6, 1720 IFF_DISABLE_NETPOLL = 1<<7, 1721 IFF_MACVLAN_PORT = 1<<8, 1722 IFF_BRIDGE_PORT = 1<<9, 1723 IFF_OVS_DATAPATH = 1<<10, 1724 IFF_TX_SKB_SHARING = 1<<11, 1725 IFF_UNICAST_FLT = 1<<12, 1726 IFF_TEAM_PORT = 1<<13, 1727 IFF_SUPP_NOFCS = 1<<14, 1728 IFF_LIVE_ADDR_CHANGE = 1<<15, 1729 IFF_MACVLAN = 1<<16, 1730 IFF_XMIT_DST_RELEASE_PERM = 1<<17, 1731 IFF_L3MDEV_MASTER = 1<<18, 1732 IFF_NO_QUEUE = 1<<19, 1733 IFF_OPENVSWITCH = 1<<20, 1734 IFF_L3MDEV_SLAVE = 1<<21, 1735 IFF_TEAM = 1<<22, 1736 IFF_RXFH_CONFIGURED = 1<<23, 1737 IFF_PHONY_HEADROOM = 1<<24, 1738 IFF_MACSEC = 1<<25, 1739 IFF_NO_RX_HANDLER = 1<<26, 1740 IFF_FAILOVER = 1<<27, 1741 IFF_FAILOVER_SLAVE = 1<<28, 1742 IFF_L3MDEV_RX_HANDLER = 1<<29, 1743 IFF_NO_ADDRCONF = BIT_ULL(30), 1744 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), 1745 IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), 1746 }; 1747 1748 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1749 #define IFF_EBRIDGE IFF_EBRIDGE 1750 #define IFF_BONDING IFF_BONDING 1751 #define IFF_ISATAP IFF_ISATAP 1752 #define IFF_WAN_HDLC IFF_WAN_HDLC 1753 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE 1754 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE 1755 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL 1756 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT 1757 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT 1758 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH 1759 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING 1760 #define IFF_UNICAST_FLT IFF_UNICAST_FLT 1761 #define IFF_TEAM_PORT IFF_TEAM_PORT 1762 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS 1763 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE 1764 #define IFF_MACVLAN IFF_MACVLAN 1765 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM 1766 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER 1767 #define IFF_NO_QUEUE IFF_NO_QUEUE 1768 #define IFF_OPENVSWITCH IFF_OPENVSWITCH 1769 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE 1770 #define IFF_TEAM IFF_TEAM 1771 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED 1772 #define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM 1773 #define IFF_MACSEC IFF_MACSEC 1774 #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER 1775 #define IFF_FAILOVER IFF_FAILOVER 1776 #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE 1777 #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER 1778 #define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR 1779 1780 /* Specifies the type of the struct net_device::ml_priv pointer */ 1781 enum netdev_ml_priv_type { 1782 ML_PRIV_NONE, 1783 ML_PRIV_CAN, 1784 }; 1785 1786 /** 1787 * struct net_device - The DEVICE structure. 1788 * 1789 * Actually, this whole structure is a big mistake. It mixes I/O 1790 * data with strictly "high-level" data, and it has to know about 1791 * almost every data structure used in the INET module. 1792 * 1793 * @name: This is the first field of the "visible" part of this structure 1794 * (i.e. as seen by users in the "Space.c" file). It is the name 1795 * of the interface. 1796 * 1797 * @name_node: Name hashlist node 1798 * @ifalias: SNMP alias 1799 * @mem_end: Shared memory end 1800 * @mem_start: Shared memory start 1801 * @base_addr: Device I/O address 1802 * @irq: Device IRQ number 1803 * 1804 * @state: Generic network queuing layer state, see netdev_state_t 1805 * @dev_list: The global list of network devices 1806 * @napi_list: List entry used for polling NAPI devices 1807 * @unreg_list: List entry when we are unregistering the 1808 * device; see the function unregister_netdev 1809 * @close_list: List entry used when we are closing the device 1810 * @ptype_all: Device-specific packet handlers for all protocols 1811 * @ptype_specific: Device-specific, protocol-specific packet handlers 1812 * 1813 * @adj_list: Directly linked devices, like slaves for bonding 1814 * @features: Currently active device features 1815 * @hw_features: User-changeable features 1816 * 1817 * @wanted_features: User-requested features 1818 * @vlan_features: Mask of features inheritable by VLAN devices 1819 * 1820 * @hw_enc_features: Mask of features inherited by encapsulating devices 1821 * This field indicates what encapsulation 1822 * offloads the hardware is capable of doing, 1823 * and drivers will need to set them appropriately. 1824 * 1825 * @mpls_features: Mask of features inheritable by MPLS 1826 * @gso_partial_features: value(s) from NETIF_F_GSO\* 1827 * 1828 * @ifindex: interface index 1829 * @group: The group the device belongs to 1830 * 1831 * @stats: Statistics struct, which was left as a legacy, use 1832 * rtnl_link_stats64 instead 1833 * 1834 * @core_stats: core networking counters, 1835 * do not use this in drivers 1836 * @carrier_up_count: Number of times the carrier has been up 1837 * @carrier_down_count: Number of times the carrier has been down 1838 * 1839 * @wireless_handlers: List of functions to handle Wireless Extensions, 1840 * instead of ioctl, 1841 * see <net/iw_handler.h> for details. 1842 * @wireless_data: Instance data managed by the core of wireless extensions 1843 * 1844 * @netdev_ops: Includes several pointers to callbacks, 1845 * if one wants to override the ndo_*() functions 1846 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. 1847 * @ethtool_ops: Management operations 1848 * @l3mdev_ops: Layer 3 master device operations 1849 * @ndisc_ops: Includes callbacks for different IPv6 neighbour 1850 * discovery handling. Necessary for e.g. 6LoWPAN. 1851 * @xfrmdev_ops: Transformation offload operations 1852 * @tlsdev_ops: Transport Layer Security offload operations 1853 * @header_ops: Includes callbacks for creating,parsing,caching,etc 1854 * of Layer 2 headers. 1855 * 1856 * @flags: Interface flags (a la BSD) 1857 * @xdp_features: XDP capability supported by the device 1858 * @priv_flags: Like 'flags' but invisible to userspace, 1859 * see if.h for the definitions 1860 * @gflags: Global flags ( kept as legacy ) 1861 * @padded: How much padding added by alloc_netdev() 1862 * @operstate: RFC2863 operstate 1863 * @link_mode: Mapping policy to operstate 1864 * @if_port: Selectable AUI, TP, ... 1865 * @dma: DMA channel 1866 * @mtu: Interface MTU value 1867 * @min_mtu: Interface Minimum MTU value 1868 * @max_mtu: Interface Maximum MTU value 1869 * @type: Interface hardware type 1870 * @hard_header_len: Maximum hardware header length. 1871 * @min_header_len: Minimum hardware header length 1872 * 1873 * @needed_headroom: Extra headroom the hardware may need, but not in all 1874 * cases can this be guaranteed 1875 * @needed_tailroom: Extra tailroom the hardware may need, but not in all 1876 * cases can this be guaranteed. Some cases also use 1877 * LL_MAX_HEADER instead to allocate the skb 1878 * 1879 * interface address info: 1880 * 1881 * @perm_addr: Permanent hw address 1882 * @addr_assign_type: Hw address assignment type 1883 * @addr_len: Hardware address length 1884 * @upper_level: Maximum depth level of upper devices. 1885 * @lower_level: Maximum depth level of lower devices. 1886 * @neigh_priv_len: Used in neigh_alloc() 1887 * @dev_id: Used to differentiate devices that share 1888 * the same link layer address 1889 * @dev_port: Used to differentiate devices that share 1890 * the same function 1891 * @addr_list_lock: XXX: need comments on this one 1892 * @name_assign_type: network interface name assignment type 1893 * @uc_promisc: Counter that indicates promiscuous mode 1894 * has been enabled due to the need to listen to 1895 * additional unicast addresses in a device that 1896 * does not implement ndo_set_rx_mode() 1897 * @uc: unicast mac addresses 1898 * @mc: multicast mac addresses 1899 * @dev_addrs: list of device hw addresses 1900 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1901 * @promiscuity: Number of times the NIC is told to work in 1902 * promiscuous mode; if it becomes 0 the NIC will 1903 * exit promiscuous mode 1904 * @allmulti: Counter, enables or disables allmulticast mode 1905 * 1906 * @vlan_info: VLAN info 1907 * @dsa_ptr: dsa specific data 1908 * @tipc_ptr: TIPC specific data 1909 * @atalk_ptr: AppleTalk link 1910 * @ip_ptr: IPv4 specific data 1911 * @ip6_ptr: IPv6 specific data 1912 * @ax25_ptr: AX.25 specific data 1913 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1914 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network 1915 * device struct 1916 * @mpls_ptr: mpls_dev struct pointer 1917 * @mctp_ptr: MCTP specific data 1918 * 1919 * @dev_addr: Hw address (before bcast, 1920 * because most packets are unicast) 1921 * 1922 * @_rx: Array of RX queues 1923 * @num_rx_queues: Number of RX queues 1924 * allocated at register_netdev() time 1925 * @real_num_rx_queues: Number of RX queues currently active in device 1926 * @xdp_prog: XDP sockets filter program pointer 1927 * @gro_flush_timeout: timeout for GRO layer in NAPI 1928 * @napi_defer_hard_irqs: If not zero, provides a counter that would 1929 * allow to avoid NIC hard IRQ, on busy queues. 1930 * 1931 * @rx_handler: handler for received packets 1932 * @rx_handler_data: XXX: need comments on this one 1933 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing 1934 * @ingress_queue: XXX: need comments on this one 1935 * @nf_hooks_ingress: netfilter hooks executed for ingress packets 1936 * @broadcast: hw bcast address 1937 * 1938 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, 1939 * indexed by RX queue number. Assigned by driver. 1940 * This must only be set if the ndo_rx_flow_steer 1941 * operation is defined 1942 * @index_hlist: Device index hash chain 1943 * 1944 * @_tx: Array of TX queues 1945 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time 1946 * @real_num_tx_queues: Number of TX queues currently active in device 1947 * @qdisc: Root qdisc from userspace point of view 1948 * @tx_queue_len: Max frames per queue allowed 1949 * @tx_global_lock: XXX: need comments on this one 1950 * @xdp_bulkq: XDP device bulk queue 1951 * @xps_maps: all CPUs/RXQs maps for XPS device 1952 * 1953 * @xps_maps: XXX: need comments on this one 1954 * @tcx_egress: BPF & clsact qdisc specific data for egress processing 1955 * @nf_hooks_egress: netfilter hooks executed for egress packets 1956 * @qdisc_hash: qdisc hash table 1957 * @watchdog_timeo: Represents the timeout that is used by 1958 * the watchdog (see dev_watchdog()) 1959 * @watchdog_timer: List of timers 1960 * 1961 * @proto_down_reason: reason a netdev interface is held down 1962 * @pcpu_refcnt: Number of references to this device 1963 * @dev_refcnt: Number of references to this device 1964 * @refcnt_tracker: Tracker directory for tracked references to this device 1965 * @todo_list: Delayed register/unregister 1966 * @link_watch_list: XXX: need comments on this one 1967 * 1968 * @reg_state: Register/unregister state machine 1969 * @dismantle: Device is going to be freed 1970 * @rtnl_link_state: This enum represents the phases of creating 1971 * a new link 1972 * 1973 * @needs_free_netdev: Should unregister perform free_netdev? 1974 * @priv_destructor: Called from unregister 1975 * @npinfo: XXX: need comments on this one 1976 * @nd_net: Network namespace this network device is inside 1977 * 1978 * @ml_priv: Mid-layer private 1979 * @ml_priv_type: Mid-layer private type 1980 * @lstats: Loopback statistics 1981 * @tstats: Tunnel statistics 1982 * @dstats: Dummy statistics 1983 * @vstats: Virtual ethernet statistics 1984 * 1985 * @garp_port: GARP 1986 * @mrp_port: MRP 1987 * 1988 * @dm_private: Drop monitor private 1989 * 1990 * @dev: Class/net/name entry 1991 * @sysfs_groups: Space for optional device, statistics and wireless 1992 * sysfs groups 1993 * 1994 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes 1995 * @rtnl_link_ops: Rtnl_link_ops 1996 * 1997 * @gso_max_size: Maximum size of generic segmentation offload 1998 * @tso_max_size: Device (as in HW) limit on the max TSO request size 1999 * @gso_max_segs: Maximum number of segments that can be passed to the 2000 * NIC for GSO 2001 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count 2002 * @gso_ipv4_max_size: Maximum size of generic segmentation offload, 2003 * for IPv4. 2004 * 2005 * @dcbnl_ops: Data Center Bridging netlink ops 2006 * @num_tc: Number of traffic classes in the net device 2007 * @tc_to_txq: XXX: need comments on this one 2008 * @prio_tc_map: XXX: need comments on this one 2009 * 2010 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 2011 * 2012 * @priomap: XXX: need comments on this one 2013 * @phydev: Physical device may attach itself 2014 * for hardware timestamping 2015 * @sfp_bus: attached &struct sfp_bus structure. 2016 * 2017 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 2018 * 2019 * @proto_down: protocol port state information can be sent to the 2020 * switch driver and used to set the phys state of the 2021 * switch port. 2022 * 2023 * @wol_enabled: Wake-on-LAN is enabled 2024 * 2025 * @threaded: napi threaded mode is enabled 2026 * 2027 * @net_notifier_list: List of per-net netdev notifier block 2028 * that follow this device when it is moved 2029 * to another network namespace. 2030 * 2031 * @macsec_ops: MACsec offloading ops 2032 * 2033 * @udp_tunnel_nic_info: static structure describing the UDP tunnel 2034 * offload capabilities of the device 2035 * @udp_tunnel_nic: UDP tunnel offload state 2036 * @xdp_state: stores info on attached XDP BPF programs 2037 * 2038 * @nested_level: Used as a parameter of spin_lock_nested() of 2039 * dev->addr_list_lock. 2040 * @unlink_list: As netif_addr_lock() can be called recursively, 2041 * keep a list of interfaces to be deleted. 2042 * @gro_max_size: Maximum size of aggregated packet in generic 2043 * receive offload (GRO) 2044 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic 2045 * receive offload (GRO), for IPv4. 2046 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP 2047 * zero copy driver 2048 * 2049 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. 2050 * @linkwatch_dev_tracker: refcount tracker used by linkwatch. 2051 * @watchdog_dev_tracker: refcount tracker used by watchdog. 2052 * @dev_registered_tracker: tracker for reference held while 2053 * registered 2054 * @offload_xstats_l3: L3 HW stats for this netdevice. 2055 * 2056 * @devlink_port: Pointer to related devlink port structure. 2057 * Assigned by a driver before netdev registration using 2058 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static 2059 * during the time netdevice is registered. 2060 * 2061 * FIXME: cleanup struct net_device such that network protocol info 2062 * moves out. 2063 */ 2064 2065 struct net_device { 2066 char name[IFNAMSIZ]; 2067 struct netdev_name_node *name_node; 2068 struct dev_ifalias __rcu *ifalias; 2069 /* 2070 * I/O specific fields 2071 * FIXME: Merge these and struct ifmap into one 2072 */ 2073 unsigned long mem_end; 2074 unsigned long mem_start; 2075 unsigned long base_addr; 2076 2077 /* 2078 * Some hardware also needs these fields (state,dev_list, 2079 * napi_list,unreg_list,close_list) but they are not 2080 * part of the usual set specified in Space.c. 2081 */ 2082 2083 unsigned long state; 2084 2085 struct list_head dev_list; 2086 struct list_head napi_list; 2087 struct list_head unreg_list; 2088 struct list_head close_list; 2089 struct list_head ptype_all; 2090 struct list_head ptype_specific; 2091 2092 struct { 2093 struct list_head upper; 2094 struct list_head lower; 2095 } adj_list; 2096 2097 /* Read-mostly cache-line for fast-path access */ 2098 unsigned int flags; 2099 xdp_features_t xdp_features; 2100 unsigned long long priv_flags; 2101 const struct net_device_ops *netdev_ops; 2102 const struct xdp_metadata_ops *xdp_metadata_ops; 2103 int ifindex; 2104 unsigned short gflags; 2105 unsigned short hard_header_len; 2106 2107 /* Note : dev->mtu is often read without holding a lock. 2108 * Writers usually hold RTNL. 2109 * It is recommended to use READ_ONCE() to annotate the reads, 2110 * and to use WRITE_ONCE() to annotate the writes. 2111 */ 2112 unsigned int mtu; 2113 unsigned short needed_headroom; 2114 unsigned short needed_tailroom; 2115 2116 netdev_features_t features; 2117 netdev_features_t hw_features; 2118 netdev_features_t wanted_features; 2119 netdev_features_t vlan_features; 2120 netdev_features_t hw_enc_features; 2121 netdev_features_t mpls_features; 2122 netdev_features_t gso_partial_features; 2123 2124 unsigned int min_mtu; 2125 unsigned int max_mtu; 2126 unsigned short type; 2127 unsigned char min_header_len; 2128 unsigned char name_assign_type; 2129 2130 int group; 2131 2132 struct net_device_stats stats; /* not used by modern drivers */ 2133 2134 struct net_device_core_stats __percpu *core_stats; 2135 2136 /* Stats to monitor link on/off, flapping */ 2137 atomic_t carrier_up_count; 2138 atomic_t carrier_down_count; 2139 2140 #ifdef CONFIG_WIRELESS_EXT 2141 const struct iw_handler_def *wireless_handlers; 2142 struct iw_public_data *wireless_data; 2143 #endif 2144 const struct ethtool_ops *ethtool_ops; 2145 #ifdef CONFIG_NET_L3_MASTER_DEV 2146 const struct l3mdev_ops *l3mdev_ops; 2147 #endif 2148 #if IS_ENABLED(CONFIG_IPV6) 2149 const struct ndisc_ops *ndisc_ops; 2150 #endif 2151 2152 #ifdef CONFIG_XFRM_OFFLOAD 2153 const struct xfrmdev_ops *xfrmdev_ops; 2154 #endif 2155 2156 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2157 const struct tlsdev_ops *tlsdev_ops; 2158 #endif 2159 2160 const struct header_ops *header_ops; 2161 2162 unsigned char operstate; 2163 unsigned char link_mode; 2164 2165 unsigned char if_port; 2166 unsigned char dma; 2167 2168 /* Interface address info. */ 2169 unsigned char perm_addr[MAX_ADDR_LEN]; 2170 unsigned char addr_assign_type; 2171 unsigned char addr_len; 2172 unsigned char upper_level; 2173 unsigned char lower_level; 2174 2175 unsigned short neigh_priv_len; 2176 unsigned short dev_id; 2177 unsigned short dev_port; 2178 unsigned short padded; 2179 2180 spinlock_t addr_list_lock; 2181 int irq; 2182 2183 struct netdev_hw_addr_list uc; 2184 struct netdev_hw_addr_list mc; 2185 struct netdev_hw_addr_list dev_addrs; 2186 2187 #ifdef CONFIG_SYSFS 2188 struct kset *queues_kset; 2189 #endif 2190 #ifdef CONFIG_LOCKDEP 2191 struct list_head unlink_list; 2192 #endif 2193 unsigned int promiscuity; 2194 unsigned int allmulti; 2195 bool uc_promisc; 2196 #ifdef CONFIG_LOCKDEP 2197 unsigned char nested_level; 2198 #endif 2199 2200 2201 /* Protocol-specific pointers */ 2202 2203 struct in_device __rcu *ip_ptr; 2204 struct inet6_dev __rcu *ip6_ptr; 2205 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2206 struct vlan_info __rcu *vlan_info; 2207 #endif 2208 #if IS_ENABLED(CONFIG_NET_DSA) 2209 struct dsa_port *dsa_ptr; 2210 #endif 2211 #if IS_ENABLED(CONFIG_TIPC) 2212 struct tipc_bearer __rcu *tipc_ptr; 2213 #endif 2214 #if IS_ENABLED(CONFIG_ATALK) 2215 void *atalk_ptr; 2216 #endif 2217 #if IS_ENABLED(CONFIG_AX25) 2218 void *ax25_ptr; 2219 #endif 2220 #if IS_ENABLED(CONFIG_CFG80211) 2221 struct wireless_dev *ieee80211_ptr; 2222 #endif 2223 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) 2224 struct wpan_dev *ieee802154_ptr; 2225 #endif 2226 #if IS_ENABLED(CONFIG_MPLS_ROUTING) 2227 struct mpls_dev __rcu *mpls_ptr; 2228 #endif 2229 #if IS_ENABLED(CONFIG_MCTP) 2230 struct mctp_dev __rcu *mctp_ptr; 2231 #endif 2232 2233 /* 2234 * Cache lines mostly used on receive path (including eth_type_trans()) 2235 */ 2236 /* Interface address info used in eth_type_trans() */ 2237 const unsigned char *dev_addr; 2238 2239 struct netdev_rx_queue *_rx; 2240 unsigned int num_rx_queues; 2241 unsigned int real_num_rx_queues; 2242 2243 struct bpf_prog __rcu *xdp_prog; 2244 unsigned long gro_flush_timeout; 2245 int napi_defer_hard_irqs; 2246 #define GRO_LEGACY_MAX_SIZE 65536u 2247 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2248 * and shinfo->gso_segs is a 16bit field. 2249 */ 2250 #define GRO_MAX_SIZE (8 * 65535u) 2251 unsigned int gro_max_size; 2252 unsigned int gro_ipv4_max_size; 2253 unsigned int xdp_zc_max_segs; 2254 rx_handler_func_t __rcu *rx_handler; 2255 void __rcu *rx_handler_data; 2256 #ifdef CONFIG_NET_XGRESS 2257 struct bpf_mprog_entry __rcu *tcx_ingress; 2258 #endif 2259 struct netdev_queue __rcu *ingress_queue; 2260 #ifdef CONFIG_NETFILTER_INGRESS 2261 struct nf_hook_entries __rcu *nf_hooks_ingress; 2262 #endif 2263 2264 unsigned char broadcast[MAX_ADDR_LEN]; 2265 #ifdef CONFIG_RFS_ACCEL 2266 struct cpu_rmap *rx_cpu_rmap; 2267 #endif 2268 struct hlist_node index_hlist; 2269 2270 /* 2271 * Cache lines mostly used on transmit path 2272 */ 2273 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 2274 unsigned int num_tx_queues; 2275 unsigned int real_num_tx_queues; 2276 struct Qdisc __rcu *qdisc; 2277 unsigned int tx_queue_len; 2278 spinlock_t tx_global_lock; 2279 2280 struct xdp_dev_bulk_queue __percpu *xdp_bulkq; 2281 2282 #ifdef CONFIG_XPS 2283 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; 2284 #endif 2285 #ifdef CONFIG_NET_XGRESS 2286 struct bpf_mprog_entry __rcu *tcx_egress; 2287 #endif 2288 #ifdef CONFIG_NETFILTER_EGRESS 2289 struct nf_hook_entries __rcu *nf_hooks_egress; 2290 #endif 2291 2292 #ifdef CONFIG_NET_SCHED 2293 DECLARE_HASHTABLE (qdisc_hash, 4); 2294 #endif 2295 /* These may be needed for future network-power-down code. */ 2296 struct timer_list watchdog_timer; 2297 int watchdog_timeo; 2298 2299 u32 proto_down_reason; 2300 2301 struct list_head todo_list; 2302 2303 #ifdef CONFIG_PCPU_DEV_REFCNT 2304 int __percpu *pcpu_refcnt; 2305 #else 2306 refcount_t dev_refcnt; 2307 #endif 2308 struct ref_tracker_dir refcnt_tracker; 2309 2310 struct list_head link_watch_list; 2311 2312 enum { NETREG_UNINITIALIZED=0, 2313 NETREG_REGISTERED, /* completed register_netdevice */ 2314 NETREG_UNREGISTERING, /* called unregister_netdevice */ 2315 NETREG_UNREGISTERED, /* completed unregister todo */ 2316 NETREG_RELEASED, /* called free_netdev */ 2317 NETREG_DUMMY, /* dummy device for NAPI poll */ 2318 } reg_state:8; 2319 2320 bool dismantle; 2321 2322 enum { 2323 RTNL_LINK_INITIALIZED, 2324 RTNL_LINK_INITIALIZING, 2325 } rtnl_link_state:16; 2326 2327 bool needs_free_netdev; 2328 void (*priv_destructor)(struct net_device *dev); 2329 2330 #ifdef CONFIG_NETPOLL 2331 struct netpoll_info __rcu *npinfo; 2332 #endif 2333 2334 possible_net_t nd_net; 2335 2336 /* mid-layer private */ 2337 void *ml_priv; 2338 enum netdev_ml_priv_type ml_priv_type; 2339 2340 union { 2341 struct pcpu_lstats __percpu *lstats; 2342 struct pcpu_sw_netstats __percpu *tstats; 2343 struct pcpu_dstats __percpu *dstats; 2344 }; 2345 2346 #if IS_ENABLED(CONFIG_GARP) 2347 struct garp_port __rcu *garp_port; 2348 #endif 2349 #if IS_ENABLED(CONFIG_MRP) 2350 struct mrp_port __rcu *mrp_port; 2351 #endif 2352 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) 2353 struct dm_hw_stat_delta __rcu *dm_private; 2354 #endif 2355 struct device dev; 2356 const struct attribute_group *sysfs_groups[4]; 2357 const struct attribute_group *sysfs_rx_queue_group; 2358 2359 const struct rtnl_link_ops *rtnl_link_ops; 2360 2361 /* for setting kernel sock attribute on TCP connection setup */ 2362 #define GSO_MAX_SEGS 65535u 2363 #define GSO_LEGACY_MAX_SIZE 65536u 2364 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), 2365 * and shinfo->gso_segs is a 16bit field. 2366 */ 2367 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) 2368 2369 unsigned int gso_max_size; 2370 #define TSO_LEGACY_MAX_SIZE 65536 2371 #define TSO_MAX_SIZE UINT_MAX 2372 unsigned int tso_max_size; 2373 u16 gso_max_segs; 2374 #define TSO_MAX_SEGS U16_MAX 2375 u16 tso_max_segs; 2376 unsigned int gso_ipv4_max_size; 2377 2378 #ifdef CONFIG_DCB 2379 const struct dcbnl_rtnl_ops *dcbnl_ops; 2380 #endif 2381 s16 num_tc; 2382 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2383 u8 prio_tc_map[TC_BITMASK + 1]; 2384 2385 #if IS_ENABLED(CONFIG_FCOE) 2386 unsigned int fcoe_ddp_xid; 2387 #endif 2388 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2389 struct netprio_map __rcu *priomap; 2390 #endif 2391 struct phy_device *phydev; 2392 struct sfp_bus *sfp_bus; 2393 struct lock_class_key *qdisc_tx_busylock; 2394 bool proto_down; 2395 unsigned wol_enabled:1; 2396 unsigned threaded:1; 2397 2398 struct list_head net_notifier_list; 2399 2400 #if IS_ENABLED(CONFIG_MACSEC) 2401 /* MACsec management functions */ 2402 const struct macsec_ops *macsec_ops; 2403 #endif 2404 const struct udp_tunnel_nic_info *udp_tunnel_nic_info; 2405 struct udp_tunnel_nic *udp_tunnel_nic; 2406 2407 /* protected by rtnl_lock */ 2408 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; 2409 2410 u8 dev_addr_shadow[MAX_ADDR_LEN]; 2411 netdevice_tracker linkwatch_dev_tracker; 2412 netdevice_tracker watchdog_dev_tracker; 2413 netdevice_tracker dev_registered_tracker; 2414 struct rtnl_hw_stats64 *offload_xstats_l3; 2415 2416 struct devlink_port *devlink_port; 2417 }; 2418 #define to_net_dev(d) container_of(d, struct net_device, dev) 2419 2420 /* 2421 * Driver should use this to assign devlink port instance to a netdevice 2422 * before it registers the netdevice. Therefore devlink_port is static 2423 * during the netdev lifetime after it is registered. 2424 */ 2425 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ 2426 ({ \ 2427 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ 2428 ((dev)->devlink_port = (port)); \ 2429 }) 2430 2431 static inline bool netif_elide_gro(const struct net_device *dev) 2432 { 2433 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) 2434 return true; 2435 return false; 2436 } 2437 2438 #define NETDEV_ALIGN 32 2439 2440 static inline 2441 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 2442 { 2443 return dev->prio_tc_map[prio & TC_BITMASK]; 2444 } 2445 2446 static inline 2447 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 2448 { 2449 if (tc >= dev->num_tc) 2450 return -EINVAL; 2451 2452 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 2453 return 0; 2454 } 2455 2456 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); 2457 void netdev_reset_tc(struct net_device *dev); 2458 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); 2459 int netdev_set_num_tc(struct net_device *dev, u8 num_tc); 2460 2461 static inline 2462 int netdev_get_num_tc(struct net_device *dev) 2463 { 2464 return dev->num_tc; 2465 } 2466 2467 static inline void net_prefetch(void *p) 2468 { 2469 prefetch(p); 2470 #if L1_CACHE_BYTES < 128 2471 prefetch((u8 *)p + L1_CACHE_BYTES); 2472 #endif 2473 } 2474 2475 static inline void net_prefetchw(void *p) 2476 { 2477 prefetchw(p); 2478 #if L1_CACHE_BYTES < 128 2479 prefetchw((u8 *)p + L1_CACHE_BYTES); 2480 #endif 2481 } 2482 2483 void netdev_unbind_sb_channel(struct net_device *dev, 2484 struct net_device *sb_dev); 2485 int netdev_bind_sb_channel_queue(struct net_device *dev, 2486 struct net_device *sb_dev, 2487 u8 tc, u16 count, u16 offset); 2488 int netdev_set_sb_channel(struct net_device *dev, u16 channel); 2489 static inline int netdev_get_sb_channel(struct net_device *dev) 2490 { 2491 return max_t(int, -dev->num_tc, 0); 2492 } 2493 2494 static inline 2495 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2496 unsigned int index) 2497 { 2498 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); 2499 return &dev->_tx[index]; 2500 } 2501 2502 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, 2503 const struct sk_buff *skb) 2504 { 2505 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 2506 } 2507 2508 static inline void netdev_for_each_tx_queue(struct net_device *dev, 2509 void (*f)(struct net_device *, 2510 struct netdev_queue *, 2511 void *), 2512 void *arg) 2513 { 2514 unsigned int i; 2515 2516 for (i = 0; i < dev->num_tx_queues; i++) 2517 f(dev, &dev->_tx[i], arg); 2518 } 2519 2520 #define netdev_lockdep_set_classes(dev) \ 2521 { \ 2522 static struct lock_class_key qdisc_tx_busylock_key; \ 2523 static struct lock_class_key qdisc_xmit_lock_key; \ 2524 static struct lock_class_key dev_addr_list_lock_key; \ 2525 unsigned int i; \ 2526 \ 2527 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2528 lockdep_set_class(&(dev)->addr_list_lock, \ 2529 &dev_addr_list_lock_key); \ 2530 for (i = 0; i < (dev)->num_tx_queues; i++) \ 2531 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2532 &qdisc_xmit_lock_key); \ 2533 } 2534 2535 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 2536 struct net_device *sb_dev); 2537 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 2538 struct sk_buff *skb, 2539 struct net_device *sb_dev); 2540 2541 /* returns the headroom that the master device needs to take in account 2542 * when forwarding to this dev 2543 */ 2544 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) 2545 { 2546 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; 2547 } 2548 2549 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) 2550 { 2551 if (dev->netdev_ops->ndo_set_rx_headroom) 2552 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); 2553 } 2554 2555 /* set the device rx headroom to the dev's default */ 2556 static inline void netdev_reset_rx_headroom(struct net_device *dev) 2557 { 2558 netdev_set_rx_headroom(dev, -1); 2559 } 2560 2561 static inline void *netdev_get_ml_priv(struct net_device *dev, 2562 enum netdev_ml_priv_type type) 2563 { 2564 if (dev->ml_priv_type != type) 2565 return NULL; 2566 2567 return dev->ml_priv; 2568 } 2569 2570 static inline void netdev_set_ml_priv(struct net_device *dev, 2571 void *ml_priv, 2572 enum netdev_ml_priv_type type) 2573 { 2574 WARN(dev->ml_priv_type && dev->ml_priv_type != type, 2575 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", 2576 dev->ml_priv_type, type); 2577 WARN(!dev->ml_priv_type && dev->ml_priv, 2578 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); 2579 2580 dev->ml_priv = ml_priv; 2581 dev->ml_priv_type = type; 2582 } 2583 2584 /* 2585 * Net namespace inlines 2586 */ 2587 static inline 2588 struct net *dev_net(const struct net_device *dev) 2589 { 2590 return read_pnet(&dev->nd_net); 2591 } 2592 2593 static inline 2594 void dev_net_set(struct net_device *dev, struct net *net) 2595 { 2596 write_pnet(&dev->nd_net, net); 2597 } 2598 2599 /** 2600 * netdev_priv - access network device private data 2601 * @dev: network device 2602 * 2603 * Get network device private data 2604 */ 2605 static inline void *netdev_priv(const struct net_device *dev) 2606 { 2607 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); 2608 } 2609 2610 /* Set the sysfs physical device reference for the network logical device 2611 * if set prior to registration will cause a symlink during initialization. 2612 */ 2613 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 2614 2615 /* Set the sysfs device type for the network logical device to allow 2616 * fine-grained identification of different network device types. For 2617 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 2618 */ 2619 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2620 2621 /* Default NAPI poll() weight 2622 * Device drivers are strongly advised to not use bigger value 2623 */ 2624 #define NAPI_POLL_WEIGHT 64 2625 2626 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 2627 int (*poll)(struct napi_struct *, int), int weight); 2628 2629 /** 2630 * netif_napi_add() - initialize a NAPI context 2631 * @dev: network device 2632 * @napi: NAPI context 2633 * @poll: polling function 2634 * 2635 * netif_napi_add() must be used to initialize a NAPI context prior to calling 2636 * *any* of the other NAPI-related functions. 2637 */ 2638 static inline void 2639 netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2640 int (*poll)(struct napi_struct *, int)) 2641 { 2642 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2643 } 2644 2645 static inline void 2646 netif_napi_add_tx_weight(struct net_device *dev, 2647 struct napi_struct *napi, 2648 int (*poll)(struct napi_struct *, int), 2649 int weight) 2650 { 2651 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); 2652 netif_napi_add_weight(dev, napi, poll, weight); 2653 } 2654 2655 /** 2656 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only 2657 * @dev: network device 2658 * @napi: NAPI context 2659 * @poll: polling function 2660 * 2661 * This variant of netif_napi_add() should be used from drivers using NAPI 2662 * to exclusively poll a TX queue. 2663 * This will avoid we add it into napi_hash[], thus polluting this hash table. 2664 */ 2665 static inline void netif_napi_add_tx(struct net_device *dev, 2666 struct napi_struct *napi, 2667 int (*poll)(struct napi_struct *, int)) 2668 { 2669 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); 2670 } 2671 2672 /** 2673 * __netif_napi_del - remove a NAPI context 2674 * @napi: NAPI context 2675 * 2676 * Warning: caller must observe RCU grace period before freeing memory 2677 * containing @napi. Drivers might want to call this helper to combine 2678 * all the needed RCU grace periods into a single one. 2679 */ 2680 void __netif_napi_del(struct napi_struct *napi); 2681 2682 /** 2683 * netif_napi_del - remove a NAPI context 2684 * @napi: NAPI context 2685 * 2686 * netif_napi_del() removes a NAPI context from the network device NAPI list 2687 */ 2688 static inline void netif_napi_del(struct napi_struct *napi) 2689 { 2690 __netif_napi_del(napi); 2691 synchronize_net(); 2692 } 2693 2694 struct packet_type { 2695 __be16 type; /* This is really htons(ether_type). */ 2696 bool ignore_outgoing; 2697 struct net_device *dev; /* NULL is wildcarded here */ 2698 netdevice_tracker dev_tracker; 2699 int (*func) (struct sk_buff *, 2700 struct net_device *, 2701 struct packet_type *, 2702 struct net_device *); 2703 void (*list_func) (struct list_head *, 2704 struct packet_type *, 2705 struct net_device *); 2706 bool (*id_match)(struct packet_type *ptype, 2707 struct sock *sk); 2708 struct net *af_packet_net; 2709 void *af_packet_priv; 2710 struct list_head list; 2711 }; 2712 2713 struct offload_callbacks { 2714 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2715 netdev_features_t features); 2716 struct sk_buff *(*gro_receive)(struct list_head *head, 2717 struct sk_buff *skb); 2718 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2719 }; 2720 2721 struct packet_offload { 2722 __be16 type; /* This is really htons(ether_type). */ 2723 u16 priority; 2724 struct offload_callbacks callbacks; 2725 struct list_head list; 2726 }; 2727 2728 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2729 struct pcpu_sw_netstats { 2730 u64_stats_t rx_packets; 2731 u64_stats_t rx_bytes; 2732 u64_stats_t tx_packets; 2733 u64_stats_t tx_bytes; 2734 struct u64_stats_sync syncp; 2735 } __aligned(4 * sizeof(u64)); 2736 2737 struct pcpu_lstats { 2738 u64_stats_t packets; 2739 u64_stats_t bytes; 2740 struct u64_stats_sync syncp; 2741 } __aligned(2 * sizeof(u64)); 2742 2743 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); 2744 2745 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 2746 { 2747 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2748 2749 u64_stats_update_begin(&tstats->syncp); 2750 u64_stats_add(&tstats->rx_bytes, len); 2751 u64_stats_inc(&tstats->rx_packets); 2752 u64_stats_update_end(&tstats->syncp); 2753 } 2754 2755 static inline void dev_sw_netstats_tx_add(struct net_device *dev, 2756 unsigned int packets, 2757 unsigned int len) 2758 { 2759 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 2760 2761 u64_stats_update_begin(&tstats->syncp); 2762 u64_stats_add(&tstats->tx_bytes, len); 2763 u64_stats_add(&tstats->tx_packets, packets); 2764 u64_stats_update_end(&tstats->syncp); 2765 } 2766 2767 static inline void dev_lstats_add(struct net_device *dev, unsigned int len) 2768 { 2769 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); 2770 2771 u64_stats_update_begin(&lstats->syncp); 2772 u64_stats_add(&lstats->bytes, len); 2773 u64_stats_inc(&lstats->packets); 2774 u64_stats_update_end(&lstats->syncp); 2775 } 2776 2777 #define __netdev_alloc_pcpu_stats(type, gfp) \ 2778 ({ \ 2779 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 2780 if (pcpu_stats) { \ 2781 int __cpu; \ 2782 for_each_possible_cpu(__cpu) { \ 2783 typeof(type) *stat; \ 2784 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2785 u64_stats_init(&stat->syncp); \ 2786 } \ 2787 } \ 2788 pcpu_stats; \ 2789 }) 2790 2791 #define netdev_alloc_pcpu_stats(type) \ 2792 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2793 2794 #define devm_netdev_alloc_pcpu_stats(dev, type) \ 2795 ({ \ 2796 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ 2797 if (pcpu_stats) { \ 2798 int __cpu; \ 2799 for_each_possible_cpu(__cpu) { \ 2800 typeof(type) *stat; \ 2801 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2802 u64_stats_init(&stat->syncp); \ 2803 } \ 2804 } \ 2805 pcpu_stats; \ 2806 }) 2807 2808 enum netdev_lag_tx_type { 2809 NETDEV_LAG_TX_TYPE_UNKNOWN, 2810 NETDEV_LAG_TX_TYPE_RANDOM, 2811 NETDEV_LAG_TX_TYPE_BROADCAST, 2812 NETDEV_LAG_TX_TYPE_ROUNDROBIN, 2813 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, 2814 NETDEV_LAG_TX_TYPE_HASH, 2815 }; 2816 2817 enum netdev_lag_hash { 2818 NETDEV_LAG_HASH_NONE, 2819 NETDEV_LAG_HASH_L2, 2820 NETDEV_LAG_HASH_L34, 2821 NETDEV_LAG_HASH_L23, 2822 NETDEV_LAG_HASH_E23, 2823 NETDEV_LAG_HASH_E34, 2824 NETDEV_LAG_HASH_VLAN_SRCMAC, 2825 NETDEV_LAG_HASH_UNKNOWN, 2826 }; 2827 2828 struct netdev_lag_upper_info { 2829 enum netdev_lag_tx_type tx_type; 2830 enum netdev_lag_hash hash_type; 2831 }; 2832 2833 struct netdev_lag_lower_state_info { 2834 u8 link_up : 1, 2835 tx_enabled : 1; 2836 }; 2837 2838 #include <linux/notifier.h> 2839 2840 /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() 2841 * and the rtnetlink notification exclusion list in rtnetlink_event() when 2842 * adding new types. 2843 */ 2844 enum netdev_cmd { 2845 NETDEV_UP = 1, /* For now you can't veto a device up/down */ 2846 NETDEV_DOWN, 2847 NETDEV_REBOOT, /* Tell a protocol stack a network interface 2848 detected a hardware crash and restarted 2849 - we can use this eg to kick tcp sessions 2850 once done */ 2851 NETDEV_CHANGE, /* Notify device state change */ 2852 NETDEV_REGISTER, 2853 NETDEV_UNREGISTER, 2854 NETDEV_CHANGEMTU, /* notify after mtu change happened */ 2855 NETDEV_CHANGEADDR, /* notify after the address change */ 2856 NETDEV_PRE_CHANGEADDR, /* notify before the address change */ 2857 NETDEV_GOING_DOWN, 2858 NETDEV_CHANGENAME, 2859 NETDEV_FEAT_CHANGE, 2860 NETDEV_BONDING_FAILOVER, 2861 NETDEV_PRE_UP, 2862 NETDEV_PRE_TYPE_CHANGE, 2863 NETDEV_POST_TYPE_CHANGE, 2864 NETDEV_POST_INIT, 2865 NETDEV_PRE_UNINIT, 2866 NETDEV_RELEASE, 2867 NETDEV_NOTIFY_PEERS, 2868 NETDEV_JOIN, 2869 NETDEV_CHANGEUPPER, 2870 NETDEV_RESEND_IGMP, 2871 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ 2872 NETDEV_CHANGEINFODATA, 2873 NETDEV_BONDING_INFO, 2874 NETDEV_PRECHANGEUPPER, 2875 NETDEV_CHANGELOWERSTATE, 2876 NETDEV_UDP_TUNNEL_PUSH_INFO, 2877 NETDEV_UDP_TUNNEL_DROP_INFO, 2878 NETDEV_CHANGE_TX_QUEUE_LEN, 2879 NETDEV_CVLAN_FILTER_PUSH_INFO, 2880 NETDEV_CVLAN_FILTER_DROP_INFO, 2881 NETDEV_SVLAN_FILTER_PUSH_INFO, 2882 NETDEV_SVLAN_FILTER_DROP_INFO, 2883 NETDEV_OFFLOAD_XSTATS_ENABLE, 2884 NETDEV_OFFLOAD_XSTATS_DISABLE, 2885 NETDEV_OFFLOAD_XSTATS_REPORT_USED, 2886 NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 2887 NETDEV_XDP_FEAT_CHANGE, 2888 }; 2889 const char *netdev_cmd_to_name(enum netdev_cmd cmd); 2890 2891 int register_netdevice_notifier(struct notifier_block *nb); 2892 int unregister_netdevice_notifier(struct notifier_block *nb); 2893 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); 2894 int unregister_netdevice_notifier_net(struct net *net, 2895 struct notifier_block *nb); 2896 int register_netdevice_notifier_dev_net(struct net_device *dev, 2897 struct notifier_block *nb, 2898 struct netdev_net_notifier *nn); 2899 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2900 struct notifier_block *nb, 2901 struct netdev_net_notifier *nn); 2902 2903 struct netdev_notifier_info { 2904 struct net_device *dev; 2905 struct netlink_ext_ack *extack; 2906 }; 2907 2908 struct netdev_notifier_info_ext { 2909 struct netdev_notifier_info info; /* must be first */ 2910 union { 2911 u32 mtu; 2912 } ext; 2913 }; 2914 2915 struct netdev_notifier_change_info { 2916 struct netdev_notifier_info info; /* must be first */ 2917 unsigned int flags_changed; 2918 }; 2919 2920 struct netdev_notifier_changeupper_info { 2921 struct netdev_notifier_info info; /* must be first */ 2922 struct net_device *upper_dev; /* new upper dev */ 2923 bool master; /* is upper dev master */ 2924 bool linking; /* is the notification for link or unlink */ 2925 void *upper_info; /* upper dev info */ 2926 }; 2927 2928 struct netdev_notifier_changelowerstate_info { 2929 struct netdev_notifier_info info; /* must be first */ 2930 void *lower_state_info; /* is lower dev state */ 2931 }; 2932 2933 struct netdev_notifier_pre_changeaddr_info { 2934 struct netdev_notifier_info info; /* must be first */ 2935 const unsigned char *dev_addr; 2936 }; 2937 2938 enum netdev_offload_xstats_type { 2939 NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, 2940 }; 2941 2942 struct netdev_notifier_offload_xstats_info { 2943 struct netdev_notifier_info info; /* must be first */ 2944 enum netdev_offload_xstats_type type; 2945 2946 union { 2947 /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ 2948 struct netdev_notifier_offload_xstats_rd *report_delta; 2949 /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ 2950 struct netdev_notifier_offload_xstats_ru *report_used; 2951 }; 2952 }; 2953 2954 int netdev_offload_xstats_enable(struct net_device *dev, 2955 enum netdev_offload_xstats_type type, 2956 struct netlink_ext_ack *extack); 2957 int netdev_offload_xstats_disable(struct net_device *dev, 2958 enum netdev_offload_xstats_type type); 2959 bool netdev_offload_xstats_enabled(const struct net_device *dev, 2960 enum netdev_offload_xstats_type type); 2961 int netdev_offload_xstats_get(struct net_device *dev, 2962 enum netdev_offload_xstats_type type, 2963 struct rtnl_hw_stats64 *stats, bool *used, 2964 struct netlink_ext_ack *extack); 2965 void 2966 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, 2967 const struct rtnl_hw_stats64 *stats); 2968 void 2969 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); 2970 void netdev_offload_xstats_push_delta(struct net_device *dev, 2971 enum netdev_offload_xstats_type type, 2972 const struct rtnl_hw_stats64 *stats); 2973 2974 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2975 struct net_device *dev) 2976 { 2977 info->dev = dev; 2978 info->extack = NULL; 2979 } 2980 2981 static inline struct net_device * 2982 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 2983 { 2984 return info->dev; 2985 } 2986 2987 static inline struct netlink_ext_ack * 2988 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) 2989 { 2990 return info->extack; 2991 } 2992 2993 int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 2994 int call_netdevice_notifiers_info(unsigned long val, 2995 struct netdev_notifier_info *info); 2996 2997 extern rwlock_t dev_base_lock; /* Device list lock */ 2998 2999 #define for_each_netdev(net, d) \ 3000 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 3001 #define for_each_netdev_reverse(net, d) \ 3002 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 3003 #define for_each_netdev_rcu(net, d) \ 3004 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 3005 #define for_each_netdev_safe(net, d, n) \ 3006 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 3007 #define for_each_netdev_continue(net, d) \ 3008 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 3009 #define for_each_netdev_continue_reverse(net, d) \ 3010 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ 3011 dev_list) 3012 #define for_each_netdev_continue_rcu(net, d) \ 3013 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 3014 #define for_each_netdev_in_bond_rcu(bond, slave) \ 3015 for_each_netdev_rcu(&init_net, slave) \ 3016 if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 3017 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 3018 3019 static inline struct net_device *next_net_device(struct net_device *dev) 3020 { 3021 struct list_head *lh; 3022 struct net *net; 3023 3024 net = dev_net(dev); 3025 lh = dev->dev_list.next; 3026 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3027 } 3028 3029 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 3030 { 3031 struct list_head *lh; 3032 struct net *net; 3033 3034 net = dev_net(dev); 3035 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 3036 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3037 } 3038 3039 static inline struct net_device *first_net_device(struct net *net) 3040 { 3041 return list_empty(&net->dev_base_head) ? NULL : 3042 net_device_entry(net->dev_base_head.next); 3043 } 3044 3045 static inline struct net_device *first_net_device_rcu(struct net *net) 3046 { 3047 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 3048 3049 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 3050 } 3051 3052 int netdev_boot_setup_check(struct net_device *dev); 3053 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 3054 const char *hwaddr); 3055 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 3056 void dev_add_pack(struct packet_type *pt); 3057 void dev_remove_pack(struct packet_type *pt); 3058 void __dev_remove_pack(struct packet_type *pt); 3059 void dev_add_offload(struct packet_offload *po); 3060 void dev_remove_offload(struct packet_offload *po); 3061 3062 int dev_get_iflink(const struct net_device *dev); 3063 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); 3064 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 3065 struct net_device_path_stack *stack); 3066 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 3067 unsigned short mask); 3068 struct net_device *dev_get_by_name(struct net *net, const char *name); 3069 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 3070 struct net_device *__dev_get_by_name(struct net *net, const char *name); 3071 bool netdev_name_in_use(struct net *net, const char *name); 3072 int dev_alloc_name(struct net_device *dev, const char *name); 3073 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); 3074 void dev_close(struct net_device *dev); 3075 void dev_close_many(struct list_head *head, bool unlink); 3076 void dev_disable_lro(struct net_device *dev); 3077 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 3078 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3079 struct net_device *sb_dev); 3080 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3081 struct net_device *sb_dev); 3082 3083 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); 3084 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 3085 3086 static inline int dev_queue_xmit(struct sk_buff *skb) 3087 { 3088 return __dev_queue_xmit(skb, NULL); 3089 } 3090 3091 static inline int dev_queue_xmit_accel(struct sk_buff *skb, 3092 struct net_device *sb_dev) 3093 { 3094 return __dev_queue_xmit(skb, sb_dev); 3095 } 3096 3097 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3098 { 3099 int ret; 3100 3101 ret = __dev_direct_xmit(skb, queue_id); 3102 if (!dev_xmit_complete(ret)) 3103 kfree_skb(skb); 3104 return ret; 3105 } 3106 3107 int register_netdevice(struct net_device *dev); 3108 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 3109 void unregister_netdevice_many(struct list_head *head); 3110 static inline void unregister_netdevice(struct net_device *dev) 3111 { 3112 unregister_netdevice_queue(dev, NULL); 3113 } 3114 3115 int netdev_refcnt_read(const struct net_device *dev); 3116 void free_netdev(struct net_device *dev); 3117 void netdev_freemem(struct net_device *dev); 3118 int init_dummy_netdev(struct net_device *dev); 3119 3120 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 3121 struct sk_buff *skb, 3122 bool all_slaves); 3123 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 3124 struct sock *sk); 3125 struct net_device *dev_get_by_index(struct net *net, int ifindex); 3126 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 3127 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 3128 netdevice_tracker *tracker, gfp_t gfp); 3129 struct net_device *netdev_get_by_name(struct net *net, const char *name, 3130 netdevice_tracker *tracker, gfp_t gfp); 3131 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 3132 struct net_device *dev_get_by_napi_id(unsigned int napi_id); 3133 3134 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 3135 unsigned short type, 3136 const void *daddr, const void *saddr, 3137 unsigned int len) 3138 { 3139 if (!dev->header_ops || !dev->header_ops->create) 3140 return 0; 3141 3142 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 3143 } 3144 3145 static inline int dev_parse_header(const struct sk_buff *skb, 3146 unsigned char *haddr) 3147 { 3148 const struct net_device *dev = skb->dev; 3149 3150 if (!dev->header_ops || !dev->header_ops->parse) 3151 return 0; 3152 return dev->header_ops->parse(skb, haddr); 3153 } 3154 3155 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) 3156 { 3157 const struct net_device *dev = skb->dev; 3158 3159 if (!dev->header_ops || !dev->header_ops->parse_protocol) 3160 return 0; 3161 return dev->header_ops->parse_protocol(skb); 3162 } 3163 3164 /* ll_header must have at least hard_header_len allocated */ 3165 static inline bool dev_validate_header(const struct net_device *dev, 3166 char *ll_header, int len) 3167 { 3168 if (likely(len >= dev->hard_header_len)) 3169 return true; 3170 if (len < dev->min_header_len) 3171 return false; 3172 3173 if (capable(CAP_SYS_RAWIO)) { 3174 memset(ll_header + len, 0, dev->hard_header_len - len); 3175 return true; 3176 } 3177 3178 if (dev->header_ops && dev->header_ops->validate) 3179 return dev->header_ops->validate(ll_header, len); 3180 3181 return false; 3182 } 3183 3184 static inline bool dev_has_header(const struct net_device *dev) 3185 { 3186 return dev->header_ops && dev->header_ops->create; 3187 } 3188 3189 /* 3190 * Incoming packets are placed on per-CPU queues 3191 */ 3192 struct softnet_data { 3193 struct list_head poll_list; 3194 struct sk_buff_head process_queue; 3195 3196 /* stats */ 3197 unsigned int processed; 3198 unsigned int time_squeeze; 3199 #ifdef CONFIG_RPS 3200 struct softnet_data *rps_ipi_list; 3201 #endif 3202 3203 bool in_net_rx_action; 3204 bool in_napi_threaded_poll; 3205 3206 #ifdef CONFIG_NET_FLOW_LIMIT 3207 struct sd_flow_limit __rcu *flow_limit; 3208 #endif 3209 struct Qdisc *output_queue; 3210 struct Qdisc **output_queue_tailp; 3211 struct sk_buff *completion_queue; 3212 #ifdef CONFIG_XFRM_OFFLOAD 3213 struct sk_buff_head xfrm_backlog; 3214 #endif 3215 /* written and read only by owning cpu: */ 3216 struct { 3217 u16 recursion; 3218 u8 more; 3219 #ifdef CONFIG_NET_EGRESS 3220 u8 skip_txqueue; 3221 #endif 3222 } xmit; 3223 #ifdef CONFIG_RPS 3224 /* input_queue_head should be written by cpu owning this struct, 3225 * and only read by other cpus. Worth using a cache line. 3226 */ 3227 unsigned int input_queue_head ____cacheline_aligned_in_smp; 3228 3229 /* Elements below can be accessed between CPUs for RPS/RFS */ 3230 call_single_data_t csd ____cacheline_aligned_in_smp; 3231 struct softnet_data *rps_ipi_next; 3232 unsigned int cpu; 3233 unsigned int input_queue_tail; 3234 #endif 3235 unsigned int received_rps; 3236 unsigned int dropped; 3237 struct sk_buff_head input_pkt_queue; 3238 struct napi_struct backlog; 3239 3240 /* Another possibly contended cache line */ 3241 spinlock_t defer_lock ____cacheline_aligned_in_smp; 3242 int defer_count; 3243 int defer_ipi_scheduled; 3244 struct sk_buff *defer_list; 3245 call_single_data_t defer_csd; 3246 }; 3247 3248 static inline void input_queue_head_incr(struct softnet_data *sd) 3249 { 3250 #ifdef CONFIG_RPS 3251 sd->input_queue_head++; 3252 #endif 3253 } 3254 3255 static inline void input_queue_tail_incr_save(struct softnet_data *sd, 3256 unsigned int *qtail) 3257 { 3258 #ifdef CONFIG_RPS 3259 *qtail = ++sd->input_queue_tail; 3260 #endif 3261 } 3262 3263 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3264 3265 static inline int dev_recursion_level(void) 3266 { 3267 return this_cpu_read(softnet_data.xmit.recursion); 3268 } 3269 3270 #define XMIT_RECURSION_LIMIT 8 3271 static inline bool dev_xmit_recursion(void) 3272 { 3273 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 3274 XMIT_RECURSION_LIMIT); 3275 } 3276 3277 static inline void dev_xmit_recursion_inc(void) 3278 { 3279 __this_cpu_inc(softnet_data.xmit.recursion); 3280 } 3281 3282 static inline void dev_xmit_recursion_dec(void) 3283 { 3284 __this_cpu_dec(softnet_data.xmit.recursion); 3285 } 3286 3287 void __netif_schedule(struct Qdisc *q); 3288 void netif_schedule_queue(struct netdev_queue *txq); 3289 3290 static inline void netif_tx_schedule_all(struct net_device *dev) 3291 { 3292 unsigned int i; 3293 3294 for (i = 0; i < dev->num_tx_queues; i++) 3295 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 3296 } 3297 3298 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 3299 { 3300 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3301 } 3302 3303 /** 3304 * netif_start_queue - allow transmit 3305 * @dev: network device 3306 * 3307 * Allow upper layers to call the device hard_start_xmit routine. 3308 */ 3309 static inline void netif_start_queue(struct net_device *dev) 3310 { 3311 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 3312 } 3313 3314 static inline void netif_tx_start_all_queues(struct net_device *dev) 3315 { 3316 unsigned int i; 3317 3318 for (i = 0; i < dev->num_tx_queues; i++) { 3319 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3320 netif_tx_start_queue(txq); 3321 } 3322 } 3323 3324 void netif_tx_wake_queue(struct netdev_queue *dev_queue); 3325 3326 /** 3327 * netif_wake_queue - restart transmit 3328 * @dev: network device 3329 * 3330 * Allow upper layers to call the device hard_start_xmit routine. 3331 * Used for flow control when transmit resources are available. 3332 */ 3333 static inline void netif_wake_queue(struct net_device *dev) 3334 { 3335 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 3336 } 3337 3338 static inline void netif_tx_wake_all_queues(struct net_device *dev) 3339 { 3340 unsigned int i; 3341 3342 for (i = 0; i < dev->num_tx_queues; i++) { 3343 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 3344 netif_tx_wake_queue(txq); 3345 } 3346 } 3347 3348 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 3349 { 3350 /* Must be an atomic op see netif_txq_try_stop() */ 3351 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3352 } 3353 3354 /** 3355 * netif_stop_queue - stop transmitted packets 3356 * @dev: network device 3357 * 3358 * Stop upper layers calling the device hard_start_xmit routine. 3359 * Used for flow control when transmit resources are unavailable. 3360 */ 3361 static inline void netif_stop_queue(struct net_device *dev) 3362 { 3363 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 3364 } 3365 3366 void netif_tx_stop_all_queues(struct net_device *dev); 3367 3368 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 3369 { 3370 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 3371 } 3372 3373 /** 3374 * netif_queue_stopped - test if transmit queue is flowblocked 3375 * @dev: network device 3376 * 3377 * Test if transmit queue on device is currently unable to send. 3378 */ 3379 static inline bool netif_queue_stopped(const struct net_device *dev) 3380 { 3381 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 3382 } 3383 3384 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 3385 { 3386 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 3387 } 3388 3389 static inline bool 3390 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 3391 { 3392 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 3393 } 3394 3395 static inline bool 3396 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) 3397 { 3398 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 3399 } 3400 3401 /** 3402 * netdev_queue_set_dql_min_limit - set dql minimum limit 3403 * @dev_queue: pointer to transmit queue 3404 * @min_limit: dql minimum limit 3405 * 3406 * Forces xmit_more() to return true until the minimum threshold 3407 * defined by @min_limit is reached (or until the tx queue is 3408 * empty). Warning: to be use with care, misuse will impact the 3409 * latency. 3410 */ 3411 static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, 3412 unsigned int min_limit) 3413 { 3414 #ifdef CONFIG_BQL 3415 dev_queue->dql.min_limit = min_limit; 3416 #endif 3417 } 3418 3419 /** 3420 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write 3421 * @dev_queue: pointer to transmit queue 3422 * 3423 * BQL enabled drivers might use this helper in their ndo_start_xmit(), 3424 * to give appropriate hint to the CPU. 3425 */ 3426 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) 3427 { 3428 #ifdef CONFIG_BQL 3429 prefetchw(&dev_queue->dql.num_queued); 3430 #endif 3431 } 3432 3433 /** 3434 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write 3435 * @dev_queue: pointer to transmit queue 3436 * 3437 * BQL enabled drivers might use this helper in their TX completion path, 3438 * to give appropriate hint to the CPU. 3439 */ 3440 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 3441 { 3442 #ifdef CONFIG_BQL 3443 prefetchw(&dev_queue->dql.limit); 3444 #endif 3445 } 3446 3447 /** 3448 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue 3449 * @dev_queue: network device queue 3450 * @bytes: number of bytes queued to the device queue 3451 * 3452 * Report the number of bytes queued for sending/completion to the network 3453 * device hardware queue. @bytes should be a good approximation and should 3454 * exactly match netdev_completed_queue() @bytes. 3455 * This is typically called once per packet, from ndo_start_xmit(). 3456 */ 3457 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3458 unsigned int bytes) 3459 { 3460 #ifdef CONFIG_BQL 3461 dql_queued(&dev_queue->dql, bytes); 3462 3463 if (likely(dql_avail(&dev_queue->dql) >= 0)) 3464 return; 3465 3466 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3467 3468 /* 3469 * The XOFF flag must be set before checking the dql_avail below, 3470 * because in netdev_tx_completed_queue we update the dql_completed 3471 * before checking the XOFF flag. 3472 */ 3473 smp_mb(); 3474 3475 /* check again in case another CPU has just made room avail */ 3476 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 3477 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 3478 #endif 3479 } 3480 3481 /* Variant of netdev_tx_sent_queue() for drivers that are aware 3482 * that they should not test BQL status themselves. 3483 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last 3484 * skb of a batch. 3485 * Returns true if the doorbell must be used to kick the NIC. 3486 */ 3487 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, 3488 unsigned int bytes, 3489 bool xmit_more) 3490 { 3491 if (xmit_more) { 3492 #ifdef CONFIG_BQL 3493 dql_queued(&dev_queue->dql, bytes); 3494 #endif 3495 return netif_tx_queue_stopped(dev_queue); 3496 } 3497 netdev_tx_sent_queue(dev_queue, bytes); 3498 return true; 3499 } 3500 3501 /** 3502 * netdev_sent_queue - report the number of bytes queued to hardware 3503 * @dev: network device 3504 * @bytes: number of bytes queued to the hardware device queue 3505 * 3506 * Report the number of bytes queued for sending/completion to the network 3507 * device hardware queue#0. @bytes should be a good approximation and should 3508 * exactly match netdev_completed_queue() @bytes. 3509 * This is typically called once per packet, from ndo_start_xmit(). 3510 */ 3511 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 3512 { 3513 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 3514 } 3515 3516 static inline bool __netdev_sent_queue(struct net_device *dev, 3517 unsigned int bytes, 3518 bool xmit_more) 3519 { 3520 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, 3521 xmit_more); 3522 } 3523 3524 /** 3525 * netdev_tx_completed_queue - report number of packets/bytes at TX completion. 3526 * @dev_queue: network device queue 3527 * @pkts: number of packets (currently ignored) 3528 * @bytes: number of bytes dequeued from the device queue 3529 * 3530 * Must be called at most once per TX completion round (and not per 3531 * individual packet), so that BQL can adjust its limits appropriately. 3532 */ 3533 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 3534 unsigned int pkts, unsigned int bytes) 3535 { 3536 #ifdef CONFIG_BQL 3537 if (unlikely(!bytes)) 3538 return; 3539 3540 dql_completed(&dev_queue->dql, bytes); 3541 3542 /* 3543 * Without the memory barrier there is a small possiblity that 3544 * netdev_tx_sent_queue will miss the update and cause the queue to 3545 * be stopped forever 3546 */ 3547 smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ 3548 3549 if (unlikely(dql_avail(&dev_queue->dql) < 0)) 3550 return; 3551 3552 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 3553 netif_schedule_queue(dev_queue); 3554 #endif 3555 } 3556 3557 /** 3558 * netdev_completed_queue - report bytes and packets completed by device 3559 * @dev: network device 3560 * @pkts: actual number of packets sent over the medium 3561 * @bytes: actual number of bytes sent over the medium 3562 * 3563 * Report the number of bytes and packets transmitted by the network device 3564 * hardware queue over the physical medium, @bytes must exactly match the 3565 * @bytes amount passed to netdev_sent_queue() 3566 */ 3567 static inline void netdev_completed_queue(struct net_device *dev, 3568 unsigned int pkts, unsigned int bytes) 3569 { 3570 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 3571 } 3572 3573 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 3574 { 3575 #ifdef CONFIG_BQL 3576 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 3577 dql_reset(&q->dql); 3578 #endif 3579 } 3580 3581 /** 3582 * netdev_reset_queue - reset the packets and bytes count of a network device 3583 * @dev_queue: network device 3584 * 3585 * Reset the bytes and packet count of a network device and clear the 3586 * software flow control OFF bit for this network device 3587 */ 3588 static inline void netdev_reset_queue(struct net_device *dev_queue) 3589 { 3590 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 3591 } 3592 3593 /** 3594 * netdev_cap_txqueue - check if selected tx queue exceeds device queues 3595 * @dev: network device 3596 * @queue_index: given tx queue index 3597 * 3598 * Returns 0 if given tx queue index >= number of device tx queues, 3599 * otherwise returns the originally passed tx queue index. 3600 */ 3601 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 3602 { 3603 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 3604 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 3605 dev->name, queue_index, 3606 dev->real_num_tx_queues); 3607 return 0; 3608 } 3609 3610 return queue_index; 3611 } 3612 3613 /** 3614 * netif_running - test if up 3615 * @dev: network device 3616 * 3617 * Test if the device has been brought up. 3618 */ 3619 static inline bool netif_running(const struct net_device *dev) 3620 { 3621 return test_bit(__LINK_STATE_START, &dev->state); 3622 } 3623 3624 /* 3625 * Routines to manage the subqueues on a device. We only need start, 3626 * stop, and a check if it's stopped. All other device management is 3627 * done at the overall netdevice level. 3628 * Also test the device if we're multiqueue. 3629 */ 3630 3631 /** 3632 * netif_start_subqueue - allow sending packets on subqueue 3633 * @dev: network device 3634 * @queue_index: sub queue index 3635 * 3636 * Start individual transmit queue of a device with multiple transmit queues. 3637 */ 3638 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 3639 { 3640 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3641 3642 netif_tx_start_queue(txq); 3643 } 3644 3645 /** 3646 * netif_stop_subqueue - stop sending packets on subqueue 3647 * @dev: network device 3648 * @queue_index: sub queue index 3649 * 3650 * Stop individual transmit queue of a device with multiple transmit queues. 3651 */ 3652 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 3653 { 3654 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3655 netif_tx_stop_queue(txq); 3656 } 3657 3658 /** 3659 * __netif_subqueue_stopped - test status of subqueue 3660 * @dev: network device 3661 * @queue_index: sub queue index 3662 * 3663 * Check individual transmit queue of a device with multiple transmit queues. 3664 */ 3665 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 3666 u16 queue_index) 3667 { 3668 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3669 3670 return netif_tx_queue_stopped(txq); 3671 } 3672 3673 /** 3674 * netif_subqueue_stopped - test status of subqueue 3675 * @dev: network device 3676 * @skb: sub queue buffer pointer 3677 * 3678 * Check individual transmit queue of a device with multiple transmit queues. 3679 */ 3680 static inline bool netif_subqueue_stopped(const struct net_device *dev, 3681 struct sk_buff *skb) 3682 { 3683 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3684 } 3685 3686 /** 3687 * netif_wake_subqueue - allow sending packets on subqueue 3688 * @dev: network device 3689 * @queue_index: sub queue index 3690 * 3691 * Resume individual transmit queue of a device with multiple transmit queues. 3692 */ 3693 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 3694 { 3695 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 3696 3697 netif_tx_wake_queue(txq); 3698 } 3699 3700 #ifdef CONFIG_XPS 3701 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3702 u16 index); 3703 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 3704 u16 index, enum xps_map_type type); 3705 3706 /** 3707 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask 3708 * @j: CPU/Rx queue index 3709 * @mask: bitmask of all cpus/rx queues 3710 * @nr_bits: number of bits in the bitmask 3711 * 3712 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. 3713 */ 3714 static inline bool netif_attr_test_mask(unsigned long j, 3715 const unsigned long *mask, 3716 unsigned int nr_bits) 3717 { 3718 cpu_max_bits_warn(j, nr_bits); 3719 return test_bit(j, mask); 3720 } 3721 3722 /** 3723 * netif_attr_test_online - Test for online CPU/Rx queue 3724 * @j: CPU/Rx queue index 3725 * @online_mask: bitmask for CPUs/Rx queues that are online 3726 * @nr_bits: number of bits in the bitmask 3727 * 3728 * Returns true if a CPU/Rx queue is online. 3729 */ 3730 static inline bool netif_attr_test_online(unsigned long j, 3731 const unsigned long *online_mask, 3732 unsigned int nr_bits) 3733 { 3734 cpu_max_bits_warn(j, nr_bits); 3735 3736 if (online_mask) 3737 return test_bit(j, online_mask); 3738 3739 return (j < nr_bits); 3740 } 3741 3742 /** 3743 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask 3744 * @n: CPU/Rx queue index 3745 * @srcp: the cpumask/Rx queue mask pointer 3746 * @nr_bits: number of bits in the bitmask 3747 * 3748 * Returns >= nr_bits if no further CPUs/Rx queues set. 3749 */ 3750 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, 3751 unsigned int nr_bits) 3752 { 3753 /* -1 is a legal arg here. */ 3754 if (n != -1) 3755 cpu_max_bits_warn(n, nr_bits); 3756 3757 if (srcp) 3758 return find_next_bit(srcp, nr_bits, n + 1); 3759 3760 return n + 1; 3761 } 3762 3763 /** 3764 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p 3765 * @n: CPU/Rx queue index 3766 * @src1p: the first CPUs/Rx queues mask pointer 3767 * @src2p: the second CPUs/Rx queues mask pointer 3768 * @nr_bits: number of bits in the bitmask 3769 * 3770 * Returns >= nr_bits if no further CPUs/Rx queues set in both. 3771 */ 3772 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, 3773 const unsigned long *src2p, 3774 unsigned int nr_bits) 3775 { 3776 /* -1 is a legal arg here. */ 3777 if (n != -1) 3778 cpu_max_bits_warn(n, nr_bits); 3779 3780 if (src1p && src2p) 3781 return find_next_and_bit(src1p, src2p, nr_bits, n + 1); 3782 else if (src1p) 3783 return find_next_bit(src1p, nr_bits, n + 1); 3784 else if (src2p) 3785 return find_next_bit(src2p, nr_bits, n + 1); 3786 3787 return n + 1; 3788 } 3789 #else 3790 static inline int netif_set_xps_queue(struct net_device *dev, 3791 const struct cpumask *mask, 3792 u16 index) 3793 { 3794 return 0; 3795 } 3796 3797 static inline int __netif_set_xps_queue(struct net_device *dev, 3798 const unsigned long *mask, 3799 u16 index, enum xps_map_type type) 3800 { 3801 return 0; 3802 } 3803 #endif 3804 3805 /** 3806 * netif_is_multiqueue - test if device has multiple transmit queues 3807 * @dev: network device 3808 * 3809 * Check if device has multiple transmit queues 3810 */ 3811 static inline bool netif_is_multiqueue(const struct net_device *dev) 3812 { 3813 return dev->num_tx_queues > 1; 3814 } 3815 3816 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); 3817 3818 #ifdef CONFIG_SYSFS 3819 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3820 #else 3821 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3822 unsigned int rxqs) 3823 { 3824 dev->real_num_rx_queues = rxqs; 3825 return 0; 3826 } 3827 #endif 3828 int netif_set_real_num_queues(struct net_device *dev, 3829 unsigned int txq, unsigned int rxq); 3830 3831 static inline struct netdev_rx_queue * 3832 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) 3833 { 3834 return dev->_rx + rxq; 3835 } 3836 3837 #ifdef CONFIG_SYSFS 3838 static inline unsigned int get_netdev_rx_queue_index( 3839 struct netdev_rx_queue *queue) 3840 { 3841 struct net_device *dev = queue->dev; 3842 int index = queue - dev->_rx; 3843 3844 BUG_ON(index >= dev->num_rx_queues); 3845 return index; 3846 } 3847 #endif 3848 3849 int netif_get_num_default_rss_queues(void); 3850 3851 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3852 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); 3853 3854 /* 3855 * It is not allowed to call kfree_skb() or consume_skb() from hardware 3856 * interrupt context or with hardware interrupts being disabled. 3857 * (in_hardirq() || irqs_disabled()) 3858 * 3859 * We provide four helpers that can be used in following contexts : 3860 * 3861 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, 3862 * replacing kfree_skb(skb) 3863 * 3864 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. 3865 * Typically used in place of consume_skb(skb) in TX completion path 3866 * 3867 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, 3868 * replacing kfree_skb(skb) 3869 * 3870 * dev_consume_skb_any(skb) when caller doesn't know its current irq context, 3871 * and consumed a packet. Used in place of consume_skb(skb) 3872 */ 3873 static inline void dev_kfree_skb_irq(struct sk_buff *skb) 3874 { 3875 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3876 } 3877 3878 static inline void dev_consume_skb_irq(struct sk_buff *skb) 3879 { 3880 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); 3881 } 3882 3883 static inline void dev_kfree_skb_any(struct sk_buff *skb) 3884 { 3885 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 3886 } 3887 3888 static inline void dev_consume_skb_any(struct sk_buff *skb) 3889 { 3890 dev_kfree_skb_any_reason(skb, SKB_CONSUMED); 3891 } 3892 3893 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 3894 struct bpf_prog *xdp_prog); 3895 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3896 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); 3897 int netif_rx(struct sk_buff *skb); 3898 int __netif_rx(struct sk_buff *skb); 3899 3900 int netif_receive_skb(struct sk_buff *skb); 3901 int netif_receive_skb_core(struct sk_buff *skb); 3902 void netif_receive_skb_list_internal(struct list_head *head); 3903 void netif_receive_skb_list(struct list_head *head); 3904 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3905 void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3906 struct sk_buff *napi_get_frags(struct napi_struct *napi); 3907 void napi_get_frags_check(struct napi_struct *napi); 3908 gro_result_t napi_gro_frags(struct napi_struct *napi); 3909 struct packet_offload *gro_find_receive_by_type(__be16 type); 3910 struct packet_offload *gro_find_complete_by_type(__be16 type); 3911 3912 static inline void napi_free_frags(struct napi_struct *napi) 3913 { 3914 kfree_skb(napi->skb); 3915 napi->skb = NULL; 3916 } 3917 3918 bool netdev_is_rx_handler_busy(struct net_device *dev); 3919 int netdev_rx_handler_register(struct net_device *dev, 3920 rx_handler_func_t *rx_handler, 3921 void *rx_handler_data); 3922 void netdev_rx_handler_unregister(struct net_device *dev); 3923 3924 bool dev_valid_name(const char *name); 3925 static inline bool is_socket_ioctl_cmd(unsigned int cmd) 3926 { 3927 return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; 3928 } 3929 int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); 3930 int put_user_ifreq(struct ifreq *ifr, void __user *arg); 3931 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, 3932 void __user *data, bool *need_copyout); 3933 int dev_ifconf(struct net *net, struct ifconf __user *ifc); 3934 int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); 3935 unsigned int dev_get_flags(const struct net_device *); 3936 int __dev_change_flags(struct net_device *dev, unsigned int flags, 3937 struct netlink_ext_ack *extack); 3938 int dev_change_flags(struct net_device *dev, unsigned int flags, 3939 struct netlink_ext_ack *extack); 3940 int dev_set_alias(struct net_device *, const char *, size_t); 3941 int dev_get_alias(const struct net_device *, char *, size_t); 3942 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 3943 const char *pat, int new_ifindex); 3944 static inline 3945 int dev_change_net_namespace(struct net_device *dev, struct net *net, 3946 const char *pat) 3947 { 3948 return __dev_change_net_namespace(dev, net, pat, 0); 3949 } 3950 int __dev_set_mtu(struct net_device *, int); 3951 int dev_set_mtu(struct net_device *, int); 3952 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 3953 struct netlink_ext_ack *extack); 3954 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 3955 struct netlink_ext_ack *extack); 3956 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 3957 struct netlink_ext_ack *extack); 3958 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); 3959 int dev_get_port_parent_id(struct net_device *dev, 3960 struct netdev_phys_item_id *ppid, bool recurse); 3961 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); 3962 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); 3963 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3964 struct netdev_queue *txq, int *ret); 3965 3966 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 3967 u8 dev_xdp_prog_count(struct net_device *dev); 3968 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); 3969 3970 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3971 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3972 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); 3973 bool is_skb_forwardable(const struct net_device *dev, 3974 const struct sk_buff *skb); 3975 3976 static __always_inline bool __is_skb_forwardable(const struct net_device *dev, 3977 const struct sk_buff *skb, 3978 const bool check_mtu) 3979 { 3980 const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ 3981 unsigned int len; 3982 3983 if (!(dev->flags & IFF_UP)) 3984 return false; 3985 3986 if (!check_mtu) 3987 return true; 3988 3989 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; 3990 if (skb->len <= len) 3991 return true; 3992 3993 /* if TSO is enabled, we don't care about the length as the packet 3994 * could be forwarded without being segmented before 3995 */ 3996 if (skb_is_gso(skb)) 3997 return true; 3998 3999 return false; 4000 } 4001 4002 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); 4003 4004 static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) 4005 { 4006 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 4007 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 4008 4009 if (likely(p)) 4010 return p; 4011 4012 return netdev_core_stats_alloc(dev); 4013 } 4014 4015 #define DEV_CORE_STATS_INC(FIELD) \ 4016 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ 4017 { \ 4018 struct net_device_core_stats __percpu *p; \ 4019 \ 4020 p = dev_core_stats(dev); \ 4021 if (p) \ 4022 this_cpu_inc(p->FIELD); \ 4023 } 4024 DEV_CORE_STATS_INC(rx_dropped) 4025 DEV_CORE_STATS_INC(tx_dropped) 4026 DEV_CORE_STATS_INC(rx_nohandler) 4027 DEV_CORE_STATS_INC(rx_otherhost_dropped) 4028 4029 static __always_inline int ____dev_forward_skb(struct net_device *dev, 4030 struct sk_buff *skb, 4031 const bool check_mtu) 4032 { 4033 if (skb_orphan_frags(skb, GFP_ATOMIC) || 4034 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { 4035 dev_core_stats_rx_dropped_inc(dev); 4036 kfree_skb(skb); 4037 return NET_RX_DROP; 4038 } 4039 4040 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); 4041 skb->priority = 0; 4042 return 0; 4043 } 4044 4045 bool dev_nit_active(struct net_device *dev); 4046 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4047 4048 static inline void __dev_put(struct net_device *dev) 4049 { 4050 if (dev) { 4051 #ifdef CONFIG_PCPU_DEV_REFCNT 4052 this_cpu_dec(*dev->pcpu_refcnt); 4053 #else 4054 refcount_dec(&dev->dev_refcnt); 4055 #endif 4056 } 4057 } 4058 4059 static inline void __dev_hold(struct net_device *dev) 4060 { 4061 if (dev) { 4062 #ifdef CONFIG_PCPU_DEV_REFCNT 4063 this_cpu_inc(*dev->pcpu_refcnt); 4064 #else 4065 refcount_inc(&dev->dev_refcnt); 4066 #endif 4067 } 4068 } 4069 4070 static inline void __netdev_tracker_alloc(struct net_device *dev, 4071 netdevice_tracker *tracker, 4072 gfp_t gfp) 4073 { 4074 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4075 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); 4076 #endif 4077 } 4078 4079 /* netdev_tracker_alloc() can upgrade a prior untracked reference 4080 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. 4081 */ 4082 static inline void netdev_tracker_alloc(struct net_device *dev, 4083 netdevice_tracker *tracker, gfp_t gfp) 4084 { 4085 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4086 refcount_dec(&dev->refcnt_tracker.no_tracker); 4087 __netdev_tracker_alloc(dev, tracker, gfp); 4088 #endif 4089 } 4090 4091 static inline void netdev_tracker_free(struct net_device *dev, 4092 netdevice_tracker *tracker) 4093 { 4094 #ifdef CONFIG_NET_DEV_REFCNT_TRACKER 4095 ref_tracker_free(&dev->refcnt_tracker, tracker); 4096 #endif 4097 } 4098 4099 static inline void netdev_hold(struct net_device *dev, 4100 netdevice_tracker *tracker, gfp_t gfp) 4101 { 4102 if (dev) { 4103 __dev_hold(dev); 4104 __netdev_tracker_alloc(dev, tracker, gfp); 4105 } 4106 } 4107 4108 static inline void netdev_put(struct net_device *dev, 4109 netdevice_tracker *tracker) 4110 { 4111 if (dev) { 4112 netdev_tracker_free(dev, tracker); 4113 __dev_put(dev); 4114 } 4115 } 4116 4117 /** 4118 * dev_hold - get reference to device 4119 * @dev: network device 4120 * 4121 * Hold reference to device to keep it from being freed. 4122 * Try using netdev_hold() instead. 4123 */ 4124 static inline void dev_hold(struct net_device *dev) 4125 { 4126 netdev_hold(dev, NULL, GFP_ATOMIC); 4127 } 4128 4129 /** 4130 * dev_put - release reference to device 4131 * @dev: network device 4132 * 4133 * Release reference to device to allow it to be freed. 4134 * Try using netdev_put() instead. 4135 */ 4136 static inline void dev_put(struct net_device *dev) 4137 { 4138 netdev_put(dev, NULL); 4139 } 4140 4141 static inline void netdev_ref_replace(struct net_device *odev, 4142 struct net_device *ndev, 4143 netdevice_tracker *tracker, 4144 gfp_t gfp) 4145 { 4146 if (odev) 4147 netdev_tracker_free(odev, tracker); 4148 4149 __dev_hold(ndev); 4150 __dev_put(odev); 4151 4152 if (ndev) 4153 __netdev_tracker_alloc(ndev, tracker, gfp); 4154 } 4155 4156 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 4157 * and _off may be called from IRQ context, but it is caller 4158 * who is responsible for serialization of these calls. 4159 * 4160 * The name carrier is inappropriate, these functions should really be 4161 * called netif_lowerlayer_*() because they represent the state of any 4162 * kind of lower layer not just hardware media. 4163 */ 4164 void linkwatch_fire_event(struct net_device *dev); 4165 4166 /** 4167 * netif_carrier_ok - test if carrier present 4168 * @dev: network device 4169 * 4170 * Check if carrier is present on device 4171 */ 4172 static inline bool netif_carrier_ok(const struct net_device *dev) 4173 { 4174 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 4175 } 4176 4177 unsigned long dev_trans_start(struct net_device *dev); 4178 4179 void __netdev_watchdog_up(struct net_device *dev); 4180 4181 void netif_carrier_on(struct net_device *dev); 4182 void netif_carrier_off(struct net_device *dev); 4183 void netif_carrier_event(struct net_device *dev); 4184 4185 /** 4186 * netif_dormant_on - mark device as dormant. 4187 * @dev: network device 4188 * 4189 * Mark device as dormant (as per RFC2863). 4190 * 4191 * The dormant state indicates that the relevant interface is not 4192 * actually in a condition to pass packets (i.e., it is not 'up') but is 4193 * in a "pending" state, waiting for some external event. For "on- 4194 * demand" interfaces, this new state identifies the situation where the 4195 * interface is waiting for events to place it in the up state. 4196 */ 4197 static inline void netif_dormant_on(struct net_device *dev) 4198 { 4199 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 4200 linkwatch_fire_event(dev); 4201 } 4202 4203 /** 4204 * netif_dormant_off - set device as not dormant. 4205 * @dev: network device 4206 * 4207 * Device is not in dormant state. 4208 */ 4209 static inline void netif_dormant_off(struct net_device *dev) 4210 { 4211 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 4212 linkwatch_fire_event(dev); 4213 } 4214 4215 /** 4216 * netif_dormant - test if device is dormant 4217 * @dev: network device 4218 * 4219 * Check if device is dormant. 4220 */ 4221 static inline bool netif_dormant(const struct net_device *dev) 4222 { 4223 return test_bit(__LINK_STATE_DORMANT, &dev->state); 4224 } 4225 4226 4227 /** 4228 * netif_testing_on - mark device as under test. 4229 * @dev: network device 4230 * 4231 * Mark device as under test (as per RFC2863). 4232 * 4233 * The testing state indicates that some test(s) must be performed on 4234 * the interface. After completion, of the test, the interface state 4235 * will change to up, dormant, or down, as appropriate. 4236 */ 4237 static inline void netif_testing_on(struct net_device *dev) 4238 { 4239 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) 4240 linkwatch_fire_event(dev); 4241 } 4242 4243 /** 4244 * netif_testing_off - set device as not under test. 4245 * @dev: network device 4246 * 4247 * Device is not in testing state. 4248 */ 4249 static inline void netif_testing_off(struct net_device *dev) 4250 { 4251 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) 4252 linkwatch_fire_event(dev); 4253 } 4254 4255 /** 4256 * netif_testing - test if device is under test 4257 * @dev: network device 4258 * 4259 * Check if device is under test 4260 */ 4261 static inline bool netif_testing(const struct net_device *dev) 4262 { 4263 return test_bit(__LINK_STATE_TESTING, &dev->state); 4264 } 4265 4266 4267 /** 4268 * netif_oper_up - test if device is operational 4269 * @dev: network device 4270 * 4271 * Check if carrier is operational 4272 */ 4273 static inline bool netif_oper_up(const struct net_device *dev) 4274 { 4275 return (dev->operstate == IF_OPER_UP || 4276 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 4277 } 4278 4279 /** 4280 * netif_device_present - is device available or removed 4281 * @dev: network device 4282 * 4283 * Check if device has not been removed from system. 4284 */ 4285 static inline bool netif_device_present(const struct net_device *dev) 4286 { 4287 return test_bit(__LINK_STATE_PRESENT, &dev->state); 4288 } 4289 4290 void netif_device_detach(struct net_device *dev); 4291 4292 void netif_device_attach(struct net_device *dev); 4293 4294 /* 4295 * Network interface message level settings 4296 */ 4297 4298 enum { 4299 NETIF_MSG_DRV_BIT, 4300 NETIF_MSG_PROBE_BIT, 4301 NETIF_MSG_LINK_BIT, 4302 NETIF_MSG_TIMER_BIT, 4303 NETIF_MSG_IFDOWN_BIT, 4304 NETIF_MSG_IFUP_BIT, 4305 NETIF_MSG_RX_ERR_BIT, 4306 NETIF_MSG_TX_ERR_BIT, 4307 NETIF_MSG_TX_QUEUED_BIT, 4308 NETIF_MSG_INTR_BIT, 4309 NETIF_MSG_TX_DONE_BIT, 4310 NETIF_MSG_RX_STATUS_BIT, 4311 NETIF_MSG_PKTDATA_BIT, 4312 NETIF_MSG_HW_BIT, 4313 NETIF_MSG_WOL_BIT, 4314 4315 /* When you add a new bit above, update netif_msg_class_names array 4316 * in net/ethtool/common.c 4317 */ 4318 NETIF_MSG_CLASS_COUNT, 4319 }; 4320 /* Both ethtool_ops interface and internal driver implementation use u32 */ 4321 static_assert(NETIF_MSG_CLASS_COUNT <= 32); 4322 4323 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) 4324 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) 4325 4326 #define NETIF_MSG_DRV __NETIF_MSG(DRV) 4327 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) 4328 #define NETIF_MSG_LINK __NETIF_MSG(LINK) 4329 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) 4330 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) 4331 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) 4332 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) 4333 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) 4334 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) 4335 #define NETIF_MSG_INTR __NETIF_MSG(INTR) 4336 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) 4337 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) 4338 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) 4339 #define NETIF_MSG_HW __NETIF_MSG(HW) 4340 #define NETIF_MSG_WOL __NETIF_MSG(WOL) 4341 4342 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 4343 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 4344 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 4345 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 4346 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 4347 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 4348 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 4349 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 4350 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 4351 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 4352 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 4353 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 4354 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 4355 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 4356 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 4357 4358 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 4359 { 4360 /* use default */ 4361 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 4362 return default_msg_enable_bits; 4363 if (debug_value == 0) /* no output */ 4364 return 0; 4365 /* set low N bits */ 4366 return (1U << debug_value) - 1; 4367 } 4368 4369 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 4370 { 4371 spin_lock(&txq->_xmit_lock); 4372 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4373 WRITE_ONCE(txq->xmit_lock_owner, cpu); 4374 } 4375 4376 static inline bool __netif_tx_acquire(struct netdev_queue *txq) 4377 { 4378 __acquire(&txq->_xmit_lock); 4379 return true; 4380 } 4381 4382 static inline void __netif_tx_release(struct netdev_queue *txq) 4383 { 4384 __release(&txq->_xmit_lock); 4385 } 4386 4387 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 4388 { 4389 spin_lock_bh(&txq->_xmit_lock); 4390 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4391 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4392 } 4393 4394 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 4395 { 4396 bool ok = spin_trylock(&txq->_xmit_lock); 4397 4398 if (likely(ok)) { 4399 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4400 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); 4401 } 4402 return ok; 4403 } 4404 4405 static inline void __netif_tx_unlock(struct netdev_queue *txq) 4406 { 4407 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4408 WRITE_ONCE(txq->xmit_lock_owner, -1); 4409 spin_unlock(&txq->_xmit_lock); 4410 } 4411 4412 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 4413 { 4414 /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 4415 WRITE_ONCE(txq->xmit_lock_owner, -1); 4416 spin_unlock_bh(&txq->_xmit_lock); 4417 } 4418 4419 /* 4420 * txq->trans_start can be read locklessly from dev_watchdog() 4421 */ 4422 static inline void txq_trans_update(struct netdev_queue *txq) 4423 { 4424 if (txq->xmit_lock_owner != -1) 4425 WRITE_ONCE(txq->trans_start, jiffies); 4426 } 4427 4428 static inline void txq_trans_cond_update(struct netdev_queue *txq) 4429 { 4430 unsigned long now = jiffies; 4431 4432 if (READ_ONCE(txq->trans_start) != now) 4433 WRITE_ONCE(txq->trans_start, now); 4434 } 4435 4436 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ 4437 static inline void netif_trans_update(struct net_device *dev) 4438 { 4439 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 4440 4441 txq_trans_cond_update(txq); 4442 } 4443 4444 /** 4445 * netif_tx_lock - grab network device transmit lock 4446 * @dev: network device 4447 * 4448 * Get network device transmit lock 4449 */ 4450 void netif_tx_lock(struct net_device *dev); 4451 4452 static inline void netif_tx_lock_bh(struct net_device *dev) 4453 { 4454 local_bh_disable(); 4455 netif_tx_lock(dev); 4456 } 4457 4458 void netif_tx_unlock(struct net_device *dev); 4459 4460 static inline void netif_tx_unlock_bh(struct net_device *dev) 4461 { 4462 netif_tx_unlock(dev); 4463 local_bh_enable(); 4464 } 4465 4466 #define HARD_TX_LOCK(dev, txq, cpu) { \ 4467 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4468 __netif_tx_lock(txq, cpu); \ 4469 } else { \ 4470 __netif_tx_acquire(txq); \ 4471 } \ 4472 } 4473 4474 #define HARD_TX_TRYLOCK(dev, txq) \ 4475 (((dev->features & NETIF_F_LLTX) == 0) ? \ 4476 __netif_tx_trylock(txq) : \ 4477 __netif_tx_acquire(txq)) 4478 4479 #define HARD_TX_UNLOCK(dev, txq) { \ 4480 if ((dev->features & NETIF_F_LLTX) == 0) { \ 4481 __netif_tx_unlock(txq); \ 4482 } else { \ 4483 __netif_tx_release(txq); \ 4484 } \ 4485 } 4486 4487 static inline void netif_tx_disable(struct net_device *dev) 4488 { 4489 unsigned int i; 4490 int cpu; 4491 4492 local_bh_disable(); 4493 cpu = smp_processor_id(); 4494 spin_lock(&dev->tx_global_lock); 4495 for (i = 0; i < dev->num_tx_queues; i++) { 4496 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 4497 4498 __netif_tx_lock(txq, cpu); 4499 netif_tx_stop_queue(txq); 4500 __netif_tx_unlock(txq); 4501 } 4502 spin_unlock(&dev->tx_global_lock); 4503 local_bh_enable(); 4504 } 4505 4506 static inline void netif_addr_lock(struct net_device *dev) 4507 { 4508 unsigned char nest_level = 0; 4509 4510 #ifdef CONFIG_LOCKDEP 4511 nest_level = dev->nested_level; 4512 #endif 4513 spin_lock_nested(&dev->addr_list_lock, nest_level); 4514 } 4515 4516 static inline void netif_addr_lock_bh(struct net_device *dev) 4517 { 4518 unsigned char nest_level = 0; 4519 4520 #ifdef CONFIG_LOCKDEP 4521 nest_level = dev->nested_level; 4522 #endif 4523 local_bh_disable(); 4524 spin_lock_nested(&dev->addr_list_lock, nest_level); 4525 } 4526 4527 static inline void netif_addr_unlock(struct net_device *dev) 4528 { 4529 spin_unlock(&dev->addr_list_lock); 4530 } 4531 4532 static inline void netif_addr_unlock_bh(struct net_device *dev) 4533 { 4534 spin_unlock_bh(&dev->addr_list_lock); 4535 } 4536 4537 /* 4538 * dev_addrs walker. Should be used only for read access. Call with 4539 * rcu_read_lock held. 4540 */ 4541 #define for_each_dev_addr(dev, ha) \ 4542 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 4543 4544 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4545 4546 void ether_setup(struct net_device *dev); 4547 4548 /* Support for loadable net-drivers */ 4549 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 4550 unsigned char name_assign_type, 4551 void (*setup)(struct net_device *), 4552 unsigned int txqs, unsigned int rxqs); 4553 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 4554 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 4555 4556 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ 4557 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ 4558 count) 4559 4560 int register_netdev(struct net_device *dev); 4561 void unregister_netdev(struct net_device *dev); 4562 4563 int devm_register_netdev(struct device *dev, struct net_device *ndev); 4564 4565 /* General hardware address lists handling functions */ 4566 int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 4567 struct netdev_hw_addr_list *from_list, int addr_len); 4568 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 4569 struct netdev_hw_addr_list *from_list, int addr_len); 4570 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, 4571 struct net_device *dev, 4572 int (*sync)(struct net_device *, const unsigned char *), 4573 int (*unsync)(struct net_device *, 4574 const unsigned char *)); 4575 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, 4576 struct net_device *dev, 4577 int (*sync)(struct net_device *, 4578 const unsigned char *, int), 4579 int (*unsync)(struct net_device *, 4580 const unsigned char *, int)); 4581 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, 4582 struct net_device *dev, 4583 int (*unsync)(struct net_device *, 4584 const unsigned char *, int)); 4585 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, 4586 struct net_device *dev, 4587 int (*unsync)(struct net_device *, 4588 const unsigned char *)); 4589 void __hw_addr_init(struct netdev_hw_addr_list *list); 4590 4591 /* Functions used for device addresses handling */ 4592 void dev_addr_mod(struct net_device *dev, unsigned int offset, 4593 const void *addr, size_t len); 4594 4595 static inline void 4596 __dev_addr_set(struct net_device *dev, const void *addr, size_t len) 4597 { 4598 dev_addr_mod(dev, 0, addr, len); 4599 } 4600 4601 static inline void dev_addr_set(struct net_device *dev, const u8 *addr) 4602 { 4603 __dev_addr_set(dev, addr, dev->addr_len); 4604 } 4605 4606 int dev_addr_add(struct net_device *dev, const unsigned char *addr, 4607 unsigned char addr_type); 4608 int dev_addr_del(struct net_device *dev, const unsigned char *addr, 4609 unsigned char addr_type); 4610 4611 /* Functions used for unicast addresses handling */ 4612 int dev_uc_add(struct net_device *dev, const unsigned char *addr); 4613 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 4614 int dev_uc_del(struct net_device *dev, const unsigned char *addr); 4615 int dev_uc_sync(struct net_device *to, struct net_device *from); 4616 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 4617 void dev_uc_unsync(struct net_device *to, struct net_device *from); 4618 void dev_uc_flush(struct net_device *dev); 4619 void dev_uc_init(struct net_device *dev); 4620 4621 /** 4622 * __dev_uc_sync - Synchonize device's unicast list 4623 * @dev: device to sync 4624 * @sync: function to call if address should be added 4625 * @unsync: function to call if address should be removed 4626 * 4627 * Add newly added addresses to the interface, and release 4628 * addresses that have been deleted. 4629 */ 4630 static inline int __dev_uc_sync(struct net_device *dev, 4631 int (*sync)(struct net_device *, 4632 const unsigned char *), 4633 int (*unsync)(struct net_device *, 4634 const unsigned char *)) 4635 { 4636 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); 4637 } 4638 4639 /** 4640 * __dev_uc_unsync - Remove synchronized addresses from device 4641 * @dev: device to sync 4642 * @unsync: function to call if address should be removed 4643 * 4644 * Remove all addresses that were added to the device by dev_uc_sync(). 4645 */ 4646 static inline void __dev_uc_unsync(struct net_device *dev, 4647 int (*unsync)(struct net_device *, 4648 const unsigned char *)) 4649 { 4650 __hw_addr_unsync_dev(&dev->uc, dev, unsync); 4651 } 4652 4653 /* Functions used for multicast addresses handling */ 4654 int dev_mc_add(struct net_device *dev, const unsigned char *addr); 4655 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 4656 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 4657 int dev_mc_del(struct net_device *dev, const unsigned char *addr); 4658 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 4659 int dev_mc_sync(struct net_device *to, struct net_device *from); 4660 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 4661 void dev_mc_unsync(struct net_device *to, struct net_device *from); 4662 void dev_mc_flush(struct net_device *dev); 4663 void dev_mc_init(struct net_device *dev); 4664 4665 /** 4666 * __dev_mc_sync - Synchonize device's multicast list 4667 * @dev: device to sync 4668 * @sync: function to call if address should be added 4669 * @unsync: function to call if address should be removed 4670 * 4671 * Add newly added addresses to the interface, and release 4672 * addresses that have been deleted. 4673 */ 4674 static inline int __dev_mc_sync(struct net_device *dev, 4675 int (*sync)(struct net_device *, 4676 const unsigned char *), 4677 int (*unsync)(struct net_device *, 4678 const unsigned char *)) 4679 { 4680 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); 4681 } 4682 4683 /** 4684 * __dev_mc_unsync - Remove synchronized addresses from device 4685 * @dev: device to sync 4686 * @unsync: function to call if address should be removed 4687 * 4688 * Remove all addresses that were added to the device by dev_mc_sync(). 4689 */ 4690 static inline void __dev_mc_unsync(struct net_device *dev, 4691 int (*unsync)(struct net_device *, 4692 const unsigned char *)) 4693 { 4694 __hw_addr_unsync_dev(&dev->mc, dev, unsync); 4695 } 4696 4697 /* Functions used for secondary unicast and multicast support */ 4698 void dev_set_rx_mode(struct net_device *dev); 4699 int dev_set_promiscuity(struct net_device *dev, int inc); 4700 int dev_set_allmulti(struct net_device *dev, int inc); 4701 void netdev_state_change(struct net_device *dev); 4702 void __netdev_notify_peers(struct net_device *dev); 4703 void netdev_notify_peers(struct net_device *dev); 4704 void netdev_features_change(struct net_device *dev); 4705 /* Load a device via the kmod */ 4706 void dev_load(struct net *net, const char *name); 4707 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 4708 struct rtnl_link_stats64 *storage); 4709 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 4710 const struct net_device_stats *netdev_stats); 4711 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 4712 const struct pcpu_sw_netstats __percpu *netstats); 4713 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4714 4715 extern int netdev_max_backlog; 4716 extern int dev_rx_weight; 4717 extern int dev_tx_weight; 4718 extern int gro_normal_batch; 4719 4720 enum { 4721 NESTED_SYNC_IMM_BIT, 4722 NESTED_SYNC_TODO_BIT, 4723 }; 4724 4725 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) 4726 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) 4727 4728 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) 4729 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) 4730 4731 struct netdev_nested_priv { 4732 unsigned char flags; 4733 void *data; 4734 }; 4735 4736 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 4737 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4738 struct list_head **iter); 4739 4740 /* iterate through upper list, must be called under RCU read lock */ 4741 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ 4742 for (iter = &(dev)->adj_list.upper, \ 4743 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 4744 updev; \ 4745 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 4746 4747 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 4748 int (*fn)(struct net_device *upper_dev, 4749 struct netdev_nested_priv *priv), 4750 struct netdev_nested_priv *priv); 4751 4752 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 4753 struct net_device *upper_dev); 4754 4755 bool netdev_has_any_upper_dev(struct net_device *dev); 4756 4757 void *netdev_lower_get_next_private(struct net_device *dev, 4758 struct list_head **iter); 4759 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4760 struct list_head **iter); 4761 4762 #define netdev_for_each_lower_private(dev, priv, iter) \ 4763 for (iter = (dev)->adj_list.lower.next, \ 4764 priv = netdev_lower_get_next_private(dev, &(iter)); \ 4765 priv; \ 4766 priv = netdev_lower_get_next_private(dev, &(iter))) 4767 4768 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 4769 for (iter = &(dev)->adj_list.lower, \ 4770 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 4771 priv; \ 4772 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 4773 4774 void *netdev_lower_get_next(struct net_device *dev, 4775 struct list_head **iter); 4776 4777 #define netdev_for_each_lower_dev(dev, ldev, iter) \ 4778 for (iter = (dev)->adj_list.lower.next, \ 4779 ldev = netdev_lower_get_next(dev, &(iter)); \ 4780 ldev; \ 4781 ldev = netdev_lower_get_next(dev, &(iter))) 4782 4783 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 4784 struct list_head **iter); 4785 int netdev_walk_all_lower_dev(struct net_device *dev, 4786 int (*fn)(struct net_device *lower_dev, 4787 struct netdev_nested_priv *priv), 4788 struct netdev_nested_priv *priv); 4789 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 4790 int (*fn)(struct net_device *lower_dev, 4791 struct netdev_nested_priv *priv), 4792 struct netdev_nested_priv *priv); 4793 4794 void *netdev_adjacent_get_private(struct list_head *adj_list); 4795 void *netdev_lower_get_first_private_rcu(struct net_device *dev); 4796 struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 4797 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 4798 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, 4799 struct netlink_ext_ack *extack); 4800 int netdev_master_upper_dev_link(struct net_device *dev, 4801 struct net_device *upper_dev, 4802 void *upper_priv, void *upper_info, 4803 struct netlink_ext_ack *extack); 4804 void netdev_upper_dev_unlink(struct net_device *dev, 4805 struct net_device *upper_dev); 4806 int netdev_adjacent_change_prepare(struct net_device *old_dev, 4807 struct net_device *new_dev, 4808 struct net_device *dev, 4809 struct netlink_ext_ack *extack); 4810 void netdev_adjacent_change_commit(struct net_device *old_dev, 4811 struct net_device *new_dev, 4812 struct net_device *dev); 4813 void netdev_adjacent_change_abort(struct net_device *old_dev, 4814 struct net_device *new_dev, 4815 struct net_device *dev); 4816 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 4817 void *netdev_lower_dev_get_private(struct net_device *dev, 4818 struct net_device *lower_dev); 4819 void netdev_lower_state_changed(struct net_device *lower_dev, 4820 void *lower_state_info); 4821 4822 /* RSS keys are 40 or 52 bytes long */ 4823 #define NETDEV_RSS_KEY_LEN 52 4824 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 4825 void netdev_rss_key_fill(void *buffer, size_t len); 4826 4827 int skb_checksum_help(struct sk_buff *skb); 4828 int skb_crc32c_csum_help(struct sk_buff *skb); 4829 int skb_csum_hwoffload_help(struct sk_buff *skb, 4830 const netdev_features_t features); 4831 4832 struct netdev_bonding_info { 4833 ifslave slave; 4834 ifbond master; 4835 }; 4836 4837 struct netdev_notifier_bonding_info { 4838 struct netdev_notifier_info info; /* must be first */ 4839 struct netdev_bonding_info bonding_info; 4840 }; 4841 4842 void netdev_bonding_info_change(struct net_device *dev, 4843 struct netdev_bonding_info *bonding_info); 4844 4845 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) 4846 void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); 4847 #else 4848 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, 4849 const void *data) 4850 { 4851 } 4852 #endif 4853 4854 __be16 skb_network_protocol(struct sk_buff *skb, int *depth); 4855 4856 static inline bool can_checksum_protocol(netdev_features_t features, 4857 __be16 protocol) 4858 { 4859 if (protocol == htons(ETH_P_FCOE)) 4860 return !!(features & NETIF_F_FCOE_CRC); 4861 4862 /* Assume this is an IP checksum (not SCTP CRC) */ 4863 4864 if (features & NETIF_F_HW_CSUM) { 4865 /* Can checksum everything */ 4866 return true; 4867 } 4868 4869 switch (protocol) { 4870 case htons(ETH_P_IP): 4871 return !!(features & NETIF_F_IP_CSUM); 4872 case htons(ETH_P_IPV6): 4873 return !!(features & NETIF_F_IPV6_CSUM); 4874 default: 4875 return false; 4876 } 4877 } 4878 4879 #ifdef CONFIG_BUG 4880 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); 4881 #else 4882 static inline void netdev_rx_csum_fault(struct net_device *dev, 4883 struct sk_buff *skb) 4884 { 4885 } 4886 #endif 4887 /* rx skb timestamps */ 4888 void net_enable_timestamp(void); 4889 void net_disable_timestamp(void); 4890 4891 static inline ktime_t netdev_get_tstamp(struct net_device *dev, 4892 const struct skb_shared_hwtstamps *hwtstamps, 4893 bool cycles) 4894 { 4895 const struct net_device_ops *ops = dev->netdev_ops; 4896 4897 if (ops->ndo_get_tstamp) 4898 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); 4899 4900 return hwtstamps->hwtstamp; 4901 } 4902 4903 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4904 struct sk_buff *skb, struct net_device *dev, 4905 bool more) 4906 { 4907 __this_cpu_write(softnet_data.xmit.more, more); 4908 return ops->ndo_start_xmit(skb, dev); 4909 } 4910 4911 static inline bool netdev_xmit_more(void) 4912 { 4913 return __this_cpu_read(softnet_data.xmit.more); 4914 } 4915 4916 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, 4917 struct netdev_queue *txq, bool more) 4918 { 4919 const struct net_device_ops *ops = dev->netdev_ops; 4920 netdev_tx_t rc; 4921 4922 rc = __netdev_start_xmit(ops, skb, dev, more); 4923 if (rc == NETDEV_TX_OK) 4924 txq_trans_update(txq); 4925 4926 return rc; 4927 } 4928 4929 int netdev_class_create_file_ns(const struct class_attribute *class_attr, 4930 const void *ns); 4931 void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 4932 const void *ns); 4933 4934 extern const struct kobj_ns_type_operations net_ns_type_operations; 4935 4936 const char *netdev_drivername(const struct net_device *dev); 4937 4938 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 4939 netdev_features_t f2) 4940 { 4941 if ((f1 ^ f2) & NETIF_F_HW_CSUM) { 4942 if (f1 & NETIF_F_HW_CSUM) 4943 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4944 else 4945 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4946 } 4947 4948 return f1 & f2; 4949 } 4950 4951 static inline netdev_features_t netdev_get_wanted_features( 4952 struct net_device *dev) 4953 { 4954 return (dev->features & ~dev->hw_features) | dev->wanted_features; 4955 } 4956 netdev_features_t netdev_increment_features(netdev_features_t all, 4957 netdev_features_t one, netdev_features_t mask); 4958 4959 /* Allow TSO being used on stacked device : 4960 * Performing the GSO segmentation before last device 4961 * is a performance improvement. 4962 */ 4963 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 4964 netdev_features_t mask) 4965 { 4966 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 4967 } 4968 4969 int __netdev_update_features(struct net_device *dev); 4970 void netdev_update_features(struct net_device *dev); 4971 void netdev_change_features(struct net_device *dev); 4972 4973 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 4974 struct net_device *dev); 4975 4976 netdev_features_t passthru_features_check(struct sk_buff *skb, 4977 struct net_device *dev, 4978 netdev_features_t features); 4979 netdev_features_t netif_skb_features(struct sk_buff *skb); 4980 void skb_warn_bad_offload(const struct sk_buff *skb); 4981 4982 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4983 { 4984 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; 4985 4986 /* check flags correspondence */ 4987 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4988 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 4989 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 4990 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); 4991 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 4992 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 4993 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); 4994 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); 4995 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); 4996 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); 4997 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 4998 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 4999 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); 5000 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); 5001 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); 5002 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); 5003 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); 5004 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); 5005 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); 5006 5007 return (features & feature) == feature; 5008 } 5009 5010 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 5011 { 5012 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 5013 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 5014 } 5015 5016 static inline bool netif_needs_gso(struct sk_buff *skb, 5017 netdev_features_t features) 5018 { 5019 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 5020 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 5021 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 5022 } 5023 5024 void netif_set_tso_max_size(struct net_device *dev, unsigned int size); 5025 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); 5026 void netif_inherit_tso_max(struct net_device *to, 5027 const struct net_device *from); 5028 5029 static inline bool netif_is_macsec(const struct net_device *dev) 5030 { 5031 return dev->priv_flags & IFF_MACSEC; 5032 } 5033 5034 static inline bool netif_is_macvlan(const struct net_device *dev) 5035 { 5036 return dev->priv_flags & IFF_MACVLAN; 5037 } 5038 5039 static inline bool netif_is_macvlan_port(const struct net_device *dev) 5040 { 5041 return dev->priv_flags & IFF_MACVLAN_PORT; 5042 } 5043 5044 static inline bool netif_is_bond_master(const struct net_device *dev) 5045 { 5046 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 5047 } 5048 5049 static inline bool netif_is_bond_slave(const struct net_device *dev) 5050 { 5051 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 5052 } 5053 5054 static inline bool netif_supports_nofcs(struct net_device *dev) 5055 { 5056 return dev->priv_flags & IFF_SUPP_NOFCS; 5057 } 5058 5059 static inline bool netif_has_l3_rx_handler(const struct net_device *dev) 5060 { 5061 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; 5062 } 5063 5064 static inline bool netif_is_l3_master(const struct net_device *dev) 5065 { 5066 return dev->priv_flags & IFF_L3MDEV_MASTER; 5067 } 5068 5069 static inline bool netif_is_l3_slave(const struct net_device *dev) 5070 { 5071 return dev->priv_flags & IFF_L3MDEV_SLAVE; 5072 } 5073 5074 static inline int dev_sdif(const struct net_device *dev) 5075 { 5076 #ifdef CONFIG_NET_L3_MASTER_DEV 5077 if (netif_is_l3_slave(dev)) 5078 return dev->ifindex; 5079 #endif 5080 return 0; 5081 } 5082 5083 static inline bool netif_is_bridge_master(const struct net_device *dev) 5084 { 5085 return dev->priv_flags & IFF_EBRIDGE; 5086 } 5087 5088 static inline bool netif_is_bridge_port(const struct net_device *dev) 5089 { 5090 return dev->priv_flags & IFF_BRIDGE_PORT; 5091 } 5092 5093 static inline bool netif_is_ovs_master(const struct net_device *dev) 5094 { 5095 return dev->priv_flags & IFF_OPENVSWITCH; 5096 } 5097 5098 static inline bool netif_is_ovs_port(const struct net_device *dev) 5099 { 5100 return dev->priv_flags & IFF_OVS_DATAPATH; 5101 } 5102 5103 static inline bool netif_is_any_bridge_port(const struct net_device *dev) 5104 { 5105 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); 5106 } 5107 5108 static inline bool netif_is_team_master(const struct net_device *dev) 5109 { 5110 return dev->priv_flags & IFF_TEAM; 5111 } 5112 5113 static inline bool netif_is_team_port(const struct net_device *dev) 5114 { 5115 return dev->priv_flags & IFF_TEAM_PORT; 5116 } 5117 5118 static inline bool netif_is_lag_master(const struct net_device *dev) 5119 { 5120 return netif_is_bond_master(dev) || netif_is_team_master(dev); 5121 } 5122 5123 static inline bool netif_is_lag_port(const struct net_device *dev) 5124 { 5125 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 5126 } 5127 5128 static inline bool netif_is_rxfh_configured(const struct net_device *dev) 5129 { 5130 return dev->priv_flags & IFF_RXFH_CONFIGURED; 5131 } 5132 5133 static inline bool netif_is_failover(const struct net_device *dev) 5134 { 5135 return dev->priv_flags & IFF_FAILOVER; 5136 } 5137 5138 static inline bool netif_is_failover_slave(const struct net_device *dev) 5139 { 5140 return dev->priv_flags & IFF_FAILOVER_SLAVE; 5141 } 5142 5143 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 5144 static inline void netif_keep_dst(struct net_device *dev) 5145 { 5146 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); 5147 } 5148 5149 /* return true if dev can't cope with mtu frames that need vlan tag insertion */ 5150 static inline bool netif_reduces_vlan_mtu(struct net_device *dev) 5151 { 5152 /* TODO: reserve and use an additional IFF bit, if we get more users */ 5153 return netif_is_macsec(dev); 5154 } 5155 5156 extern struct pernet_operations __net_initdata loopback_net_ops; 5157 5158 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 5159 5160 /* netdev_printk helpers, similar to dev_printk */ 5161 5162 static inline const char *netdev_name(const struct net_device *dev) 5163 { 5164 if (!dev->name[0] || strchr(dev->name, '%')) 5165 return "(unnamed net_device)"; 5166 return dev->name; 5167 } 5168 5169 static inline const char *netdev_reg_state(const struct net_device *dev) 5170 { 5171 switch (dev->reg_state) { 5172 case NETREG_UNINITIALIZED: return " (uninitialized)"; 5173 case NETREG_REGISTERED: return ""; 5174 case NETREG_UNREGISTERING: return " (unregistering)"; 5175 case NETREG_UNREGISTERED: return " (unregistered)"; 5176 case NETREG_RELEASED: return " (released)"; 5177 case NETREG_DUMMY: return " (dummy)"; 5178 } 5179 5180 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); 5181 return " (unknown)"; 5182 } 5183 5184 #define MODULE_ALIAS_NETDEV(device) \ 5185 MODULE_ALIAS("netdev-" device) 5186 5187 /* 5188 * netdev_WARN() acts like dev_printk(), but with the key difference 5189 * of using a WARN/WARN_ON to get the message out, including the 5190 * file/line information and a backtrace. 5191 */ 5192 #define netdev_WARN(dev, format, args...) \ 5193 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5194 netdev_reg_state(dev), ##args) 5195 5196 #define netdev_WARN_ONCE(dev, format, args...) \ 5197 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ 5198 netdev_reg_state(dev), ##args) 5199 5200 /* 5201 * The list of packet types we will receive (as opposed to discard) 5202 * and the routines to invoke. 5203 * 5204 * Why 16. Because with 16 the only overlap we get on a hash of the 5205 * low nibble of the protocol value is RARP/SNAP/X.25. 5206 * 5207 * 0800 IP 5208 * 0001 802.3 5209 * 0002 AX.25 5210 * 0004 802.2 5211 * 8035 RARP 5212 * 0005 SNAP 5213 * 0805 X.25 5214 * 0806 ARP 5215 * 8137 IPX 5216 * 0009 Localtalk 5217 * 86DD IPv6 5218 */ 5219 #define PTYPE_HASH_SIZE (16) 5220 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 5221 5222 extern struct list_head ptype_all __read_mostly; 5223 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 5224 5225 extern struct net_device *blackhole_netdev; 5226 5227 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ 5228 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) 5229 #define DEV_STATS_ADD(DEV, FIELD, VAL) \ 5230 atomic_long_add((VAL), &(DEV)->stats.__##FIELD) 5231 5232 #endif /* _LINUX_NETDEVICE_H */ 5233