/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_POLICED	0x03	/* skb is shot by police */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
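
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a protocol's transmit path typically collapses the qdisc return codes for
 * its callers with net_xmit_eval(), so that NET_XMIT_CN is not reported as
 * a hard failure.  "my_proto_xmit" is a hypothetical name; dev_queue_xmit()
 * is declared later in this header.
 *
 *	static int my_proto_xmit(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);
 *	}
 */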

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
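
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a driver's ndo_set_rx_mode() implementation typically walks the multicast
 * list with these helpers.  "my_priv" and "my_write_mc_filter" are
 * hypothetical driver-local names; netdev_priv() is declared later in this
 * header.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_write_mc_filter(priv, ha->addr);
 *	}
 */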

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
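
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * protocol output paths reserve LL_RESERVED_SPACE(dev) of headroom so the
 * device can prepend its link-layer header without reallocating the skb:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len + dev->needed_tailroom,
 *			GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */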

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh,
			 __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds netdevice settings configured at boot time.
 * They are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register an rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister an rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, the rx_handler is expected to tell __netif_receive_skb() what
 * to do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
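
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * an upper device (bridge-like) claims frames arriving on an enslaved port.
 * "my_port", "my_deliver" and "my_handle_frame" are hypothetical names;
 * netdev_rx_handler_register() is declared later in this header.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		my_deliver(port, skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, port);
 */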

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: napi context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 */
static inline void napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * Generate a new napi_id and store @napi under it in napi_hash.
 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
 * Note: this is normally automatically done from netif_napi_add(),
 * so it might disappear in a future Linux version.
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: the caller must observe an RCU grace period
 * before freeing the memory containing @napi, if
 * this function returns true.
 * Note: the core networking stack automatically calls it
 * from netif_napi_del().
 * Drivers might want to call this helper to combine all
 * the needed RCU grace periods into a single one.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable().
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}
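
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the canonical driver pattern.  The interrupt handler defers RX work to
 * NAPI; the poll callback only re-enables device interrupts once it polls
 * below budget.  "my_priv", "my_clean_rx" and the IRQ helpers are
 * hypothetical driver-local names.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		my_disable_rx_irqs(priv);
 *		napi_schedule_irqoff(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_clean_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			my_enable_rx_irqs(priv);
 *		}
 *		return work_done;
 *	}
 */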

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | \
					 QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or the stack (either
 * of the XOFF bits is set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	unsigned long		tx_maxrate;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
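
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a driver's xmit path sets __QUEUE_STATE_DRV_XOFF through the netif_tx_*
 * helpers declared later in this header rather than touching the state word
 * directly.  "ring" and "my_ring_full" are hypothetical.
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->index);
 *
 *	if (unlikely(my_ring_full(ring)))
 *		netif_tx_stop_queue(txq);
 */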

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32-bit value.  The upper part is the high-order bits
 * of the flow hash, the lower part is the CPU number.
 * rps_cpu_mask is used to partition the space, depending on the number of
 * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32 - 6 = 26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change cpu under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */
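
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the socket layer records the flow-to-CPU hint at recvmsg time roughly
 * like this (cf. sock_rps_record_flow_hash() in net/sock.h):
 *
 *	rcu_read_lock();
 *	sock_flow_table = rcu_dereference(rps_sock_flow_table);
 *	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
 *	rcu_read_unlock();
 */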

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device
 * configured to run the FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation.  It can fail with an error code which will
 *	be propagated back to register_netdev().
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails.  It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering.  If a driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed.  If this interface is not defined, the
 *	MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code.  If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices' bus interface parameters.  This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device.  If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics.  Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it.  The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *      Enable or disable the VF's ability to query its RSS Redirection Table
 *      and Hash Key.  This is needed since on some devices the VF shares this
 *      information with the PF and querying it may introduce a theoretical
 *      security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to set up 'tc' number of traffic classes in the net device.
 *	This is always called from the stack with the rtnl lock held and
 *	netif tx queues stopped.  This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using the LLD for
 *	FCoE so the underlying device can perform whatever needed
 *	configuration or initialization to support acceleration of FCoE
 *	traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using the LLD for
 *	FCoE so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP).  The LLD can
 *	perform the necessary setup and returns 1 to indicate the device is
 *	set up successfully to perform DDP on this I/O, otherwise this
 *	returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP).  The LLD can
 *	perform the necessary setup and returns 1 to indicate the device is
 *	set up successfully to perform DDP on this I/O, otherwise this
 *	returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE protocol stack wants information on the
 *	underlying device.  This information is utilized by the FCoE protocol
 *	stack to register attributes with the Fibre Channel management service
 *	as per the FC-GS Fabric Device Management Information (FDMI)
 *	specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags.  Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features.  The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int idx)
 *	Used to add FDB entries to dump requests.  Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier.  Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state.  Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *                             struct netdev_phys_item_id *ppid);
 *	Called to get the ID of the physical port of this device.  If the
 *	driver does not implement this, it is assumed that the hw is not able
 *	to have multiple net devices on a single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *                            sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to.  It is called only when
 *	a new port starts listening.  The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *                            sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore.  The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *                               struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware.  'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload.  Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by the upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'.  'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *                                    struct net_device *dev,
 *                                    void *priv);
 *	Callback to use for xmit over the accelerated station.  This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *	Called by the core transmit path to determine if the device is capable
 *	of performing offload operations on a given packet.  This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags.  The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *                           int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of a specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_change_proto_down)(struct net_device *dev,
 *                              bool proto_down);
 *	This function is used to pass protocol port error state information
 *	to the switch driver.  The switch driver can react to the proto_down
 *	by doing a phys down on the associated switch port.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for a given
 *	skb.  This is useful for retrieving outer tunnel header parameters
 *	while sampling packets.
 *
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	netdev_features_t	(*ndo_features_check) (struct sk_buff *skb,
						       struct net_device *dev,
						       netdev_features_t features);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
};
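
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a hypothetical Ethernet driver wires up only the operations it needs;
 * everything left NULL gets the stack's default behaviour.  eth_mac_addr(),
 * eth_validate_addr() and eth_change_mtu() are the generic helpers from
 * net/ethernet/eth.c; the my_* functions are hypothetical.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_rx_mode	= my_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 *	dev->netdev_ops = &my_netdev_ops;
 */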

/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set internally
 * by drivers and used in the kernel.  These flags are invisible to
 * userspace; this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_IPVLAN_MASTER: IPvlan master device
 * @IFF_IPVLAN_SLAVE: IPvlan slave device
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_IPVLAN_MASTER		= 1<<18,
	IFF_IPVLAN_SLAVE		= 1<<19,
	IFF_L3MDEV_MASTER		= 1<<20,
	IFF_NO_QUEUE			= 1<<21,
	IFF_OPENVSWITCH			= 1<<22,
	IFF_L3MDEV_SLAVE		= 1<<23,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE

/**
 *	struct net_device - The DEVICE structure.
 *		Actually, this whole structure is a big mistake.  It mixes I/O
 *		data with strictly "high-level" data, and it has to know about
 *		almost every data structure used in the INET module.
 *
 *	@name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file).  It is the name
 *		of the interface.
 *
 *	@name_hlist:	Device name hash chain, please keep it close to name[]
 *	@ifalias:	SNMP alias
 *	@mem_end:	Shared memory end
 *	@mem_start:	Shared memory start
 *	@base_addr:	Device I/O address
 *	@irq:		Device IRQ number
 *
 *	@carrier_changes:	Stats to monitor carrier on<->off transitions
 *
 *	@state:		Generic network queuing layer state, see netdev_state_t
 *	@dev_list:	The global list of network devices
 *	@napi_list:	List entry used for polling NAPI devices
 *	@unreg_list:	List entry used when we are unregistering the
 *			device; see the function unregister_netdev
 *	@close_list:	List entry used when we are closing the device
 *
 *	@adj_list:	Directly linked devices, like slaves for bonding
 *	@all_adj_list:	All linked devices, *including* neighbours
 *	@features:	Currently active device features
 *	@hw_features:	User-changeable features
 *
 *	@wanted_features:	User-requested features
 *	@vlan_features:		Mask of features inheritable by VLAN devices
 *
 *	@hw_enc_features:	Mask of features inherited by encapsulating devices
 *				This field indicates what encapsulation
 *				offloads the hardware is capable of doing,
 *				and drivers will need to set them appropriately.
 *
 *	@mpls_features:	Mask of features inheritable by MPLS
 *
 *	@ifindex:	interface index
 *	@group:		The group the device belongs to
 *
 *	@stats:		Statistics struct, which was left as a legacy; use
 *			rtnl_link_stats64 instead
 *
 *	@rx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *	@tx_dropped:	Dropped packets by core network,
 *			do not use this in drivers
 *
 *	@wireless_handlers:	List of functions to handle Wireless Extensions,
 *				instead of ioctl,
 *				see <net/iw_handler.h> for details.
 *	@wireless_data:	Instance data managed by the core of wireless extensions
 *
 *	@netdev_ops:	Includes several pointers to callbacks,
 *			if one wants to override the ndo_*() functions
 *	@ethtool_ops:	Management operations
 *	@header_ops:	Includes callbacks for creating, parsing, caching, etc
 *			of Layer 2 headers.
 *
 *	@flags:		Interface flags (a la BSD)
 *	@priv_flags:	Like 'flags' but invisible to userspace,
 *			see if.h for the definitions
 *	@gflags:	Global flags ( kept as legacy )
 *	@padded:	How much padding added by alloc_netdev()
 *	@operstate:	RFC2863 operstate
 *	@link_mode:	Mapping policy to operstate
 *	@if_port:	Selectable AUI, TP, ...
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
 *	@type:		Interface hardware type
 *	@hard_header_len: Hardware header length
 *
 *	@needed_headroom: Extra headroom the hardware may need, but not in all
 *			  cases can this be guaranteed
 *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 *			  cases can this be guaranteed.  Some cases also use
 *			  LL_MAX_HEADER instead to allocate the skb
 *
 *	interface address info:
 *
 *	@perm_addr:		Permanent hw address
 *	@addr_assign_type:	Hw address assignment type
 *	@addr_len:		Hardware address length
 *	@neigh_priv_len:	Used in neigh_alloc(),
 *				initialized only in atm/clip.c
 *	@dev_id:		Used to differentiate devices that share
 *				the same link layer address
 *	@dev_port:		Used to differentiate devices that share
 *				the same function
 *	@addr_list_lock:	XXX: need comments on this one
 *	@uc_promisc:		Counter that indicates promiscuous mode
 *				has been enabled due to the need to listen to
 *				additional unicast addresses in a device that
 *				does not implement ndo_set_rx_mode()
 *	@uc:			unicast mac addresses
 *	@mc:			multicast mac addresses
 *	@dev_addrs:		list of device hw addresses
 *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
 *	@promiscuity:		Number of times the NIC is told to work in
 *				promiscuous mode; if it becomes 0 the NIC will
 *				exit promiscuous mode
 *	@allmulti:		Counter, enables or disables allmulticast mode
 *
 *	@vlan_info:	VLAN info
 *	@dsa_ptr:	dsa specific data
 *	@tipc_ptr:	TIPC specific data
 *	@atalk_ptr:	AppleTalk link
 *	@ip_ptr:	IPv4 specific data
 *	@dn_ptr:	DECnet specific data
 *	@ip6_ptr:	IPv6 specific data
 *	@ax25_ptr:	AX.25 specific data
 *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 *
 *	@last_rx:	Time of last Rx
 *	@dev_addr:	Hw address (before bcast,
 *			because most packets are unicast)
 *
 *	@_rx:			Array of RX queues
 *	@num_rx_queues:		Number of RX queues
 *				allocated at register_netdev() time
 *	@real_num_rx_queues:	Number of RX queues currently active in device
 *
 *	@rx_handler:		handler for received packets
 *	@rx_handler_data:	XXX: need comments on this one
 *	@ingress_queue:		XXX: need comments on this one
 *	@broadcast:		hw bcast address
 *
 *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 *			indexed by RX queue number.  Assigned by driver.
 *			This must only be set if the ndo_rx_flow_steer
 *			operation is defined
 *	@index_hlist:	Device index hash chain
 *
 *	@_tx:			Array of TX queues
 *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
 *	@real_num_tx_queues:	Number of TX queues currently active in device
 *	@qdisc:			Root qdisc from userspace point of view
 *	@tx_queue_len:		Max frames per queue allowed
 *	@tx_global_lock:	XXX: need comments on this one
 *
 *	@xps_maps:	XXX: need comments on this one
 *
 *	@offload_fwd_mark:	Offload device fwding mark
 *
 *	@trans_start:		Time (in jiffies) of last Tx
 *	@watchdog_timeo:	Represents the timeout that is used by
 *				the watchdog ( see dev_watchdog() )
 *	@watchdog_timer:	List of timers
 *
 *	@pcpu_refcnt:		Number of references to this device
 *	@todo_list:		Delayed register/unregister
 *	@link_watch_list:	XXX: need comments on this one
 *
 *	@reg_state:		Register/unregister state machine
 *	@dismantle:		Device is going to be freed
 *	@rtnl_link_state:	This enum represents the phases of creating
 *				a new link
 *
 *	@destructor:		Called from unregister,
 *				can be used to call free_netdev
 *	@npinfo:		XXX: need comments on this one
 *	@nd_net:		Network namespace this network device is inside
 *
 *	@ml_priv:	Mid-layer private
 *	@lstats:	Loopback statistics
 *	@tstats:	Tunnel statistics
 *	@dstats:	Dummy statistics
 *	@vstats:	Virtual ethernet statistics
 *
 *	@garp_port:	GARP
 *	@mrp_port:	MRP
 *
 *	@dev:		Class/net/name entry
 *	@sysfs_groups:	Space for optional device, statistics and wireless
 *			sysfs groups
 *
 *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 *	@rtnl_link_ops:	Rtnl_link_ops
 *
 *	@gso_max_size:	Maximum size of generic segmentation offload
 *	@gso_max_segs:	Maximum number of segments that can be passed to the
 *			NIC for GSO
 *	@gso_min_segs:	Minimum number of segments that can be passed to the
 *			NIC for GSO
 *
 *	@dcbnl_ops:	Data Center Bridging netlink ops
 *	@num_tc:	Number of traffic classes in the net device
 *	@tc_to_txq:	XXX: need comments on this one
 *	@prio_tc_map:	XXX: need comments on this one
 *
 *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 *
 *	@priomap:	XXX: need comments on this one
 *	@phydev:	Physical device may attach itself
 *			for hardware timestamping
 *
 *	@qdisc_tx_busylock:	XXX: need comments on this one
 *
 *	@proto_down:	protocol port state information can be sent to the
 *			switch driver and used to set the phys state of the
 *			switch port.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	atomic_t		carrier_changes;

	/*
	 *	Some hardware also needs these fields (state,dev_list,
	 *	napi_list,unreg_list,close_list) but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;

	int			ifindex;
	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
	const struct switchdev_ops *switchdev_ops;
#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned short		type;
	unsigned short		hard_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	void			*atalk_ptr;
	struct in_device __rcu	*ip_ptr;
	struct dn_dev __rcu	*dn_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
	void			*ax25_ptr;
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;


#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

#endif

	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu	*ingress_cl_list;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct list_head	nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
	unsigned long		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

#ifdef CONFIG_NET_SWITCHDEV
	u32			offload_fwd_mark;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;

	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	/* mid-layer private */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
		struct pcpu_vstats __percpu		*vstats;
	};

	struct garp_port __rcu	*garp_port;
	struct mrp_port __rcu	*mrp_port;

	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;
	u16			gso_min_segs;
#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device *phydev;
	struct lock_class_key *qdisc_tx_busylock;
	bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
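/*
 * Illustrative sketch (not part of the original header): how a multiqueue
 * driver might carve its TX queues into traffic classes using the helpers
 * above. The function name and the queue layout are hypothetical.
 */
#if 0	/* example only */
static int example_setup_tc(struct net_device *dev)
{
	int err;

	/* Two traffic classes over four TX queues: 0-1 and 2-3. */
	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	err = netdev_set_tc_queue(dev, 0, 2, 0);	/* tc 0: queues 0-1 */
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 2, 2);/* tc 1: queues 2-3 */
	if (err) {
		netdev_reset_tc(dev);
		return err;
	}

	/* Map the highest priority to tc 1, everything else stays on tc 0. */
	return netdev_set_prio_tc_map(dev, 7, 1);
}
#endif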
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (dev->dsa_ptr != NULL)
		return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a sysfs symlink will be created during
 * initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight.
 * Device drivers are strongly advised not to use a bigger value.
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_tx_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * It avoids adding the napi context to napi_hash[], thus keeping that hash
 * table unpolluted.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
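/*
 * Illustrative sketch (not part of the original header): typical NAPI wiring
 * in a hypothetical driver. "foo_priv" and "foo_clean_rx" are made-up names;
 * the pattern (netif_napi_add() at probe time, napi_complete() once the
 * budget is not exhausted) is the documented usage.
 */
#if 0	/* example only */
struct foo_priv {
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);	/* hypothetical */

	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}

static void foo_probe_napi(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
}
#endif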
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in udp_gro_receive */
	u8	udp_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* 7 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct udp_offload;

struct udp_offload_callbacks {
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb,
						 struct udp_offload *uoff);
	int			(*gro_complete)(struct sk_buff *skb,
						int nhoff,
						struct udp_offload *uoff);
};

struct udp_offload {
	__be16			 port;
	u8			 ipproto;
	struct udp_offload_callbacks callbacks;
};

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	struct u64_stats_sync	syncp;
};

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

/* Note: no trailing semicolon, so the macro can be used in expression
 * context.
 */
#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
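/*
 * Illustrative sketch (not part of the original header): allocating the
 * per-cpu stats above and bumping them from a (hypothetical) RX path. The
 * u64_stats_update_begin()/u64_stats_update_end() pair is the documented way
 * to update counters protected by u64_stats_sync.
 */
#if 0	/* example only */
static int foo_init_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
#endif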
#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007	/* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017	/* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA	0x0018
#define NETDEV_BONDING_INFO	0x0019
#define NETDEV_PRECHANGEUPPER	0x001A

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
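/*
 * Illustrative sketch (not part of the original header): a minimal netdevice
 * notifier. The handler name is hypothetical; the registration call and the
 * netdev_notifier_info_to_dev() accessor are the ones declared above.
 */
#if 0	/* example only */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_nb = {
	.notifier_call = foo_netdev_event,
};
/* registered from module init: register_netdevice_notifier(&foo_netdev_nb); */
#endif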
extern rwlock_t				dev_base_lock;	/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
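/*
 * Illustrative sketch (not part of the original header): hooking a receive
 * handler for a (made-up) ethertype with dev_add_pack(). The handler
 * signature matches struct packet_type::func above.
 */
#if 0	/* example only */
static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... consume the skb ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type foo_packet_type = {
	.type = cpu_to_be16(0x88b5),	/* local experimental ethertype */
	.func = foo_rcv,		/* .dev == NULL: match all devices */
};
/* dev_add_pack(&foo_packet_type) to hook, dev_remove_pack() to unhook */
#endif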
int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

DECLARE_PER_CPU(int, xmit_recursion);
static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}
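/*
 * Illustrative sketch (not part of the original header): the common fast/slow
 * header-access pattern used by gro_receive handlers. "struct foohdr" is a
 * stand-in for a real protocol header.
 */
#if 0	/* example only */
static struct foohdr *foo_gro_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct foohdr);
	struct foohdr *fh = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, hlen))
		fh = skb_gro_header_slow(skb, hlen, off);	/* may be NULL */

	return fh;
}
#endif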
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal
		 * path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (__ret)							\
		__skb_mark_checksum_bad(skb);				\
	else								\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
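/*
 * Illustrative sketch (not part of the original header): how a gro_receive
 * handler for a checksummed protocol might use the validation macros above.
 * Flushing on checksum failure mirrors what the in-tree TCP/UDP GRO paths
 * do; the function name is hypothetical and the body is abbreviated.
 */
#if 0	/* example only */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	/* Validate the packet checksum before trying to aggregate. */
	if (skb_gro_checksum_simple_validate(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* bad csum: don't merge */
		return NULL;
	}

	/* ... proceed with flow matching and aggregation ... */
	return NULL;
}
#endif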
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __sum16 check, __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
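/*
 * Illustrative sketch (not part of the original header): building a link
 * layer header on a locally generated skb via dev_hard_header(). For an
 * Ethernet device this dispatches to eth_header(); the broadcast
 * destination and the function name are just examples.
 */
#if 0	/* example only */
static int foo_build_ll_header(struct sk_buff *skb, struct net_device *dev)
{
	if (dev_hard_header(skb, dev, ETH_P_IP, dev->broadcast,
			    dev->dev_addr, skb->len) < 0)
		return -EINVAL;
	return 0;
}
#endif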
typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2; must not
					     overflow the u8 bucket counters */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;

#ifdef CONFIG_RPS
	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmit
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
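/*
 * Illustrative sketch (not part of the original header): the classic TX flow
 * control pattern. The xmit path stops the queue when the (hypothetical)
 * ring is full; the completion path wakes it once space has been reclaimed.
 */
#if 0	/* example only */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (foo_tx_ring_full(dev)) {		/* hypothetical */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* ... post skb to hardware ... */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	/* ... reclaim completed descriptors ... */
	if (netif_queue_stopped(dev) && !foo_tx_ring_full(dev))
		netif_wake_queue(dev);
}
#endif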
/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the cpu.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the cpu.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
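/*
 * Illustrative sketch (not part of the original header): pairing the BQL
 * accounting calls. Bytes reported by netdev_sent_queue() in the xmit path
 * must be matched exactly by netdev_completed_queue() in the completion
 * path; all names other than the helpers themselves are hypothetical.
 */
#if 0	/* example only */
static netdev_tx_t foo_bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... post skb to hardware ... */
	netdev_sent_queue(dev, len);
	return NETDEV_TX_OK;
}

static void foo_bql_tx_complete(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, summing pkts and bytes ... */
	netdev_completed_queue(dev, pkts, bytes);
}
#endif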
/**
 *	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 *	@dev: network device
 *	@queue_index: given tx queue index
 *
 *	Returns 0 if given tx queue index >= number of device tx queues,
 *	otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if a subqueue is stopped; all other device management
 * is done at the overall netdevice level. There is also a test for whether
 * the device is multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

void netif_wake_subqueue(struct net_device *dev, u16 queue_index);

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues);

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
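/*
 * Illustrative sketch (not part of the original header): choosing the right
 * free helper in a TX completion handler that runs in hard irq context.
 * Successfully transmitted skbs are "consumed", errored ones are "dropped",
 * which keeps drop monitoring accurate. The function is hypothetical.
 */
#if 0	/* example only */
static void foo_tx_irq_clean(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_irq(skb);	/* normal completion */
	else
		dev_kfree_skb_irq(skb);		/* counts as a drop */
}
#endif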
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
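/*
 * Illustrative sketch (not part of the original header): the usual way a
 * driver seeds msg_enable from a module parameter and gates its logging on
 * the netif_msg_*() tests above. "debug" and the private struct are
 * hypothetical.
 */
#if 0	/* example only */
static int debug = -1;	/* -1 means: use the driver's default bits */

static void foo_init_msg(struct foo_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	if (netif_msg_probe(priv))
		pr_info("foo: probing\n");
}
#endif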
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here. If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		true )

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 * __dev_uc_sync - Synchronize device's unicast list
 * @dev:  device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 **/
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized addresses from device
 * @dev:  device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_uc_sync().
 **/
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
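
/* Illustrative sketch (not from this header): a driver with a hardware
 * filter table typically wires __dev_uc_sync() into ->ndo_set_rx_mode()
 * so the table tracks the kernel's unicast list.  "foo_add_filter",
 * "foo_del_filter" and "foo_set_rx_mode" are hypothetical callbacks.
 *
 *	static int foo_add_filter(struct net_device *dev,
 *				  const unsigned char *addr)
 *	{
 *		return 0;	// program addr into the HW filter
 *	}
 *
 *	static int foo_del_filter(struct net_device *dev,
 *				  const unsigned char *addr)
 *	{
 *		return 0;	// remove addr from the HW filter
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_add_filter, foo_del_filter);
 *	}
 */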

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 * __dev_mc_sync - Synchronize device's multicast list
 * @dev:  device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 **/
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized addresses from device
 * @dev:  device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_mc_sync().
 **/
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
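
/* Illustrative sketch (not from this header): stacked devices such as
 * VLANs or bonding propagate their address lists to the underlying
 * device with dev_uc_sync()/dev_mc_sync() from ->ndo_set_rx_mode().
 * "foo_upper_set_rx_mode" and "foo_get_lower" are hypothetical.
 *
 *	static void foo_upper_set_rx_mode(struct net_device *upper)
 *	{
 *		struct net_device *lower = foo_get_lower(upper);
 *
 *		dev_uc_sync(lower, upper);	// push unicast list down
 *		dev_mc_sync(lower, upper);	// push multicast list down
 *	}
 */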

/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->adj_list.upper, \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->all_adj_list.upper, \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))

void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
			    struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     ldev = netdev_lower_get_next(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_lower_get_next(dev, &(iter)))

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
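
/* Illustrative sketch (not from this header): walking a device's upper
 * devices must happen under the RCU read lock, e.g. to count them.
 * "foo_count_uppers" is hypothetical.
 *
 *	static unsigned int foo_count_uppers(struct net_device *dev)
 *	{
 *		struct net_device *updev;
 *		struct list_head *iter;
 *		unsigned int n = 0;
 *
 *		rcu_read_lock();
 *		netdev_for_each_upper_dev_rcu(dev, updev, iter)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */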

/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

struct netdev_bonding_info {
	ifslave	slave;
	ifbond	master;
};

struct netdev_notifier_bonding_info {
	struct netdev_notifier_info info; /* must be first */
	struct netdev_bonding_info  bonding_info;
};

void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
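
/* Illustrative sketch (not from this header): before relying on
 * checksum offload, a transmit path can test whether the device's
 * feature set covers the packet's protocol and fall back to software
 * checksumming otherwise.  "foo_tx_csum" is hypothetical.
 *
 *	static int foo_tx_csum(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *		    !can_checksum_protocol(dev->features, skb->protocol))
 *			return skb_checksum_help(skb);	// software fallback
 *		return 0;				// HW can handle it
 *	}
 */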

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	skb->xmit_more = more ? 1 : 0;
	return ops->ndo_start_xmit(skb, dev);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							  netdev_features_t f2)
{
	if (f1 & NETIF_F_GEN_CSUM)
		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	if (f2 & NETIF_F_GEN_CSUM)
		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	f1 &= f2;
	if (f1 & NETIF_F_GEN_CSUM)
		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return f1;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
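
/* Illustrative sketch (not from this header): a stacked device can only
 * offer what both it and its lower device support, with NETIF_F_GEN_CSUM
 * treated as covering the protocol-specific checksum bits.  "struct
 * foo_priv" and its "lower" pointer are hypothetical; the callback shape
 * follows ->ndo_fix_features().
 *
 *	static netdev_features_t foo_fix_features(struct net_device *dev,
 *						  netdev_features_t features)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return netdev_intersect_features(features,
 *						 priv->lower->features);
 *	}
 */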

/* Allow TSO to be used on stacked devices:
 * deferring the GSO segmentation until just before the last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_ipvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_SLAVE;
}

static inline bool netif_is_ipvlan_port(struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_MASTER;
}
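
/* Illustrative sketch (not from this header): a transmit path uses
 * netif_needs_gso() to decide between handing a GSO skb to the device
 * and segmenting it in software first.  The snippet below is
 * hypothetical and elides error handling.
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		segs = skb_gso_segment(skb, features);	// software GSO
 *		// ... transmit each resulting segment in turn
 *	} else {
 *		// device can take the GSO skb (or it is not GSO at all)
 *	}
 */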

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
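
/* Illustrative sketch (not from this header): the netdev_* helpers
 * prefix messages with the driver and device name, so drivers log
 * through them instead of raw printk().  The messages and the "speed"
 * variable below are hypothetical.
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_err(dev, "failed to allocate RX ring\n");
 */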

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16?  Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *        sure which should go first, but I bet it won't make much
 *        difference if we are running VLANs.  The good news is that
 *        this protocol won't be in the list unless compiled in, so
 *        the average user (w/out VLANs) will not be adversely affected.
 *        --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
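
/* Illustrative sketch (not from this header): the packet-type hash is
 * simply the low nibble of the host-order protocol value, so e.g. IP
 * (0x0800) and ARP (0x0806) land in buckets 0 and 6.
 *
 *	unsigned int bucket = ntohs(type) & PTYPE_HASH_MASK;
 */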

#endif	/* _LINUX_NETDEVICE_H */