1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Definitions for the Interfaces handler. 7 * 8 * Version: @(#)dev.h 1.0.10 08/12/93 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> 14 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 15 * Bjorn Ekwall. <bj0rn@blox.se> 16 * Pekka Riikonen <priikone@poseidon.pspt.fi> 17 * 18 * This program is free software; you can redistribute it and/or 19 * modify it under the terms of the GNU General Public License 20 * as published by the Free Software Foundation; either version 21 * 2 of the License, or (at your option) any later version. 22 * 23 * Moved to /usr/include/linux for NET3 24 */ 25 #ifndef _LINUX_NETDEVICE_H 26 #define _LINUX_NETDEVICE_H 27 28 #include <linux/pm_qos.h> 29 #include <linux/timer.h> 30 #include <linux/bug.h> 31 #include <linux/delay.h> 32 #include <linux/atomic.h> 33 #include <asm/cache.h> 34 #include <asm/byteorder.h> 35 36 #include <linux/percpu.h> 37 #include <linux/rculist.h> 38 #include <linux/dmaengine.h> 39 #include <linux/workqueue.h> 40 #include <linux/dynamic_queue_limits.h> 41 42 #include <linux/ethtool.h> 43 #include <net/net_namespace.h> 44 #include <net/dsa.h> 45 #ifdef CONFIG_DCB 46 #include <net/dcbnl.h> 47 #endif 48 #include <net/netprio_cgroup.h> 49 50 #include <linux/netdev_features.h> 51 #include <linux/neighbour.h> 52 #include <uapi/linux/netdevice.h> 53 54 struct netpoll_info; 55 struct device; 56 struct phy_device; 57 /* 802.11 specific */ 58 struct wireless_dev; 59 /* source back-compat hooks */ 60 #define SET_ETHTOOL_OPS(netdev,ops) \ 61 ( (netdev)->ethtool_ops = (ops) ) 62 63 extern void netdev_set_default_ethtool_ops(struct net_device *dev, 64 const struct ethtool_ops *ops); 65 66 /* hardware address assignment types */ 67 #define NET_ADDR_PERM 0 /* address is permanent (default) */ 68 #define NET_ADDR_RANDOM 1 /* address is generated randomly */ 69 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ 70 #define NET_ADDR_SET 3 /* address is set using 71 * dev_set_mac_address() */ 72 73 /* Backlog congestion levels */ 74 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 75 #define NET_RX_DROP 1 /* packet dropped */ 76 77 /* 78 * Transmit return codes: transmit return codes originate from three different 79 * namespaces: 80 * 81 * - qdisc return codes 82 * - driver transmit return codes 83 * - errno values 84 * 85 * Drivers are allowed to return any one of those in their hard_start_xmit() 86 * function. Real network devices commonly used with qdiscs should only return 87 * the driver transmit return codes though - when qdiscs are used, the actual 88 * transmission happens asynchronously, so the value is not propagated to 89 * higher layers. Virtual network devices transmit synchronously, in this case 90 * the driver transmit return codes are consumed by dev_queue_xmit(), all 91 * others are propagated to higher layers. 92 */ 93 94 /* qdisc ->enqueue() return codes. */ 95 #define NET_XMIT_SUCCESS 0x00 96 #define NET_XMIT_DROP 0x01 /* skb dropped */ 97 #define NET_XMIT_CN 0x02 /* congestion notification */ 98 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ 99 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 100 101 /* NET_XMIT_CN is special. 
It does not guarantee that this packet is lost. It 102 * indicates that the device will soon be dropping packets, or already drops 103 * some packets of the same priority; prompting us to send less aggressively. */ 104 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 105 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 106 107 /* Driver transmit return codes */ 108 #define NETDEV_TX_MASK 0xf0 109 110 enum netdev_tx { 111 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 112 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 113 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 114 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ 115 }; 116 typedef enum netdev_tx netdev_tx_t; 117 118 /* 119 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 120 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 121 */ 122 static inline bool dev_xmit_complete(int rc) 123 { 124 /* 125 * Positive cases with an skb consumed by a driver: 126 * - successful transmission (rc == NETDEV_TX_OK) 127 * - error while transmitting (rc < 0) 128 * - error while queueing to a different device (rc & NET_XMIT_MASK) 129 */ 130 if (likely(rc < NET_XMIT_MASK)) 131 return true; 132 133 return false; 134 } 135 136 /* 137 * Compute the worst case header length according to the protocols 138 * used. 139 */ 140 141 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 142 # if defined(CONFIG_MAC80211_MESH) 143 # define LL_MAX_HEADER 128 144 # else 145 # define LL_MAX_HEADER 96 146 # endif 147 #else 148 # define LL_MAX_HEADER 32 149 #endif 150 151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ 152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) 153 #define MAX_HEADER LL_MAX_HEADER 154 #else 155 #define MAX_HEADER (LL_MAX_HEADER + 48) 156 #endif 157 158 /* 159 * Old network device statistics. Fields are native words 160 * (unsigned long) so they can be read and written atomically. 
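 *
 * As an illustration (not a requirement of any particular driver), a simple
 * single-queue RX path could account a received frame directly in dev->stats:
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 *	if (bad_crc)			// hypothetical condition
 *		dev->stats.rx_crc_errors++;
 *
 * Multi-queue drivers usually keep per-queue counters instead and fold them
 * together in one of the ndo_get_stats* callbacks described further down.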
161 */ 162 163 struct net_device_stats { 164 unsigned long rx_packets; 165 unsigned long tx_packets; 166 unsigned long rx_bytes; 167 unsigned long tx_bytes; 168 unsigned long rx_errors; 169 unsigned long tx_errors; 170 unsigned long rx_dropped; 171 unsigned long tx_dropped; 172 unsigned long multicast; 173 unsigned long collisions; 174 unsigned long rx_length_errors; 175 unsigned long rx_over_errors; 176 unsigned long rx_crc_errors; 177 unsigned long rx_frame_errors; 178 unsigned long rx_fifo_errors; 179 unsigned long rx_missed_errors; 180 unsigned long tx_aborted_errors; 181 unsigned long tx_carrier_errors; 182 unsigned long tx_fifo_errors; 183 unsigned long tx_heartbeat_errors; 184 unsigned long tx_window_errors; 185 unsigned long rx_compressed; 186 unsigned long tx_compressed; 187 }; 188 189 190 #include <linux/cache.h> 191 #include <linux/skbuff.h> 192 193 #ifdef CONFIG_RPS 194 #include <linux/static_key.h> 195 extern struct static_key rps_needed; 196 #endif 197 198 struct neighbour; 199 struct neigh_parms; 200 struct sk_buff; 201 202 struct netdev_hw_addr { 203 struct list_head list; 204 unsigned char addr[MAX_ADDR_LEN]; 205 unsigned char type; 206 #define NETDEV_HW_ADDR_T_LAN 1 207 #define NETDEV_HW_ADDR_T_SAN 2 208 #define NETDEV_HW_ADDR_T_SLAVE 3 209 #define NETDEV_HW_ADDR_T_UNICAST 4 210 #define NETDEV_HW_ADDR_T_MULTICAST 5 211 bool global_use; 212 int sync_cnt; 213 int refcount; 214 int synced; 215 struct rcu_head rcu_head; 216 }; 217 218 struct netdev_hw_addr_list { 219 struct list_head list; 220 int count; 221 }; 222 223 #define netdev_hw_addr_list_count(l) ((l)->count) 224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 225 #define netdev_hw_addr_list_for_each(ha, l) \ 226 list_for_each_entry(ha, &(l)->list, list) 227 228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 230 #define netdev_for_each_uc_addr(ha, dev) \ 231 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 232 233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 235 #define netdev_for_each_mc_addr(ha, dev) \ 236 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 237 238 struct hh_cache { 239 u16 hh_len; 240 u16 __pad; 241 seqlock_t hh_lock; 242 243 /* cached hardware header; allow for machine alignment needs. */ 244 #define HH_DATA_MOD 16 245 #define HH_DATA_OFF(__len) \ 246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 247 #define HH_DATA_ALIGN(__len) \ 248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 250 }; 251 252 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. 253 * Alternative is: 254 * dev->hard_header_len ? (dev->hard_header_len + 255 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 256 * 257 * We could use other alignment values, but we must maintain the 258 * relationship HH alignment <= LL alignment. 
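 *
 * As a concrete (illustrative) data point: for a plain Ethernet device with
 * hard_header_len == 14 and needed_headroom == 0, the macro below gives
 * ((14 + 0) & ~15) + 16 == 16, i.e. the 14 byte header fits in a single
 * 16 byte HH_DATA_MOD unit of reserved headroom.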
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
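 *
 * A minimal sketch (hypothetical "foo" code, not a real in-tree handler) of
 * an rx_handler that only peeks at the packet and lets it continue normally:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		foo_count_frame(skb->dev, skb->len);	// hypothetical helper
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	// under RTNL:
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, foo_priv);
 *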
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers that the skb should be ignored, it should
 * return RX_HANDLER_EXACT. The skb will only be delivered to protocol
 * handlers that are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be delivered
 * normally, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
447 */ 448 extern void __napi_complete(struct napi_struct *n); 449 extern void napi_complete(struct napi_struct *n); 450 451 /** 452 * napi_by_id - lookup a NAPI by napi_id 453 * @napi_id: hashed napi_id 454 * 455 * lookup @napi_id in napi_hash table 456 * must be called under rcu_read_lock() 457 */ 458 extern struct napi_struct *napi_by_id(unsigned int napi_id); 459 460 /** 461 * napi_hash_add - add a NAPI to global hashtable 462 * @napi: napi context 463 * 464 * generate a new napi_id and store a @napi under it in napi_hash 465 */ 466 extern void napi_hash_add(struct napi_struct *napi); 467 468 /** 469 * napi_hash_del - remove a NAPI from global table 470 * @napi: napi context 471 * 472 * Warning: caller must observe rcu grace period 473 * before freeing memory containing @napi 474 */ 475 extern void napi_hash_del(struct napi_struct *napi); 476 477 /** 478 * napi_disable - prevent NAPI from scheduling 479 * @n: napi context 480 * 481 * Stop NAPI from being scheduled on this context. 482 * Waits till any outstanding processing completes. 483 */ 484 static inline void napi_disable(struct napi_struct *n) 485 { 486 set_bit(NAPI_STATE_DISABLE, &n->state); 487 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 488 msleep(1); 489 clear_bit(NAPI_STATE_DISABLE, &n->state); 490 } 491 492 /** 493 * napi_enable - enable NAPI scheduling 494 * @n: napi context 495 * 496 * Resume NAPI from being scheduled on this context. 497 * Must be paired with napi_disable. 498 */ 499 static inline void napi_enable(struct napi_struct *n) 500 { 501 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 502 smp_mb__before_clear_bit(); 503 clear_bit(NAPI_STATE_SCHED, &n->state); 504 } 505 506 #ifdef CONFIG_SMP 507 /** 508 * napi_synchronize - wait until NAPI is not running 509 * @n: napi context 510 * 511 * Wait until NAPI is done being scheduled on this context. 512 * Waits till any outstanding processing completes but 513 * does not disable future activations. 514 */ 515 static inline void napi_synchronize(const struct napi_struct *n) 516 { 517 while (test_bit(NAPI_STATE_SCHED, &n->state)) 518 msleep(1); 519 } 520 #else 521 # define napi_synchronize(n) barrier() 522 #endif 523 524 enum netdev_queue_state_t { 525 __QUEUE_STATE_DRV_XOFF, 526 __QUEUE_STATE_STACK_XOFF, 527 __QUEUE_STATE_FROZEN, 528 #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ 529 (1 << __QUEUE_STATE_STACK_XOFF)) 530 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ 531 (1 << __QUEUE_STATE_FROZEN)) 532 }; 533 /* 534 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The 535 * netif_tx_* functions below are used to manipulate this flag. The 536 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit 537 * queue independently. The netif_xmit_*stopped functions below are called 538 * to check if the queue has been stopped by the driver or stack (either 539 * of the XOFF bits are set in the state). Drivers should not need to call 540 * netif_xmit*stopped functions, they should only be using netif_tx_*. 
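 *
 * As a sketch (hypothetical driver code, error handling omitted), the usual
 * pattern is to stop a queue when the TX ring is nearly full and wake it from
 * the completion path once descriptors have been reclaimed:
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->queue_index);
 *
 *	if (foo_ring_free_slots(ring) < MAX_SKB_FRAGS + 1)	// hypothetical helper
 *		netif_tx_stop_queue(txq);
 *	...
 *	if (netif_tx_queue_stopped(txq) &&
 *	    foo_ring_free_slots(ring) > wake_thresh)
 *		netif_tx_wake_queue(txq);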
541 */ 542 543 struct netdev_queue { 544 /* 545 * read mostly part 546 */ 547 struct net_device *dev; 548 struct Qdisc *qdisc; 549 struct Qdisc *qdisc_sleeping; 550 #ifdef CONFIG_SYSFS 551 struct kobject kobj; 552 #endif 553 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 554 int numa_node; 555 #endif 556 /* 557 * write mostly part 558 */ 559 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 560 int xmit_lock_owner; 561 /* 562 * please use this field instead of dev->trans_start 563 */ 564 unsigned long trans_start; 565 566 /* 567 * Number of TX timeouts for this queue 568 * (/sys/class/net/DEV/Q/trans_timeout) 569 */ 570 unsigned long trans_timeout; 571 572 unsigned long state; 573 574 #ifdef CONFIG_BQL 575 struct dql dql; 576 #endif 577 } ____cacheline_aligned_in_smp; 578 579 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 580 { 581 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 582 return q->numa_node; 583 #else 584 return NUMA_NO_NODE; 585 #endif 586 } 587 588 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) 589 { 590 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 591 q->numa_node = node; 592 #endif 593 } 594 595 #ifdef CONFIG_RPS 596 /* 597 * This structure holds an RPS map which can be of variable length. The 598 * map is an array of CPUs. 599 */ 600 struct rps_map { 601 unsigned int len; 602 struct rcu_head rcu; 603 u16 cpus[0]; 604 }; 605 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) 606 607 /* 608 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the 609 * tail pointer for that CPU's input queue at the time of last enqueue, and 610 * a hardware filter index. 611 */ 612 struct rps_dev_flow { 613 u16 cpu; 614 u16 filter; 615 unsigned int last_qtail; 616 }; 617 #define RPS_NO_FILTER 0xffff 618 619 /* 620 * The rps_dev_flow_table structure contains a table of flow mappings. 621 */ 622 struct rps_dev_flow_table { 623 unsigned int mask; 624 struct rcu_head rcu; 625 struct rps_dev_flow flows[0]; 626 }; 627 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ 628 ((_num) * sizeof(struct rps_dev_flow))) 629 630 /* 631 * The rps_sock_flow_table contains mappings of flows to the last CPU 632 * on which they were processed by the application (set in recvmsg). 633 */ 634 struct rps_sock_flow_table { 635 unsigned int mask; 636 u16 ents[0]; 637 }; 638 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ 639 ((_num) * sizeof(u16))) 640 641 #define RPS_NO_CPU 0xffff 642 643 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 644 u32 hash) 645 { 646 if (table && hash) { 647 unsigned int cpu, index = hash & table->mask; 648 649 /* We only give a hint, preemption can change cpu under us */ 650 cpu = raw_smp_processor_id(); 651 652 if (table->ents[index] != cpu) 653 table->ents[index] = cpu; 654 } 655 } 656 657 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, 658 u32 hash) 659 { 660 if (table && hash) 661 table->ents[hash & table->mask] = RPS_NO_CPU; 662 } 663 664 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 665 666 #ifdef CONFIG_RFS_ACCEL 667 extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 668 u32 flow_id, u16 filter_id); 669 #endif 670 671 /* This structure contains an instance of an RX queue. 
 */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when the network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev().
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when the device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when the network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when the network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to its configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 * SR-IOV management functions.
836 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 837 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 838 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); 839 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 840 * int (*ndo_get_vf_config)(struct net_device *dev, 841 * int vf, struct ifla_vf_info *ivf); 842 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); 843 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 844 * struct nlattr *port[]); 845 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 846 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) 847 * Called to setup 'tc' number of traffic classes in the net device. This 848 * is always called from the stack with the rtnl lock held and netif tx 849 * queues stopped. This allows the netdevice to perform queue management 850 * safely. 851 * 852 * Fiber Channel over Ethernet (FCoE) offload functions. 853 * int (*ndo_fcoe_enable)(struct net_device *dev); 854 * Called when the FCoE protocol stack wants to start using LLD for FCoE 855 * so the underlying device can perform whatever needed configuration or 856 * initialization to support acceleration of FCoE traffic. 857 * 858 * int (*ndo_fcoe_disable)(struct net_device *dev); 859 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 860 * so the underlying device can perform whatever needed clean-ups to 861 * stop supporting acceleration of FCoE traffic. 862 * 863 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 864 * struct scatterlist *sgl, unsigned int sgc); 865 * Called when the FCoE Initiator wants to initialize an I/O that 866 * is a possible candidate for Direct Data Placement (DDP). The LLD can 867 * perform necessary setup and returns 1 to indicate the device is set up 868 * successfully to perform DDP on this I/O, otherwise this returns 0. 869 * 870 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 871 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 872 * indicated by the FC exchange id 'xid', so the underlying device can 873 * clean up and reuse resources for later DDP requests. 874 * 875 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 876 * struct scatterlist *sgl, unsigned int sgc); 877 * Called when the FCoE Target wants to initialize an I/O that 878 * is a possible candidate for Direct Data Placement (DDP). The LLD can 879 * perform necessary setup and returns 1 to indicate the device is set up 880 * successfully to perform DDP on this I/O, otherwise this returns 0. 881 * 882 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 883 * struct netdev_fcoe_hbainfo *hbainfo); 884 * Called when the FCoE Protocol stack wants information on the underlying 885 * device. This information is utilized by the FCoE protocol stack to 886 * register attributes with Fiber Channel management service as per the 887 * FC-GS Fabric Device Management Information(FDMI) specification. 888 * 889 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 890 * Called when the underlying device wants to override default World Wide 891 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 892 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 893 * protocol stack to use. 894 * 895 * RFS acceleration. 
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 * Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 * Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_port_id *ppid);
 *	Called to get ID of physical port of this device. If the driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on a single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore.
The operation 963 * is protected by the vxlan_net->sock_lock. 964 */ 965 struct net_device_ops { 966 int (*ndo_init)(struct net_device *dev); 967 void (*ndo_uninit)(struct net_device *dev); 968 int (*ndo_open)(struct net_device *dev); 969 int (*ndo_stop)(struct net_device *dev); 970 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 971 struct net_device *dev); 972 u16 (*ndo_select_queue)(struct net_device *dev, 973 struct sk_buff *skb); 974 void (*ndo_change_rx_flags)(struct net_device *dev, 975 int flags); 976 void (*ndo_set_rx_mode)(struct net_device *dev); 977 int (*ndo_set_mac_address)(struct net_device *dev, 978 void *addr); 979 int (*ndo_validate_addr)(struct net_device *dev); 980 int (*ndo_do_ioctl)(struct net_device *dev, 981 struct ifreq *ifr, int cmd); 982 int (*ndo_set_config)(struct net_device *dev, 983 struct ifmap *map); 984 int (*ndo_change_mtu)(struct net_device *dev, 985 int new_mtu); 986 int (*ndo_neigh_setup)(struct net_device *dev, 987 struct neigh_parms *); 988 void (*ndo_tx_timeout) (struct net_device *dev); 989 990 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 991 struct rtnl_link_stats64 *storage); 992 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 993 994 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 995 __be16 proto, u16 vid); 996 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 997 __be16 proto, u16 vid); 998 #ifdef CONFIG_NET_POLL_CONTROLLER 999 void (*ndo_poll_controller)(struct net_device *dev); 1000 int (*ndo_netpoll_setup)(struct net_device *dev, 1001 struct netpoll_info *info, 1002 gfp_t gfp); 1003 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1004 #endif 1005 #ifdef CONFIG_NET_RX_BUSY_POLL 1006 int (*ndo_busy_poll)(struct napi_struct *dev); 1007 #endif 1008 int (*ndo_set_vf_mac)(struct net_device *dev, 1009 int queue, u8 *mac); 1010 int (*ndo_set_vf_vlan)(struct net_device *dev, 1011 int queue, u16 vlan, u8 qos); 1012 int (*ndo_set_vf_tx_rate)(struct net_device *dev, 1013 int vf, int rate); 1014 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1015 int vf, bool setting); 1016 int (*ndo_get_vf_config)(struct net_device *dev, 1017 int vf, 1018 struct ifla_vf_info *ivf); 1019 int (*ndo_set_vf_link_state)(struct net_device *dev, 1020 int vf, int link_state); 1021 int (*ndo_set_vf_port)(struct net_device *dev, 1022 int vf, 1023 struct nlattr *port[]); 1024 int (*ndo_get_vf_port)(struct net_device *dev, 1025 int vf, struct sk_buff *skb); 1026 int (*ndo_setup_tc)(struct net_device *dev, u8 tc); 1027 #if IS_ENABLED(CONFIG_FCOE) 1028 int (*ndo_fcoe_enable)(struct net_device *dev); 1029 int (*ndo_fcoe_disable)(struct net_device *dev); 1030 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 1031 u16 xid, 1032 struct scatterlist *sgl, 1033 unsigned int sgc); 1034 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 1035 u16 xid); 1036 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 1037 u16 xid, 1038 struct scatterlist *sgl, 1039 unsigned int sgc); 1040 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1041 struct netdev_fcoe_hbainfo *hbainfo); 1042 #endif 1043 1044 #if IS_ENABLED(CONFIG_LIBFCOE) 1045 #define NETDEV_FCOE_WWNN 0 1046 #define NETDEV_FCOE_WWPN 1 1047 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 1048 u64 *wwn, int type); 1049 #endif 1050 1051 #ifdef CONFIG_RFS_ACCEL 1052 int (*ndo_rx_flow_steer)(struct net_device *dev, 1053 const struct sk_buff *skb, 1054 u16 rxq_index, 1055 u32 flow_id); 1056 #endif 1057 int (*ndo_add_slave)(struct net_device *dev, 1058 struct net_device *slave_dev); 1059 
int (*ndo_del_slave)(struct net_device *dev, 1060 struct net_device *slave_dev); 1061 netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1062 netdev_features_t features); 1063 int (*ndo_set_features)(struct net_device *dev, 1064 netdev_features_t features); 1065 int (*ndo_neigh_construct)(struct neighbour *n); 1066 void (*ndo_neigh_destroy)(struct neighbour *n); 1067 1068 int (*ndo_fdb_add)(struct ndmsg *ndm, 1069 struct nlattr *tb[], 1070 struct net_device *dev, 1071 const unsigned char *addr, 1072 u16 flags); 1073 int (*ndo_fdb_del)(struct ndmsg *ndm, 1074 struct nlattr *tb[], 1075 struct net_device *dev, 1076 const unsigned char *addr); 1077 int (*ndo_fdb_dump)(struct sk_buff *skb, 1078 struct netlink_callback *cb, 1079 struct net_device *dev, 1080 int idx); 1081 1082 int (*ndo_bridge_setlink)(struct net_device *dev, 1083 struct nlmsghdr *nlh); 1084 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1085 u32 pid, u32 seq, 1086 struct net_device *dev, 1087 u32 filter_mask); 1088 int (*ndo_bridge_dellink)(struct net_device *dev, 1089 struct nlmsghdr *nlh); 1090 int (*ndo_change_carrier)(struct net_device *dev, 1091 bool new_carrier); 1092 int (*ndo_get_phys_port_id)(struct net_device *dev, 1093 struct netdev_phys_port_id *ppid); 1094 void (*ndo_add_vxlan_port)(struct net_device *dev, 1095 sa_family_t sa_family, 1096 __be16 port); 1097 void (*ndo_del_vxlan_port)(struct net_device *dev, 1098 sa_family_t sa_family, 1099 __be16 port); 1100 }; 1101 1102 /* 1103 * The DEVICE structure. 1104 * Actually, this whole structure is a big mistake. It mixes I/O 1105 * data with strictly "high-level" data, and it has to know about 1106 * almost every data structure used in the INET module. 1107 * 1108 * FIXME: cleanup struct net_device such that network protocol info 1109 * moves out. 1110 */ 1111 1112 struct net_device { 1113 1114 /* 1115 * This is the first field of the "visible" part of this structure 1116 * (i.e. as seen by users in the "Space.c" file). It is the name 1117 * of the interface. 1118 */ 1119 char name[IFNAMSIZ]; 1120 1121 /* device name hash chain, please keep it close to name[] */ 1122 struct hlist_node name_hlist; 1123 1124 /* snmp alias */ 1125 char *ifalias; 1126 1127 /* 1128 * I/O specific fields 1129 * FIXME: Merge these and struct ifmap into one 1130 */ 1131 unsigned long mem_end; /* shared mem end */ 1132 unsigned long mem_start; /* shared mem start */ 1133 unsigned long base_addr; /* device I/O address */ 1134 unsigned int irq; /* device IRQ number */ 1135 1136 /* 1137 * Some hardware also needs these fields, but they are not 1138 * part of the usual set specified in Space.c. 1139 */ 1140 1141 unsigned long state; 1142 1143 struct list_head dev_list; 1144 struct list_head napi_list; 1145 struct list_head unreg_list; 1146 struct list_head upper_dev_list; /* List of upper devices */ 1147 struct list_head lower_dev_list; 1148 1149 1150 /* currently active device features */ 1151 netdev_features_t features; 1152 /* user-changeable features */ 1153 netdev_features_t hw_features; 1154 /* user-requested features */ 1155 netdev_features_t wanted_features; 1156 /* mask of features inheritable by VLAN devices */ 1157 netdev_features_t vlan_features; 1158 /* mask of features inherited by encapsulating devices 1159 * This field indicates what encapsulation offloads 1160 * the hardware is capable of doing, and drivers will 1161 * need to set them appropriately. 
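 * For example (illustrative values only), a NIC that can checksum and
 * segment inside VXLAN encapsulation might advertise something like:
 *	dev->hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM |
 *			       NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;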
1162 */ 1163 netdev_features_t hw_enc_features; 1164 /* mask of fetures inheritable by MPLS */ 1165 netdev_features_t mpls_features; 1166 1167 /* Interface index. Unique device identifier */ 1168 int ifindex; 1169 int iflink; 1170 1171 struct net_device_stats stats; 1172 atomic_long_t rx_dropped; /* dropped packets by core network 1173 * Do not use this in drivers. 1174 */ 1175 1176 #ifdef CONFIG_WIRELESS_EXT 1177 /* List of functions to handle Wireless Extensions (instead of ioctl). 1178 * See <net/iw_handler.h> for details. Jean II */ 1179 const struct iw_handler_def * wireless_handlers; 1180 /* Instance data managed by the core of Wireless Extensions. */ 1181 struct iw_public_data * wireless_data; 1182 #endif 1183 /* Management operations */ 1184 const struct net_device_ops *netdev_ops; 1185 const struct ethtool_ops *ethtool_ops; 1186 1187 /* Hardware header description */ 1188 const struct header_ops *header_ops; 1189 1190 unsigned int flags; /* interface flags (a la BSD) */ 1191 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. 1192 * See if.h for definitions. */ 1193 unsigned short gflags; 1194 unsigned short padded; /* How much padding added by alloc_netdev() */ 1195 1196 unsigned char operstate; /* RFC2863 operstate */ 1197 unsigned char link_mode; /* mapping policy to operstate */ 1198 1199 unsigned char if_port; /* Selectable AUI, TP,..*/ 1200 unsigned char dma; /* DMA channel */ 1201 1202 unsigned int mtu; /* interface MTU value */ 1203 unsigned short type; /* interface hardware type */ 1204 unsigned short hard_header_len; /* hardware hdr length */ 1205 1206 /* extra head- and tailroom the hardware may need, but not in all cases 1207 * can this be guaranteed, especially tailroom. Some cases also use 1208 * LL_MAX_HEADER instead to allocate the skb. 1209 */ 1210 unsigned short needed_headroom; 1211 unsigned short needed_tailroom; 1212 1213 /* Interface address info. 
*/ 1214 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1215 unsigned char addr_assign_type; /* hw address assignment type */ 1216 unsigned char addr_len; /* hardware address length */ 1217 unsigned char neigh_priv_len; 1218 unsigned short dev_id; /* Used to differentiate devices 1219 * that share the same link 1220 * layer address 1221 */ 1222 spinlock_t addr_list_lock; 1223 struct netdev_hw_addr_list uc; /* Unicast mac addresses */ 1224 struct netdev_hw_addr_list mc; /* Multicast mac addresses */ 1225 struct netdev_hw_addr_list dev_addrs; /* list of device 1226 * hw addresses 1227 */ 1228 #ifdef CONFIG_SYSFS 1229 struct kset *queues_kset; 1230 #endif 1231 1232 bool uc_promisc; 1233 unsigned int promiscuity; 1234 unsigned int allmulti; 1235 1236 1237 /* Protocol specific pointers */ 1238 1239 #if IS_ENABLED(CONFIG_VLAN_8021Q) 1240 struct vlan_info __rcu *vlan_info; /* VLAN info */ 1241 #endif 1242 #if IS_ENABLED(CONFIG_NET_DSA) 1243 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ 1244 #endif 1245 void *atalk_ptr; /* AppleTalk link */ 1246 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1247 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ 1248 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 1249 void *ax25_ptr; /* AX.25 specific data */ 1250 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, 1251 assign before registering */ 1252 1253 /* 1254 * Cache lines mostly used on receive path (including eth_type_trans()) 1255 */ 1256 unsigned long last_rx; /* Time of last Rx 1257 * This should not be set in 1258 * drivers, unless really needed, 1259 * because network stack (bonding) 1260 * use it if/when necessary, to 1261 * avoid dirtying this cache line. 1262 */ 1263 1264 /* Interface address info used in eth_type_trans() */ 1265 unsigned char *dev_addr; /* hw address, (before bcast 1266 because most packets are 1267 unicast) */ 1268 1269 1270 #ifdef CONFIG_RPS 1271 struct netdev_rx_queue *_rx; 1272 1273 /* Number of RX queues allocated at register_netdev() time */ 1274 unsigned int num_rx_queues; 1275 1276 /* Number of RX queues currently active in device */ 1277 unsigned int real_num_rx_queues; 1278 1279 #endif 1280 1281 rx_handler_func_t __rcu *rx_handler; 1282 void __rcu *rx_handler_data; 1283 1284 struct netdev_queue __rcu *ingress_queue; 1285 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 1286 1287 1288 /* 1289 * Cache lines mostly used on transmit path 1290 */ 1291 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 1292 1293 /* Number of TX queues allocated at alloc_netdev_mq() time */ 1294 unsigned int num_tx_queues; 1295 1296 /* Number of TX queues currently active in device */ 1297 unsigned int real_num_tx_queues; 1298 1299 /* root qdisc from userspace point of view */ 1300 struct Qdisc *qdisc; 1301 1302 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1303 spinlock_t tx_global_lock; 1304 1305 #ifdef CONFIG_XPS 1306 struct xps_dev_maps __rcu *xps_maps; 1307 #endif 1308 #ifdef CONFIG_RFS_ACCEL 1309 /* CPU reverse-mapping for RX completion interrupts, indexed 1310 * by RX queue number. Assigned by driver. This must only be 1311 * set if the ndo_rx_flow_steer operation is defined. */ 1312 struct cpu_rmap *rx_cpu_rmap; 1313 #endif 1314 1315 /* These may be needed for future network-power-down code. */ 1316 1317 /* 1318 * trans_start here is expensive for high speed devices on SMP, 1319 * please use netdev_queue->trans_start instead. 
1320 */ 1321 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 1322 1323 int watchdog_timeo; /* used by dev_watchdog() */ 1324 struct timer_list watchdog_timer; 1325 1326 /* Number of references to this device */ 1327 int __percpu *pcpu_refcnt; 1328 1329 /* delayed register/unregister */ 1330 struct list_head todo_list; 1331 /* device index hash chain */ 1332 struct hlist_node index_hlist; 1333 1334 struct list_head link_watch_list; 1335 1336 /* register/unregister state machine */ 1337 enum { NETREG_UNINITIALIZED=0, 1338 NETREG_REGISTERED, /* completed register_netdevice */ 1339 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1340 NETREG_UNREGISTERED, /* completed unregister todo */ 1341 NETREG_RELEASED, /* called free_netdev */ 1342 NETREG_DUMMY, /* dummy device for NAPI poll */ 1343 } reg_state:8; 1344 1345 bool dismantle; /* device is going do be freed */ 1346 1347 enum { 1348 RTNL_LINK_INITIALIZED, 1349 RTNL_LINK_INITIALIZING, 1350 } rtnl_link_state:16; 1351 1352 /* Called from unregister, can be used to call free_netdev */ 1353 void (*destructor)(struct net_device *dev); 1354 1355 #ifdef CONFIG_NETPOLL 1356 struct netpoll_info __rcu *npinfo; 1357 #endif 1358 1359 #ifdef CONFIG_NET_NS 1360 /* Network namespace this network device is inside */ 1361 struct net *nd_net; 1362 #endif 1363 1364 /* mid-layer private */ 1365 union { 1366 void *ml_priv; 1367 struct pcpu_lstats __percpu *lstats; /* loopback stats */ 1368 struct pcpu_tstats __percpu *tstats; /* tunnel stats */ 1369 struct pcpu_dstats __percpu *dstats; /* dummy stats */ 1370 struct pcpu_vstats __percpu *vstats; /* veth stats */ 1371 }; 1372 /* GARP */ 1373 struct garp_port __rcu *garp_port; 1374 /* MRP */ 1375 struct mrp_port __rcu *mrp_port; 1376 1377 /* class/net/name entry */ 1378 struct device dev; 1379 /* space for optional device, statistics, and wireless sysfs groups */ 1380 const struct attribute_group *sysfs_groups[4]; 1381 1382 /* rtnetlink link ops */ 1383 const struct rtnl_link_ops *rtnl_link_ops; 1384 1385 /* for setting kernel sock attribute on TCP connection setup */ 1386 #define GSO_MAX_SIZE 65536 1387 unsigned int gso_max_size; 1388 #define GSO_MAX_SEGS 65535 1389 u16 gso_max_segs; 1390 1391 #ifdef CONFIG_DCB 1392 /* Data Center Bridging netlink ops */ 1393 const struct dcbnl_rtnl_ops *dcbnl_ops; 1394 #endif 1395 u8 num_tc; 1396 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 1397 u8 prio_tc_map[TC_BITMASK + 1]; 1398 1399 #if IS_ENABLED(CONFIG_FCOE) 1400 /* max exchange id for FCoE LRO by ddp */ 1401 unsigned int fcoe_ddp_xid; 1402 #endif 1403 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 1404 struct netprio_map __rcu *priomap; 1405 #endif 1406 /* phy device may attach itself for hardware timestamping */ 1407 struct phy_device *phydev; 1408 1409 struct lock_class_key *qdisc_tx_busylock; 1410 1411 /* group the device belongs to */ 1412 int group; 1413 1414 struct pm_qos_request pm_qos_req; 1415 }; 1416 #define to_net_dev(d) container_of(d, struct net_device, dev) 1417 1418 #define NETDEV_ALIGN 32 1419 1420 static inline 1421 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 1422 { 1423 return dev->prio_tc_map[prio & TC_BITMASK]; 1424 } 1425 1426 static inline 1427 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 1428 { 1429 if (tc >= dev->num_tc) 1430 return -EINVAL; 1431 1432 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 1433 return 0; 1434 } 1435 1436 static inline 1437 void netdev_reset_tc(struct net_device *dev) 1438 { 1439 dev->num_tc = 0; 1440 
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
					   struct sk_buff *skb);
extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return 0;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, it will cause a symlink to be created
 * during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
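 *
 * A minimal sketch of the usual driver life cycle (hypothetical "foo" driver,
 * locking and error handling omitted):
 *
 *	// probe:
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *
 *	// interrupt handler:
 *	if (napi_schedule_prep(&priv->napi)) {
 *		foo_disable_irq(priv);			// hypothetical helper
 *		__napi_schedule(&priv->napi);
 *	}
 *
 *	// poll callback:
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done = foo_clean_rx(priv, budget);	// hypothetical helper
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(priv);
 *		}
 *		return done;
 *	}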
1568 */ 1569 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 1570 int (*poll)(struct napi_struct *, int), int weight); 1571 1572 /** 1573 * netif_napi_del - remove a napi context 1574 * @napi: napi context 1575 * 1576 * netif_napi_del() removes a napi context from the network device napi list 1577 */ 1578 void netif_napi_del(struct napi_struct *napi); 1579 1580 struct napi_gro_cb { 1581 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ 1582 void *frag0; 1583 1584 /* Length of frag0. */ 1585 unsigned int frag0_len; 1586 1587 /* This indicates where we are processing relative to skb->data. */ 1588 int data_offset; 1589 1590 /* This is non-zero if the packet cannot be merged with the new skb. */ 1591 int flush; 1592 1593 /* Number of segments aggregated. */ 1594 u16 count; 1595 1596 /* This is non-zero if the packet may be of the same flow. */ 1597 u8 same_flow; 1598 1599 /* Free the skb? */ 1600 u8 free; 1601 #define NAPI_GRO_FREE 1 1602 #define NAPI_GRO_FREE_STOLEN_HEAD 2 1603 1604 /* jiffies when first packet was created/queued */ 1605 unsigned long age; 1606 1607 /* Used in ipv6_gro_receive() */ 1608 int proto; 1609 1610 /* used in skb_gro_receive() slow path */ 1611 struct sk_buff *last; 1612 }; 1613 1614 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 1615 1616 struct packet_type { 1617 __be16 type; /* This is really htons(ether_type). */ 1618 struct net_device *dev; /* NULL is wildcarded here */ 1619 int (*func) (struct sk_buff *, 1620 struct net_device *, 1621 struct packet_type *, 1622 struct net_device *); 1623 bool (*id_match)(struct packet_type *ptype, 1624 struct sock *sk); 1625 void *af_packet_priv; 1626 struct list_head list; 1627 }; 1628 1629 struct offload_callbacks { 1630 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1631 netdev_features_t features); 1632 int (*gso_send_check)(struct sk_buff *skb); 1633 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1634 struct sk_buff *skb); 1635 int (*gro_complete)(struct sk_buff *skb); 1636 }; 1637 1638 struct packet_offload { 1639 __be16 type; /* This is really htons(ether_type). */ 1640 struct offload_callbacks callbacks; 1641 struct list_head list; 1642 }; 1643 1644 #include <linux/notifier.h> 1645 1646 /* netdevice notifier chain. Please remember to update the rtnetlink 1647 * notification exclusion list in rtnetlink_event() when adding new 1648 * types. 
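 *
 * A minimal sketch of a subscriber (hypothetical "foo" code):
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *	...
 *	register_netdevice_notifier(&foo_nb);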
1649 */ 1650 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ 1651 #define NETDEV_DOWN 0x0002 1652 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface 1653 detected a hardware crash and restarted 1654 - we can use this eg to kick tcp sessions 1655 once done */ 1656 #define NETDEV_CHANGE 0x0004 /* Notify device state change */ 1657 #define NETDEV_REGISTER 0x0005 1658 #define NETDEV_UNREGISTER 0x0006 1659 #define NETDEV_CHANGEMTU 0x0007 1660 #define NETDEV_CHANGEADDR 0x0008 1661 #define NETDEV_GOING_DOWN 0x0009 1662 #define NETDEV_CHANGENAME 0x000A 1663 #define NETDEV_FEAT_CHANGE 0x000B 1664 #define NETDEV_BONDING_FAILOVER 0x000C 1665 #define NETDEV_PRE_UP 0x000D 1666 #define NETDEV_PRE_TYPE_CHANGE 0x000E 1667 #define NETDEV_POST_TYPE_CHANGE 0x000F 1668 #define NETDEV_POST_INIT 0x0010 1669 #define NETDEV_UNREGISTER_FINAL 0x0011 1670 #define NETDEV_RELEASE 0x0012 1671 #define NETDEV_NOTIFY_PEERS 0x0013 1672 #define NETDEV_JOIN 0x0014 1673 #define NETDEV_CHANGEUPPER 0x0015 1674 #define NETDEV_RESEND_IGMP 0x0016 1675 1676 extern int register_netdevice_notifier(struct notifier_block *nb); 1677 extern int unregister_netdevice_notifier(struct notifier_block *nb); 1678 1679 struct netdev_notifier_info { 1680 struct net_device *dev; 1681 }; 1682 1683 struct netdev_notifier_change_info { 1684 struct netdev_notifier_info info; /* must be first */ 1685 unsigned int flags_changed; 1686 }; 1687 1688 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 1689 struct net_device *dev) 1690 { 1691 info->dev = dev; 1692 } 1693 1694 static inline struct net_device * 1695 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 1696 { 1697 return info->dev; 1698 } 1699 1700 extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, 1701 struct netdev_notifier_info *info); 1702 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 1703 1704 1705 extern rwlock_t dev_base_lock; /* Device list lock */ 1706 1707 #define for_each_netdev(net, d) \ 1708 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 1709 #define for_each_netdev_reverse(net, d) \ 1710 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 1711 #define for_each_netdev_rcu(net, d) \ 1712 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 1713 #define for_each_netdev_safe(net, d, n) \ 1714 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 1715 #define for_each_netdev_continue(net, d) \ 1716 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 1717 #define for_each_netdev_continue_rcu(net, d) \ 1718 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 1719 #define for_each_netdev_in_bond_rcu(bond, slave) \ 1720 for_each_netdev_rcu(&init_net, slave) \ 1721 if (netdev_master_upper_dev_get_rcu(slave) == bond) 1722 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 1723 1724 static inline struct net_device *next_net_device(struct net_device *dev) 1725 { 1726 struct list_head *lh; 1727 struct net *net; 1728 1729 net = dev_net(dev); 1730 lh = dev->dev_list.next; 1731 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1732 } 1733 1734 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 1735 { 1736 struct list_head *lh; 1737 struct net *net; 1738 1739 net = dev_net(dev); 1740 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 1741 return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); 1742 } 1743 1744 static inline struct net_device *first_net_device(struct net *net) 1745 { 1746 return list_empty(&net->dev_base_head) ? NULL : 1747 net_device_entry(net->dev_base_head.next); 1748 } 1749 1750 static inline struct net_device *first_net_device_rcu(struct net *net) 1751 { 1752 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 1753 1754 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1755 } 1756 1757 extern int netdev_boot_setup_check(struct net_device *dev); 1758 extern unsigned long netdev_boot_base(const char *prefix, int unit); 1759 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 1760 const char *hwaddr); 1761 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1762 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1763 extern void dev_add_pack(struct packet_type *pt); 1764 extern void dev_remove_pack(struct packet_type *pt); 1765 extern void __dev_remove_pack(struct packet_type *pt); 1766 extern void dev_add_offload(struct packet_offload *po); 1767 extern void dev_remove_offload(struct packet_offload *po); 1768 extern void __dev_remove_offload(struct packet_offload *po); 1769 1770 extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 1771 unsigned short mask); 1772 extern struct net_device *dev_get_by_name(struct net *net, const char *name); 1773 extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 1774 extern struct net_device *__dev_get_by_name(struct net *net, const char *name); 1775 extern int dev_alloc_name(struct net_device *dev, const char *name); 1776 extern int dev_open(struct net_device *dev); 1777 extern int dev_close(struct net_device *dev); 1778 extern void dev_disable_lro(struct net_device *dev); 1779 extern int dev_loopback_xmit(struct sk_buff *newskb); 1780 extern int dev_queue_xmit(struct sk_buff *skb); 1781 extern int register_netdevice(struct net_device *dev); 1782 extern void unregister_netdevice_queue(struct net_device *dev, 1783 struct list_head *head); 1784 extern void unregister_netdevice_many(struct list_head *head); 1785 static inline void unregister_netdevice(struct net_device *dev) 1786 { 1787 unregister_netdevice_queue(dev, NULL); 1788 } 1789 1790 extern int netdev_refcnt_read(const struct net_device *dev); 1791 extern void free_netdev(struct net_device *dev); 1792 extern void synchronize_net(void); 1793 extern int init_dummy_netdev(struct net_device *dev); 1794 1795 extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1796 extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1797 extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1798 extern int netdev_get_name(struct net *net, char *name, int ifindex); 1799 extern int dev_restart(struct net_device *dev); 1800 #ifdef CONFIG_NETPOLL_TRAP 1801 extern int netpoll_trap(void); 1802 #endif 1803 extern int skb_gro_receive(struct sk_buff **head, 1804 struct sk_buff *skb); 1805 1806 static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 1807 { 1808 return NAPI_GRO_CB(skb)->data_offset; 1809 } 1810 1811 static inline unsigned int skb_gro_len(const struct sk_buff *skb) 1812 { 1813 return skb->len - NAPI_GRO_CB(skb)->data_offset; 1814 } 1815 1816 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) 1817 { 1818 NAPI_GRO_CB(skb)->data_offset += len; 1819 } 1820 1821 
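/*
 * Reference-counting sketch (illustrative only, assumed caller context):
 * dev_get_by_name() returns the device with a reference held, which the
 * caller must drop with dev_put(); __dev_get_by_name() takes no reference
 * and therefore relies on the caller holding RTNL or dev_base_lock.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */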
static inline void *skb_gro_header_fast(struct sk_buff *skb, 1822 unsigned int offset) 1823 { 1824 return NAPI_GRO_CB(skb)->frag0 + offset; 1825 } 1826 1827 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) 1828 { 1829 return NAPI_GRO_CB(skb)->frag0_len < hlen; 1830 } 1831 1832 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, 1833 unsigned int offset) 1834 { 1835 if (!pskb_may_pull(skb, hlen)) 1836 return NULL; 1837 1838 NAPI_GRO_CB(skb)->frag0 = NULL; 1839 NAPI_GRO_CB(skb)->frag0_len = 0; 1840 return skb->data + offset; 1841 } 1842 1843 static inline void *skb_gro_mac_header(struct sk_buff *skb) 1844 { 1845 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); 1846 } 1847 1848 static inline void *skb_gro_network_header(struct sk_buff *skb) 1849 { 1850 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + 1851 skb_network_offset(skb); 1852 } 1853 1854 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 1855 unsigned short type, 1856 const void *daddr, const void *saddr, 1857 unsigned int len) 1858 { 1859 if (!dev->header_ops || !dev->header_ops->create) 1860 return 0; 1861 1862 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 1863 } 1864 1865 static inline int dev_parse_header(const struct sk_buff *skb, 1866 unsigned char *haddr) 1867 { 1868 const struct net_device *dev = skb->dev; 1869 1870 if (!dev->header_ops || !dev->header_ops->parse) 1871 return 0; 1872 return dev->header_ops->parse(skb, haddr); 1873 } 1874 1875 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 1876 extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); 1877 static inline int unregister_gifconf(unsigned int family) 1878 { 1879 return register_gifconf(family, NULL); 1880 } 1881 1882 #ifdef CONFIG_NET_FLOW_LIMIT 1883 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ 1884 struct sd_flow_limit { 1885 u64 count; 1886 unsigned int num_buckets; 1887 unsigned int history_head; 1888 u16 history[FLOW_LIMIT_HISTORY]; 1889 u8 buckets[]; 1890 }; 1891 1892 extern int netdev_flow_limit_table_len; 1893 #endif /* CONFIG_NET_FLOW_LIMIT */ 1894 1895 /* 1896 * Incoming packets are placed on per-cpu queues 1897 */ 1898 struct softnet_data { 1899 struct Qdisc *output_queue; 1900 struct Qdisc **output_queue_tailp; 1901 struct list_head poll_list; 1902 struct sk_buff *completion_queue; 1903 struct sk_buff_head process_queue; 1904 1905 /* stats */ 1906 unsigned int processed; 1907 unsigned int time_squeeze; 1908 unsigned int cpu_collision; 1909 unsigned int received_rps; 1910 1911 #ifdef CONFIG_RPS 1912 struct softnet_data *rps_ipi_list; 1913 1914 /* Elements below can be accessed between CPUs for RPS */ 1915 struct call_single_data csd ____cacheline_aligned_in_smp; 1916 struct softnet_data *rps_ipi_next; 1917 unsigned int cpu; 1918 unsigned int input_queue_head; 1919 unsigned int input_queue_tail; 1920 #endif 1921 unsigned int dropped; 1922 struct sk_buff_head input_pkt_queue; 1923 struct napi_struct backlog; 1924 1925 #ifdef CONFIG_NET_FLOW_LIMIT 1926 struct sd_flow_limit __rcu *flow_limit; 1927 #endif 1928 }; 1929 1930 static inline void input_queue_head_incr(struct softnet_data *sd) 1931 { 1932 #ifdef CONFIG_RPS 1933 sd->input_queue_head++; 1934 #endif 1935 } 1936 1937 static inline void input_queue_tail_incr_save(struct softnet_data *sd, 1938 unsigned int *qtail) 1939 { 1940 #ifdef CONFIG_RPS 1941 *qtail = ++sd->input_queue_tail; 1942 #endif 1943 } 1944 1945 
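/*
 * Illustrative sketch of how the skb_gro_header_*() helpers above are meant
 * to be used from a protocol's gro_receive() callback (the iph variable and
 * the out label are assumed context; the shape follows inet_gro_receive()):
 *
 *	struct iphdr *iph;
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(*iph);
 *
 *	iph = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		iph = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!iph))
 *			goto out;
 *	}
 */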
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 1946 1947 extern void __netif_schedule(struct Qdisc *q); 1948 1949 static inline void netif_schedule_queue(struct netdev_queue *txq) 1950 { 1951 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) 1952 __netif_schedule(txq->qdisc); 1953 } 1954 1955 static inline void netif_tx_schedule_all(struct net_device *dev) 1956 { 1957 unsigned int i; 1958 1959 for (i = 0; i < dev->num_tx_queues; i++) 1960 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 1961 } 1962 1963 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 1964 { 1965 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 1966 } 1967 1968 /** 1969 * netif_start_queue - allow transmit 1970 * @dev: network device 1971 * 1972 * Allow upper layers to call the device hard_start_xmit routine. 1973 */ 1974 static inline void netif_start_queue(struct net_device *dev) 1975 { 1976 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 1977 } 1978 1979 static inline void netif_tx_start_all_queues(struct net_device *dev) 1980 { 1981 unsigned int i; 1982 1983 for (i = 0; i < dev->num_tx_queues; i++) { 1984 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 1985 netif_tx_start_queue(txq); 1986 } 1987 } 1988 1989 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) 1990 { 1991 #ifdef CONFIG_NETPOLL_TRAP 1992 if (netpoll_trap()) { 1993 netif_tx_start_queue(dev_queue); 1994 return; 1995 } 1996 #endif 1997 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) 1998 __netif_schedule(dev_queue->qdisc); 1999 } 2000 2001 /** 2002 * netif_wake_queue - restart transmit 2003 * @dev: network device 2004 * 2005 * Allow upper layers to call the device hard_start_xmit routine. 2006 * Used for flow control when transmit resources are available. 2007 */ 2008 static inline void netif_wake_queue(struct net_device *dev) 2009 { 2010 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 2011 } 2012 2013 static inline void netif_tx_wake_all_queues(struct net_device *dev) 2014 { 2015 unsigned int i; 2016 2017 for (i = 0; i < dev->num_tx_queues; i++) { 2018 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2019 netif_tx_wake_queue(txq); 2020 } 2021 } 2022 2023 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 2024 { 2025 if (WARN_ON(!dev_queue)) { 2026 pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); 2027 return; 2028 } 2029 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2030 } 2031 2032 /** 2033 * netif_stop_queue - stop transmitted packets 2034 * @dev: network device 2035 * 2036 * Stop upper layers calling the device hard_start_xmit routine. 2037 * Used for flow control when transmit resources are unavailable. 2038 */ 2039 static inline void netif_stop_queue(struct net_device *dev) 2040 { 2041 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 2042 } 2043 2044 static inline void netif_tx_stop_all_queues(struct net_device *dev) 2045 { 2046 unsigned int i; 2047 2048 for (i = 0; i < dev->num_tx_queues; i++) { 2049 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2050 netif_tx_stop_queue(txq); 2051 } 2052 } 2053 2054 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 2055 { 2056 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2057 } 2058 2059 /** 2060 * netif_queue_stopped - test if transmit queue is flowblocked 2061 * @dev: network device 2062 * 2063 * Test if transmit queue on device is currently unable to send. 
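 *
 * A minimal flow-control sketch (hypothetical driver, illustrative only;
 * ring_space() is an assumed helper returning free TX descriptors): the
 * ->ndo_start_xmit() handler stops the queue when its ring fills up, and
 * the TX completion path wakes it once enough descriptors are reclaimed:
 *
 *	if (ring_space(priv) < MAX_SKB_FRAGS + 1)
 *		netif_stop_queue(dev);
 *
 *	...
 *
 *	if (netif_queue_stopped(dev) &&
 *	    ring_space(priv) >= MAX_SKB_FRAGS + 1)
 *		netif_wake_queue(dev);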
2064 */ 2065 static inline bool netif_queue_stopped(const struct net_device *dev) 2066 { 2067 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 2068 } 2069 2070 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 2071 { 2072 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 2073 } 2074 2075 static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 2076 { 2077 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 2078 } 2079 2080 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 2081 unsigned int bytes) 2082 { 2083 #ifdef CONFIG_BQL 2084 dql_queued(&dev_queue->dql, bytes); 2085 2086 if (likely(dql_avail(&dev_queue->dql) >= 0)) 2087 return; 2088 2089 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 2090 2091 /* 2092 * The XOFF flag must be set before checking the dql_avail below, 2093 * because in netdev_tx_completed_queue we update the dql_completed 2094 * before checking the XOFF flag. 2095 */ 2096 smp_mb(); 2097 2098 /* check again in case another CPU has just made room avail */ 2099 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 2100 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 2101 #endif 2102 } 2103 2104 /** 2105 * netdev_sent_queue - report the number of bytes queued to hardware 2106 * @dev: network device 2107 * @bytes: number of bytes queued to the hardware device queue 2108 * 2109 * Report the number of bytes queued for sending/completion to the network 2110 * device hardware queue. @bytes should be a good approximation and must 2111 * exactly match the @bytes later reported via netdev_completed_queue(). 2112 */ 2113 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 2114 { 2115 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 2116 } 2117 2118 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 2119 unsigned int pkts, unsigned int bytes) 2120 { 2121 #ifdef CONFIG_BQL 2122 if (unlikely(!bytes)) 2123 return; 2124 2125 dql_completed(&dev_queue->dql, bytes); 2126 2127 /* 2128 * Without the memory barrier there is a small possibility that 2129 * netdev_tx_sent_queue will miss the update and cause the queue to 2130 * be stopped forever. 2131 */ 2132 smp_mb(); 2133 2134 if (dql_avail(&dev_queue->dql) < 0) 2135 return; 2136 2137 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 2138 netif_schedule_queue(dev_queue); 2139 #endif 2140 } 2141 2142 /** 2143 * netdev_completed_queue - report bytes and packets completed by device 2144 * @dev: network device 2145 * @pkts: actual number of packets sent over the medium 2146 * @bytes: actual number of bytes sent over the medium 2147 * 2148 * Report the number of bytes and packets transmitted by the network device 2149 * hardware queue over the physical medium; @bytes must exactly match the 2150 * @bytes amount passed to netdev_sent_queue(). 2151 */ 2152 static inline void netdev_completed_queue(struct net_device *dev, 2153 unsigned int pkts, unsigned int bytes) 2154 { 2155 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 2156 } 2157 2158 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 2159 { 2160 #ifdef CONFIG_BQL 2161 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 2162 dql_reset(&q->dql); 2163 #endif 2164 } 2165 2166 /** 2167 * netdev_reset_queue - reset the packets and bytes count of a network device 2168 * @dev_queue: network device 2169 * 2170 * Reset the bytes and packet count of a network device and clear the 2171 * software flow control OFF bit for
this network device 2172 */ 2173 static inline void netdev_reset_queue(struct net_device *dev_queue) 2174 { 2175 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 2176 } 2177 2178 /** 2179 * netif_running - test if up 2180 * @dev: network device 2181 * 2182 * Test if the device has been brought up. 2183 */ 2184 static inline bool netif_running(const struct net_device *dev) 2185 { 2186 return test_bit(__LINK_STATE_START, &dev->state); 2187 } 2188 2189 /* 2190 * Routines to manage the subqueues on a device. We only need start 2191 * stop, and a check if it's stopped. All other device management is 2192 * done at the overall netdevice level. 2193 * Also test the device if we're multiqueue. 2194 */ 2195 2196 /** 2197 * netif_start_subqueue - allow sending packets on subqueue 2198 * @dev: network device 2199 * @queue_index: sub queue index 2200 * 2201 * Start individual transmit queue of a device with multiple transmit queues. 2202 */ 2203 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 2204 { 2205 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2206 2207 netif_tx_start_queue(txq); 2208 } 2209 2210 /** 2211 * netif_stop_subqueue - stop sending packets on subqueue 2212 * @dev: network device 2213 * @queue_index: sub queue index 2214 * 2215 * Stop individual transmit queue of a device with multiple transmit queues. 2216 */ 2217 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 2218 { 2219 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2220 #ifdef CONFIG_NETPOLL_TRAP 2221 if (netpoll_trap()) 2222 return; 2223 #endif 2224 netif_tx_stop_queue(txq); 2225 } 2226 2227 /** 2228 * netif_subqueue_stopped - test status of subqueue 2229 * @dev: network device 2230 * @queue_index: sub queue index 2231 * 2232 * Check individual transmit queue of a device with multiple transmit queues. 2233 */ 2234 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 2235 u16 queue_index) 2236 { 2237 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2238 2239 return netif_tx_queue_stopped(txq); 2240 } 2241 2242 static inline bool netif_subqueue_stopped(const struct net_device *dev, 2243 struct sk_buff *skb) 2244 { 2245 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 2246 } 2247 2248 /** 2249 * netif_wake_subqueue - allow sending packets on subqueue 2250 * @dev: network device 2251 * @queue_index: sub queue index 2252 * 2253 * Resume individual transmit queue of a device with multiple transmit queues. 2254 */ 2255 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 2256 { 2257 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2258 #ifdef CONFIG_NETPOLL_TRAP 2259 if (netpoll_trap()) 2260 return; 2261 #endif 2262 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) 2263 __netif_schedule(txq->qdisc); 2264 } 2265 2266 #ifdef CONFIG_XPS 2267 extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, 2268 u16 index); 2269 #else 2270 static inline int netif_set_xps_queue(struct net_device *dev, 2271 struct cpumask *mask, 2272 u16 index) 2273 { 2274 return 0; 2275 } 2276 #endif 2277 2278 /* 2279 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used 2280 * as a distribution range limit for the returned value. 
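 *
 * For instance, a hypothetical ->ndo_select_queue() implementation that only
 * wants to spread flows across the real queues could be (illustrative only):
 *
 *	static u16 mydrv_select_queue(struct net_device *dev,
 *				      struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}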
2281 */ 2282 static inline u16 skb_tx_hash(const struct net_device *dev, 2283 const struct sk_buff *skb) 2284 { 2285 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); 2286 } 2287 2288 /** 2289 * netif_is_multiqueue - test if device has multiple transmit queues 2290 * @dev: network device 2291 * 2292 * Check if device has multiple transmit queues 2293 */ 2294 static inline bool netif_is_multiqueue(const struct net_device *dev) 2295 { 2296 return dev->num_tx_queues > 1; 2297 } 2298 2299 extern int netif_set_real_num_tx_queues(struct net_device *dev, 2300 unsigned int txq); 2301 2302 #ifdef CONFIG_RPS 2303 extern int netif_set_real_num_rx_queues(struct net_device *dev, 2304 unsigned int rxq); 2305 #else 2306 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 2307 unsigned int rxq) 2308 { 2309 return 0; 2310 } 2311 #endif 2312 2313 static inline int netif_copy_real_num_queues(struct net_device *to_dev, 2314 const struct net_device *from_dev) 2315 { 2316 int err; 2317 2318 err = netif_set_real_num_tx_queues(to_dev, 2319 from_dev->real_num_tx_queues); 2320 if (err) 2321 return err; 2322 #ifdef CONFIG_RPS 2323 return netif_set_real_num_rx_queues(to_dev, 2324 from_dev->real_num_rx_queues); 2325 #else 2326 return 0; 2327 #endif 2328 } 2329 2330 #define DEFAULT_MAX_NUM_RSS_QUEUES (8) 2331 extern int netif_get_num_default_rss_queues(void); 2332 2333 /* Use this variant when it is known for sure that it 2334 * is executing from hardware interrupt context or with hardware interrupts 2335 * disabled. 2336 */ 2337 extern void dev_kfree_skb_irq(struct sk_buff *skb); 2338 2339 /* Use this variant in places where it could be invoked 2340 * from either hardware interrupt or other context, with hardware interrupts 2341 * either disabled or enabled. 
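 *
 * For example, a hypothetical TX-reclaim routine shared by the hard
 * interrupt handler and a timeout worker would simply do
 *
 *	dev_kfree_skb_any(skb);
 *
 * rather than choosing between dev_kfree_skb() and dev_kfree_skb_irq()
 * at each call site.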
2342 */ 2343 extern void dev_kfree_skb_any(struct sk_buff *skb); 2344 2345 extern int netif_rx(struct sk_buff *skb); 2346 extern int netif_rx_ni(struct sk_buff *skb); 2347 extern int netif_receive_skb(struct sk_buff *skb); 2348 extern gro_result_t napi_gro_receive(struct napi_struct *napi, 2349 struct sk_buff *skb); 2350 extern void napi_gro_flush(struct napi_struct *napi, bool flush_old); 2351 extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 2352 extern gro_result_t napi_gro_frags(struct napi_struct *napi); 2353 2354 static inline void napi_free_frags(struct napi_struct *napi) 2355 { 2356 kfree_skb(napi->skb); 2357 napi->skb = NULL; 2358 } 2359 2360 extern int netdev_rx_handler_register(struct net_device *dev, 2361 rx_handler_func_t *rx_handler, 2362 void *rx_handler_data); 2363 extern void netdev_rx_handler_unregister(struct net_device *dev); 2364 2365 extern bool dev_valid_name(const char *name); 2366 extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 2367 extern int dev_ethtool(struct net *net, struct ifreq *); 2368 extern unsigned int dev_get_flags(const struct net_device *); 2369 extern int __dev_change_flags(struct net_device *, unsigned int flags); 2370 extern int dev_change_flags(struct net_device *, unsigned int); 2371 extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); 2372 extern int dev_change_name(struct net_device *, const char *); 2373 extern int dev_set_alias(struct net_device *, const char *, size_t); 2374 extern int dev_change_net_namespace(struct net_device *, 2375 struct net *, const char *); 2376 extern int dev_set_mtu(struct net_device *, int); 2377 extern void dev_set_group(struct net_device *, int); 2378 extern int dev_set_mac_address(struct net_device *, 2379 struct sockaddr *); 2380 extern int dev_change_carrier(struct net_device *, 2381 bool new_carrier); 2382 extern int dev_get_phys_port_id(struct net_device *dev, 2383 struct netdev_phys_port_id *ppid); 2384 extern int dev_hard_start_xmit(struct sk_buff *skb, 2385 struct net_device *dev, 2386 struct netdev_queue *txq); 2387 extern int dev_forward_skb(struct net_device *dev, 2388 struct sk_buff *skb); 2389 2390 extern int netdev_budget; 2391 2392 /* Called by rtnetlink.c:rtnl_unlock() */ 2393 extern void netdev_run_todo(void); 2394 2395 /** 2396 * dev_put - release reference to device 2397 * @dev: network device 2398 * 2399 * Release reference to device to allow it to be freed. 2400 */ 2401 static inline void dev_put(struct net_device *dev) 2402 { 2403 this_cpu_dec(*dev->pcpu_refcnt); 2404 } 2405 2406 /** 2407 * dev_hold - get reference to device 2408 * @dev: network device 2409 * 2410 * Hold reference to device to keep it from being freed. 2411 */ 2412 static inline void dev_hold(struct net_device *dev) 2413 { 2414 this_cpu_inc(*dev->pcpu_refcnt); 2415 } 2416 2417 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 2418 * and _off may be called from IRQ context, but it is caller 2419 * who is responsible for serialization of these calls. 2420 * 2421 * The name carrier is inappropriate, these functions should really be 2422 * called netif_lowerlayer_*() because they represent the state of any 2423 * kind of lower layer not just hardware media. 
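 *
 * A minimal sketch (hypothetical driver link-state handler, illustrative
 * only; link_is_up() is an assumed helper reading PHY/MAC status):
 *
 *	if (link_is_up(priv))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);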
2424 */ 2425 2426 extern void linkwatch_init_dev(struct net_device *dev); 2427 extern void linkwatch_fire_event(struct net_device *dev); 2428 extern void linkwatch_forget_dev(struct net_device *dev); 2429 2430 /** 2431 * netif_carrier_ok - test if carrier present 2432 * @dev: network device 2433 * 2434 * Check if carrier is present on device 2435 */ 2436 static inline bool netif_carrier_ok(const struct net_device *dev) 2437 { 2438 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 2439 } 2440 2441 extern unsigned long dev_trans_start(struct net_device *dev); 2442 2443 extern void __netdev_watchdog_up(struct net_device *dev); 2444 2445 extern void netif_carrier_on(struct net_device *dev); 2446 2447 extern void netif_carrier_off(struct net_device *dev); 2448 2449 /** 2450 * netif_dormant_on - mark device as dormant. 2451 * @dev: network device 2452 * 2453 * Mark device as dormant (as per RFC2863). 2454 * 2455 * The dormant state indicates that the relevant interface is not 2456 * actually in a condition to pass packets (i.e., it is not 'up') but is 2457 * in a "pending" state, waiting for some external event. For "on- 2458 * demand" interfaces, this new state identifies the situation where the 2459 * interface is waiting for events to place it in the up state. 2460 * 2461 */ 2462 static inline void netif_dormant_on(struct net_device *dev) 2463 { 2464 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 2465 linkwatch_fire_event(dev); 2466 } 2467 2468 /** 2469 * netif_dormant_off - mark device as no longer dormant. 2470 * @dev: network device 2471 * 2472 * Clear the dormant state of the device (as per RFC2863). 2473 */ 2474 static inline void netif_dormant_off(struct net_device *dev) 2475 { 2476 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 2477 linkwatch_fire_event(dev); 2478 } 2479 2480 /** 2481 * netif_dormant - test if device is dormant 2482 * @dev: network device 2483 * 2484 * Check if the device is in the dormant state (as per RFC2863). 2485 */ 2486 static inline bool netif_dormant(const struct net_device *dev) 2487 { 2488 return test_bit(__LINK_STATE_DORMANT, &dev->state); 2489 } 2490 2491 2492 /** 2493 * netif_oper_up - test if device is operational 2494 * @dev: network device 2495 * 2496 * Check if the device's RFC2863 operational state is up. 2497 */ 2498 static inline bool netif_oper_up(const struct net_device *dev) 2499 { 2500 return (dev->operstate == IF_OPER_UP || 2501 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 2502 } 2503 2504 /** 2505 * netif_device_present - is device available or removed 2506 * @dev: network device 2507 * 2508 * Check if the device has not been removed from the system.
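 *
 * Typical (illustrative) usage in a hypothetical driver: suspend calls
 * netif_device_detach(dev) before powering the hardware down, resume calls
 * netif_device_attach(dev) once the hardware is usable again, and hot paths
 * bail out while the device is absent:
 *
 *	if (!netif_device_present(dev))
 *		return;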
2509 */ 2510 static inline bool netif_device_present(struct net_device *dev) 2511 { 2512 return test_bit(__LINK_STATE_PRESENT, &dev->state); 2513 } 2514 2515 extern void netif_device_detach(struct net_device *dev); 2516 2517 extern void netif_device_attach(struct net_device *dev); 2518 2519 /* 2520 * Network interface message level settings 2521 */ 2522 2523 enum { 2524 NETIF_MSG_DRV = 0x0001, 2525 NETIF_MSG_PROBE = 0x0002, 2526 NETIF_MSG_LINK = 0x0004, 2527 NETIF_MSG_TIMER = 0x0008, 2528 NETIF_MSG_IFDOWN = 0x0010, 2529 NETIF_MSG_IFUP = 0x0020, 2530 NETIF_MSG_RX_ERR = 0x0040, 2531 NETIF_MSG_TX_ERR = 0x0080, 2532 NETIF_MSG_TX_QUEUED = 0x0100, 2533 NETIF_MSG_INTR = 0x0200, 2534 NETIF_MSG_TX_DONE = 0x0400, 2535 NETIF_MSG_RX_STATUS = 0x0800, 2536 NETIF_MSG_PKTDATA = 0x1000, 2537 NETIF_MSG_HW = 0x2000, 2538 NETIF_MSG_WOL = 0x4000, 2539 }; 2540 2541 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 2542 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 2543 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 2544 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 2545 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 2546 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 2547 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 2548 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 2549 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 2550 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 2551 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 2552 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 2553 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 2554 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 2555 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 2556 2557 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 2558 { 2559 /* use default */ 2560 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 2561 return default_msg_enable_bits; 2562 if (debug_value == 0) /* no output */ 2563 return 0; 2564 /* set low N bits */ 2565 return (1 << debug_value) - 1; 2566 } 2567 2568 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 2569 { 2570 spin_lock(&txq->_xmit_lock); 2571 txq->xmit_lock_owner = cpu; 2572 } 2573 2574 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 2575 { 2576 spin_lock_bh(&txq->_xmit_lock); 2577 txq->xmit_lock_owner = smp_processor_id(); 2578 } 2579 2580 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 2581 { 2582 bool ok = spin_trylock(&txq->_xmit_lock); 2583 if (likely(ok)) 2584 txq->xmit_lock_owner = smp_processor_id(); 2585 return ok; 2586 } 2587 2588 static inline void __netif_tx_unlock(struct netdev_queue *txq) 2589 { 2590 txq->xmit_lock_owner = -1; 2591 spin_unlock(&txq->_xmit_lock); 2592 } 2593 2594 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 2595 { 2596 txq->xmit_lock_owner = -1; 2597 spin_unlock_bh(&txq->_xmit_lock); 2598 } 2599 2600 static inline void txq_trans_update(struct netdev_queue *txq) 2601 { 2602 if (txq->xmit_lock_owner != -1) 2603 txq->trans_start = jiffies; 2604 } 2605 2606 /** 2607 * netif_tx_lock - grab network device transmit lock 2608 * @dev: network device 2609 * 2610 * Get network device transmit lock 2611 */ 2612 static inline void netif_tx_lock(struct net_device *dev) 2613 { 2614 unsigned int i; 2615 int cpu; 2616 2617 spin_lock(&dev->tx_global_lock); 
2618 cpu = smp_processor_id(); 2619 for (i = 0; i < dev->num_tx_queues; i++) { 2620 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2621 2622 /* We are the only thread of execution doing a 2623 * freeze, but we have to grab the _xmit_lock in 2624 * order to synchronize with threads which are in 2625 * the ->hard_start_xmit() handler and already 2626 * checked the frozen bit. 2627 */ 2628 __netif_tx_lock(txq, cpu); 2629 set_bit(__QUEUE_STATE_FROZEN, &txq->state); 2630 __netif_tx_unlock(txq); 2631 } 2632 } 2633 2634 static inline void netif_tx_lock_bh(struct net_device *dev) 2635 { 2636 local_bh_disable(); 2637 netif_tx_lock(dev); 2638 } 2639 2640 static inline void netif_tx_unlock(struct net_device *dev) 2641 { 2642 unsigned int i; 2643 2644 for (i = 0; i < dev->num_tx_queues; i++) { 2645 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2646 2647 /* No need to grab the _xmit_lock here. If the 2648 * queue is not stopped for another reason, we 2649 * force a schedule. 2650 */ 2651 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); 2652 netif_schedule_queue(txq); 2653 } 2654 spin_unlock(&dev->tx_global_lock); 2655 } 2656 2657 static inline void netif_tx_unlock_bh(struct net_device *dev) 2658 { 2659 netif_tx_unlock(dev); 2660 local_bh_enable(); 2661 } 2662 2663 #define HARD_TX_LOCK(dev, txq, cpu) { \ 2664 if ((dev->features & NETIF_F_LLTX) == 0) { \ 2665 __netif_tx_lock(txq, cpu); \ 2666 } \ 2667 } 2668 2669 #define HARD_TX_UNLOCK(dev, txq) { \ 2670 if ((dev->features & NETIF_F_LLTX) == 0) { \ 2671 __netif_tx_unlock(txq); \ 2672 } \ 2673 } 2674 2675 static inline void netif_tx_disable(struct net_device *dev) 2676 { 2677 unsigned int i; 2678 int cpu; 2679 2680 local_bh_disable(); 2681 cpu = smp_processor_id(); 2682 for (i = 0; i < dev->num_tx_queues; i++) { 2683 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2684 2685 __netif_tx_lock(txq, cpu); 2686 netif_tx_stop_queue(txq); 2687 __netif_tx_unlock(txq); 2688 } 2689 local_bh_enable(); 2690 } 2691 2692 static inline void netif_addr_lock(struct net_device *dev) 2693 { 2694 spin_lock(&dev->addr_list_lock); 2695 } 2696 2697 static inline void netif_addr_lock_nested(struct net_device *dev) 2698 { 2699 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); 2700 } 2701 2702 static inline void netif_addr_lock_bh(struct net_device *dev) 2703 { 2704 spin_lock_bh(&dev->addr_list_lock); 2705 } 2706 2707 static inline void netif_addr_unlock(struct net_device *dev) 2708 { 2709 spin_unlock(&dev->addr_list_lock); 2710 } 2711 2712 static inline void netif_addr_unlock_bh(struct net_device *dev) 2713 { 2714 spin_unlock_bh(&dev->addr_list_lock); 2715 } 2716 2717 /* 2718 * dev_addrs walker. Should be used only for read access. Call with 2719 * rcu_read_lock held. 
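 *
 * Minimal usage sketch (illustrative only):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("%s: %pM\n", dev->name, ha->addr);
 *	rcu_read_unlock();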
2720 */ 2721 #define for_each_dev_addr(dev, ha) \ 2722 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 2723 2724 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 2725 2726 extern void ether_setup(struct net_device *dev); 2727 2728 /* Support for loadable net-drivers */ 2729 extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 2730 void (*setup)(struct net_device *), 2731 unsigned int txqs, unsigned int rxqs); 2732 #define alloc_netdev(sizeof_priv, name, setup) \ 2733 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) 2734 2735 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ 2736 alloc_netdev_mqs(sizeof_priv, name, setup, count, count) 2737 2738 extern int register_netdev(struct net_device *dev); 2739 extern void unregister_netdev(struct net_device *dev); 2740 2741 /* General hardware address lists handling functions */ 2742 extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, 2743 struct netdev_hw_addr_list *from_list, 2744 int addr_len, unsigned char addr_type); 2745 extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, 2746 struct netdev_hw_addr_list *from_list, 2747 int addr_len, unsigned char addr_type); 2748 extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 2749 struct netdev_hw_addr_list *from_list, 2750 int addr_len); 2751 extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 2752 struct netdev_hw_addr_list *from_list, 2753 int addr_len); 2754 extern void __hw_addr_flush(struct netdev_hw_addr_list *list); 2755 extern void __hw_addr_init(struct netdev_hw_addr_list *list); 2756 2757 /* Functions used for device addresses handling */ 2758 extern int dev_addr_add(struct net_device *dev, const unsigned char *addr, 2759 unsigned char addr_type); 2760 extern int dev_addr_del(struct net_device *dev, const unsigned char *addr, 2761 unsigned char addr_type); 2762 extern int dev_addr_add_multiple(struct net_device *to_dev, 2763 struct net_device *from_dev, 2764 unsigned char addr_type); 2765 extern int dev_addr_del_multiple(struct net_device *to_dev, 2766 struct net_device *from_dev, 2767 unsigned char addr_type); 2768 extern void dev_addr_flush(struct net_device *dev); 2769 extern int dev_addr_init(struct net_device *dev); 2770 2771 /* Functions used for unicast addresses handling */ 2772 extern int dev_uc_add(struct net_device *dev, const unsigned char *addr); 2773 extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 2774 extern int dev_uc_del(struct net_device *dev, const unsigned char *addr); 2775 extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2776 extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 2777 extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2778 extern void dev_uc_flush(struct net_device *dev); 2779 extern void dev_uc_init(struct net_device *dev); 2780 2781 /* Functions used for multicast addresses handling */ 2782 extern int dev_mc_add(struct net_device *dev, const unsigned char *addr); 2783 extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 2784 extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 2785 extern int dev_mc_del(struct net_device *dev, const unsigned char *addr); 2786 extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 2787 extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2788 extern int dev_mc_sync_multiple(struct net_device 
*to, struct net_device *from); 2789 extern void dev_mc_unsync(struct net_device *to, struct net_device *from); 2790 extern void dev_mc_flush(struct net_device *dev); 2791 extern void dev_mc_init(struct net_device *dev); 2792 2793 /* Functions used for secondary unicast and multicast support */ 2794 extern void dev_set_rx_mode(struct net_device *dev); 2795 extern void __dev_set_rx_mode(struct net_device *dev); 2796 extern int dev_set_promiscuity(struct net_device *dev, int inc); 2797 extern int dev_set_allmulti(struct net_device *dev, int inc); 2798 extern void netdev_state_change(struct net_device *dev); 2799 extern void netdev_notify_peers(struct net_device *dev); 2800 extern void netdev_features_change(struct net_device *dev); 2801 /* Load a device via the kmod */ 2802 extern void dev_load(struct net *net, const char *name); 2803 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 2804 struct rtnl_link_stats64 *storage); 2805 extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 2806 const struct net_device_stats *netdev_stats); 2807 2808 extern int netdev_max_backlog; 2809 extern int netdev_tstamp_prequeue; 2810 extern int weight_p; 2811 extern int bpf_jit_enable; 2812 2813 extern bool netdev_has_upper_dev(struct net_device *dev, 2814 struct net_device *upper_dev); 2815 extern bool netdev_has_any_upper_dev(struct net_device *dev); 2816 extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 2817 struct list_head **iter); 2818 2819 /* iterate through upper list, must be called under RCU read lock */ 2820 #define netdev_for_each_upper_dev_rcu(dev, upper, iter) \ 2821 for (iter = &(dev)->upper_dev_list, \ 2822 upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 2823 upper; \ 2824 upper = netdev_upper_get_next_dev_rcu(dev, &(iter))) 2825 2826 extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 2827 extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 2828 extern int netdev_upper_dev_link(struct net_device *dev, 2829 struct net_device *upper_dev); 2830 extern int netdev_master_upper_dev_link(struct net_device *dev, 2831 struct net_device *upper_dev); 2832 extern void netdev_upper_dev_unlink(struct net_device *dev, 2833 struct net_device *upper_dev); 2834 extern int skb_checksum_help(struct sk_buff *skb); 2835 extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2836 netdev_features_t features, bool tx_path); 2837 extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2838 netdev_features_t features); 2839 2840 static inline 2841 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 2842 { 2843 return __skb_gso_segment(skb, features, true); 2844 } 2845 __be16 skb_network_protocol(struct sk_buff *skb); 2846 2847 static inline bool can_checksum_protocol(netdev_features_t features, 2848 __be16 protocol) 2849 { 2850 return ((features & NETIF_F_GEN_CSUM) || 2851 ((features & NETIF_F_V4_CSUM) && 2852 protocol == htons(ETH_P_IP)) || 2853 ((features & NETIF_F_V6_CSUM) && 2854 protocol == htons(ETH_P_IPV6)) || 2855 ((features & NETIF_F_FCOE_CRC) && 2856 protocol == htons(ETH_P_FCOE))); 2857 } 2858 2859 #ifdef CONFIG_BUG 2860 extern void netdev_rx_csum_fault(struct net_device *dev); 2861 #else 2862 static inline void netdev_rx_csum_fault(struct net_device *dev) 2863 { 2864 } 2865 #endif 2866 /* rx skb timestamps */ 2867 extern void net_enable_timestamp(void); 2868 extern void net_disable_timestamp(void); 2869 2870 #ifdef CONFIG_PROC_FS 2871 
extern int __init dev_proc_init(void); 2872 #else 2873 #define dev_proc_init() 0 2874 #endif 2875 2876 extern int netdev_class_create_file(struct class_attribute *class_attr); 2877 extern void netdev_class_remove_file(struct class_attribute *class_attr); 2878 2879 extern struct kobj_ns_type_operations net_ns_type_operations; 2880 2881 extern const char *netdev_drivername(const struct net_device *dev); 2882 2883 extern void linkwatch_run_queue(void); 2884 2885 static inline netdev_features_t netdev_get_wanted_features( 2886 struct net_device *dev) 2887 { 2888 return (dev->features & ~dev->hw_features) | dev->wanted_features; 2889 } 2890 netdev_features_t netdev_increment_features(netdev_features_t all, 2891 netdev_features_t one, netdev_features_t mask); 2892 2893 /* Allow TSO being used on stacked device : 2894 * Performing the GSO segmentation before last device 2895 * is a performance improvement. 2896 */ 2897 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 2898 netdev_features_t mask) 2899 { 2900 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 2901 } 2902 2903 int __netdev_update_features(struct net_device *dev); 2904 void netdev_update_features(struct net_device *dev); 2905 void netdev_change_features(struct net_device *dev); 2906 2907 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2908 struct net_device *dev); 2909 2910 netdev_features_t netif_skb_features(struct sk_buff *skb); 2911 2912 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 2913 { 2914 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; 2915 2916 /* check flags correspondence */ 2917 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 2918 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); 2919 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 2920 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 2921 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 2922 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 2923 2924 return (features & feature) == feature; 2925 } 2926 2927 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 2928 { 2929 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 2930 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2931 } 2932 2933 static inline bool netif_needs_gso(struct sk_buff *skb, 2934 netdev_features_t features) 2935 { 2936 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 2937 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 2938 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 2939 } 2940 2941 static inline void netif_set_gso_max_size(struct net_device *dev, 2942 unsigned int size) 2943 { 2944 dev->gso_max_size = size; 2945 } 2946 2947 static inline bool netif_is_bond_master(struct net_device *dev) 2948 { 2949 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 2950 } 2951 2952 static inline bool netif_is_bond_slave(struct net_device *dev) 2953 { 2954 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 2955 } 2956 2957 static inline bool netif_supports_nofcs(struct net_device *dev) 2958 { 2959 return dev->priv_flags & IFF_SUPP_NOFCS; 2960 } 2961 2962 extern struct pernet_operations __net_initdata loopback_net_ops; 2963 2964 /* Logging, debugging and troubleshooting/diagnostic helpers. 
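 *
 * The netdev_*() and netif_*() helpers below are used roughly like this
 * (hypothetical driver, illustrative only; priv->msg_enable is assumed to
 * have been filled in with netif_msg_init()):
 *
 *	netdev_warn(dev, "TX ring %d exhausted\n", i);
 *	netif_err(priv, tx_err, dev, "DMA mapping failed\n");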
*/ 2965 2966 /* netdev_printk helpers, similar to dev_printk */ 2967 2968 static inline const char *netdev_name(const struct net_device *dev) 2969 { 2970 if (dev->reg_state != NETREG_REGISTERED) 2971 return "(unregistered net_device)"; 2972 return dev->name; 2973 } 2974 2975 extern __printf(3, 4) 2976 int netdev_printk(const char *level, const struct net_device *dev, 2977 const char *format, ...); 2978 extern __printf(2, 3) 2979 int netdev_emerg(const struct net_device *dev, const char *format, ...); 2980 extern __printf(2, 3) 2981 int netdev_alert(const struct net_device *dev, const char *format, ...); 2982 extern __printf(2, 3) 2983 int netdev_crit(const struct net_device *dev, const char *format, ...); 2984 extern __printf(2, 3) 2985 int netdev_err(const struct net_device *dev, const char *format, ...); 2986 extern __printf(2, 3) 2987 int netdev_warn(const struct net_device *dev, const char *format, ...); 2988 extern __printf(2, 3) 2989 int netdev_notice(const struct net_device *dev, const char *format, ...); 2990 extern __printf(2, 3) 2991 int netdev_info(const struct net_device *dev, const char *format, ...); 2992 2993 #define MODULE_ALIAS_NETDEV(device) \ 2994 MODULE_ALIAS("netdev-" device) 2995 2996 #if defined(CONFIG_DYNAMIC_DEBUG) 2997 #define netdev_dbg(__dev, format, args...) \ 2998 do { \ 2999 dynamic_netdev_dbg(__dev, format, ##args); \ 3000 } while (0) 3001 #elif defined(DEBUG) 3002 #define netdev_dbg(__dev, format, args...) \ 3003 netdev_printk(KERN_DEBUG, __dev, format, ##args) 3004 #else 3005 #define netdev_dbg(__dev, format, args...) \ 3006 ({ \ 3007 if (0) \ 3008 netdev_printk(KERN_DEBUG, __dev, format, ##args); \ 3009 0; \ 3010 }) 3011 #endif 3012 3013 #if defined(VERBOSE_DEBUG) 3014 #define netdev_vdbg netdev_dbg 3015 #else 3016 3017 #define netdev_vdbg(dev, format, args...) \ 3018 ({ \ 3019 if (0) \ 3020 netdev_printk(KERN_DEBUG, dev, format, ##args); \ 3021 0; \ 3022 }) 3023 #endif 3024 3025 /* 3026 * netdev_WARN() acts like dev_printk(), but with the key difference 3027 * of using a WARN/WARN_ON to get the message out, including the 3028 * file/line information and a backtrace. 3029 */ 3030 #define netdev_WARN(dev, format, args...) \ 3031 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); 3032 3033 /* netif printk helpers, similar to netdev_printk */ 3034 3035 #define netif_printk(priv, type, level, dev, fmt, args...) \ 3036 do { \ 3037 if (netif_msg_##type(priv)) \ 3038 netdev_printk(level, (dev), fmt, ##args); \ 3039 } while (0) 3040 3041 #define netif_level(level, priv, type, dev, fmt, args...) \ 3042 do { \ 3043 if (netif_msg_##type(priv)) \ 3044 netdev_##level(dev, fmt, ##args); \ 3045 } while (0) 3046 3047 #define netif_emerg(priv, type, dev, fmt, args...) \ 3048 netif_level(emerg, priv, type, dev, fmt, ##args) 3049 #define netif_alert(priv, type, dev, fmt, args...) \ 3050 netif_level(alert, priv, type, dev, fmt, ##args) 3051 #define netif_crit(priv, type, dev, fmt, args...) \ 3052 netif_level(crit, priv, type, dev, fmt, ##args) 3053 #define netif_err(priv, type, dev, fmt, args...) \ 3054 netif_level(err, priv, type, dev, fmt, ##args) 3055 #define netif_warn(priv, type, dev, fmt, args...) \ 3056 netif_level(warn, priv, type, dev, fmt, ##args) 3057 #define netif_notice(priv, type, dev, fmt, args...) \ 3058 netif_level(notice, priv, type, dev, fmt, ##args) 3059 #define netif_info(priv, type, dev, fmt, args...) 
\ 3060 netif_level(info, priv, type, dev, fmt, ##args) 3061 3062 #if defined(CONFIG_DYNAMIC_DEBUG) 3063 #define netif_dbg(priv, type, netdev, format, args...) \ 3064 do { \ 3065 if (netif_msg_##type(priv)) \ 3066 dynamic_netdev_dbg(netdev, format, ##args); \ 3067 } while (0) 3068 #elif defined(DEBUG) 3069 #define netif_dbg(priv, type, dev, format, args...) \ 3070 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) 3071 #else 3072 #define netif_dbg(priv, type, dev, format, args...) \ 3073 ({ \ 3074 if (0) \ 3075 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 3076 0; \ 3077 }) 3078 #endif 3079 3080 #if defined(VERBOSE_DEBUG) 3081 #define netif_vdbg netif_dbg 3082 #else 3083 #define netif_vdbg(priv, type, dev, format, args...) \ 3084 ({ \ 3085 if (0) \ 3086 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 3087 0; \ 3088 }) 3089 #endif 3090 3091 /* 3092 * The list of packet types we will receive (as opposed to discard) 3093 * and the routines to invoke. 3094 * 3095 * Why 16. Because with 16 the only overlap we get on a hash of the 3096 * low nibble of the protocol value is RARP/SNAP/X.25. 3097 * 3098 * NOTE: That is no longer true with the addition of VLAN tags. Not 3099 * sure which should go first, but I bet it won't make much 3100 * difference if we are running VLANs. The good news is that 3101 * this protocol won't be in the list unless compiled in, so 3102 * the average user (w/out VLANs) will not be adversely affected. 3103 * --BLG 3104 * 3105 * 0800 IP 3106 * 8100 802.1Q VLAN 3107 * 0001 802.3 3108 * 0002 AX.25 3109 * 0004 802.2 3110 * 8035 RARP 3111 * 0005 SNAP 3112 * 0805 X.25 3113 * 0806 ARP 3114 * 8137 IPX 3115 * 0009 Localtalk 3116 * 86DD IPv6 3117 */ 3118 #define PTYPE_HASH_SIZE (16) 3119 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 3120 3121 #endif /* _LINUX_NETDEVICE_H */ 3122