/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif

#include <net/busy_poll.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		512
#define IXGBE_DEFAULT_TX_WORK		256
#define IXGBE_MAX_TXD			4096
#define IXGBE_MIN_TXD			64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		512
#else
#define IXGBE_DEFAULT_RXD		128
#endif
#define IXGBE_MAX_RXD			4096
#define IXGBE_MIN_RXD			64

#define IXGBE_ETH_P_LLDP		0x88CC

/* flow control */
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256	256	/* Used for skb receive header */
#define IXGBE_RXBUFFER_1536	1536
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072
#define IXGBE_RXBUFFER_4K	4096
#define IXGBE_MAX_RXBUFFER	16384	/* largest size for a single descriptor */

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))

static inline int ixgbe_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IXGBE_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ixgbe_compute_pad(rx_buf_len);
}

#define IXGBE_SKB_PAD	ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif
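/* Worked example (illustrative only, not compiled): on a 4K-page system
 * with NET_IP_ALIGN = 2 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * assumed to be 320 bytes, the common 1536/1534 case above works out to:
 *
 *	rx_buf_len = 1536 - 2                       = 1534
 *	page_size  = ALIGN(1534, 4096 / 2)          = 2048
 *	pad_size   = (2048 - 320) - 1534            = 194
 *
 * so roughly NET_SKB_PAD worth of headroom survives in each 2K buffer.
 * The exact figures depend on SMP_CACHE_BYTES and NET_IP_ALIGN for the
 * architecture, which is why the pad is computed at runtime above.
 */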
/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x40,
	IXGBE_TX_FLAGS_FCOE	= 0x80,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define VMDQ_P(p)	((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515

struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
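/* Worked example (illustrative only): IXGBE_MAX_DATA_PER_TXD is 16K, so
 * a 32768-byte fragment needs TXD_USE_COUNT(32768) = 2 descriptors and a
 * 60000-byte one needs 4.  DESC_NEEDED reserves one descriptor per
 * possible fragment plus headroom (e.g. for the skb head and a context
 * descriptor) before the transmit path stops the queue.
 */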
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		/* XDP uses address ptr on irq_clean */
		void *data;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,
};

#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct ixgbe_adapter *real_adapter;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
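/* Usage sketch (illustrative only): these helpers wrap atomic bitops on
 * ring->state, so e.g. the service task can arm Tx hang detection
 * without any additional locking:
 *
 *	set_check_for_tx_hang(tx_ring);
 *	...
 *	if (check_for_tx_hang(tx_ring) && !ring_is_xdp(tx_ring))
 *		... schedule a reset ...
 */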
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device for DMA mapping */
	struct ixgbe_fwd_adapter *l2_accel_priv;
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* number of descriptors */

	u8 queue_index;			/* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,	/* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE	/* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		31
#define IXGBE_MAX_DCBMACVLANS		8

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  On
 * systems with 4K pages that exceeds the half page normally used per
 * buffer, so we double the page order (and use 3K buffers) for FCoE
 * enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
	if (ring_uses_build_skb(ring))
		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
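/* Worked example (illustrative only): with 4K pages, a ring that has
 * __IXGBE_RX_3K_BUFFER set resolves to:
 *
 *	ixgbe_rx_bufsz(ring)    = IXGBE_RXBUFFER_3K = 3072
 *	ixgbe_rx_pg_order(ring) = 1
 *	ixgbe_rx_pg_size(ring)  = 4096 << 1         = 8192
 *
 * i.e. two 3K buffers per order-1 page, leaving 2K for skb overhead.  A
 * default ring stays at order 0: two 2K buffers per 4K page.
 */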
#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
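/* Usage sketch (illustrative only): a NAPI poll walks every Tx ring
 * attached to its q_vector like so:
 *
 *	struct ixgbe_ring *ring;
 *
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		clean_complete &= ixgbe_clean_tx_irq(q_vector, ring, budget);
 *
 * (ixgbe_clean_tx_irq lives in ixgbe_main.c; it is shown here only to
 * illustrate the iterator.)
 */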
#define MAX_RX_PACKET_BUFFERS	((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
				 ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS	MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	/* CPU for DCA */
#endif
	u16 v_idx;	/* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring */
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
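/* Worked example (illustrative only): with the low bits reserved, the
 * defines above encode (microseconds << 2).  IXGBE_100K_ITR = 40 -> 10 us
 * between interrupts (~100K ints/sec); IXGBE_20K_ITR = 200 -> 50 us
 * (~20K ints/sec); IXGBE_12K_ITR = 336 -> 84 us (~12K ints/sec).
 */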
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
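/* Worked example (illustrative only): on a 512-descriptor ring with
 * next_to_clean = 10 and next_to_use = 500, ntc has wrapped relative to
 * ntu, so:
 *
 *	(10 > 500 ? 0 : 512) + 10 - 500 - 1 = 21
 *
 * descriptors are free.  The trailing -1 keeps one slot permanently
 * unused so that next_to_use == next_to_clean unambiguously means the
 * ring is empty.
 */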
#define IXGBE_RX_DESC(R, i)	\
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	\
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	\
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
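/* Usage sketch (illustrative only): the Rx clean loop peeks at the next
 * descriptor and stops once the hardware is no longer done with it:
 *
 *	union ixgbe_adv_rx_desc *rx_desc;
 *
 *	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 *	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;
 *
 * (IXGBE_RXD_STAT_DD is the descriptor-done bit from ixgbe_type.h.)
 */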
#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599	64
#define MAX_Q_VECTORS_82599	64
#define MAX_MSIX_VECTORS_82598	18
#define MAX_Q_VECTORS_82598	16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS	MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT	MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS	1
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT	(4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES	(2 * HZ)	/* SFP poll every 2 seconds */

/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE	BIT(28)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED	BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	u16 eeprom_verh;
	u16 eeprom_verl;
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /* CONFIG_DEBUG_FS */

	u8 default_up;
	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

/* maximum number of RETA entries among all devices supported by ixgbe
 * driver: currently it's x550 device in non-SRIOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE 40	/* size of RSS Hash Key in bytes */
	u32 *rss_key;
};

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}

struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u64 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
	__IXGBE_RESET_REQUESTED,
};

struct ixgbe_cb {
	union {			/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
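/* Usage sketch (illustrative only): skb->cb is scratch space owned by
 * the current layer, so the Rx path can stash per-packet driver state
 * there without extra allocation:
 *
 *	IXGBE_CB(skb)->dma = rx_buffer->dma;
 *	if (ring_is_rsc_enabled(rx_ring))
 *		IXGBE_CB(skb)->append_cnt++;
 */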
enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
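/* Usage sketch (illustrative only): Tx completion uses this to reach the
 * stack's queue object for this ring, e.g.
 *
 *	netif_tx_wake_queue(txring_txq(tx_ring));
 */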
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#endif /* _IXGBE_H_ */