/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */

#ifndef _IGC_H_
#define _IGC_H_

#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>

#include "igc_hw.h"

/* forward declaration */
void igc_set_ethtool_ops(struct net_device *);

struct igc_adapter;
struct igc_ring;

void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
void igc_free_rx_resources(struct igc_ring *ring);
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
int igc_add_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igc_del_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);

extern char igc_driver_name[];
extern char igc_driver_version[];

#define IGC_REGS_LEN			740
#define IGC_RETA_SIZE			128

/* Interrupt defines */
#define IGC_START_ITR			648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI		BIT(0)
#define IGC_FLAG_QUEUE_PAIRS		BIT(3)
#define IGC_FLAG_DMAC			BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE	BIT(9)
#define IGC_FLAG_MEDIA_RESET		BIT(10)
#define IGC_FLAG_MAS_ENABLE		BIT(12)
#define IGC_FLAG_HAS_MSIX		BIT(13)
#define IGC_FLAG_VLAN_PROMISC		BIT(15)
#define IGC_FLAG_RX_LEGACY		BIT(16)

#define IGC_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)

#define IGC_MRQC_ENABLE_RSS_MQ		0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP	0x00800000

#define IGC_START_ITR			648 /* ~6000 ints/sec */
#define IGC_4K_ITR			980
#define IGC_20K_ITR			196
#define IGC_70K_ITR			56
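/* Note on units: the ITR constants above are interrupt-throttle interval
 * values, not rates.  The arithmetic is consistent with one count being
 * roughly 0.25 usec (an inference, not stated in this header):
 * 648 * 0.25 us ~= 162 us between interrupts ~= 6170 ints/sec, matching the
 * "~6000 ints/sec" annotation, and 56 * 0.25 us ~= 14 us ~= 70K ints/sec.
 */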

#define IGC_DEFAULT_ITR		3 /* dynamic */
#define IGC_MAX_ITR_USECS	10000
#define IGC_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_MSIX_ENTRIES	10

/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD		256
#define IGC_DEFAULT_TX_WORK	128
#define IGC_MIN_TXD		80
#define IGC_MAX_TXD		4096

#define IGC_DEFAULT_RXD		256
#define IGC_MIN_RXD		80
#define IGC_MAX_RXD		4096

/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES	4
#define IGC_MAX_TX_QUEUES	4

#define MAX_Q_VECTORS		8
#define MAX_STD_JUMBO_FRAME_SIZE	9216

/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256	256
#define IGC_RXBUFFER_2048	2048
#define IGC_RXBUFFER_3072	3072

#define AUTO_ALL_MODES		0
#define IGC_RX_HDR_LEN		IGC_RXBUFFER_256

/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH		8
#define IGC_RX_HTHRESH		8
#define IGC_TX_PTHRESH		8
#define IGC_TX_HTHRESH		1
#define IGC_RX_WTHRESH		4
#define IGC_TX_WTHRESH		16

#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define IGC_TS_HDR_LEN		16

#define IGC_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
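/* Reading the 4K-page variant of IGC_MAX_FRAME_BUILD_SKB: the usable frame
 * is whatever remains of the 2048-byte buffer after SKB_WITH_OVERHEAD()
 * reserves the skb_shared_info tailroom, IGC_SKB_PAD reserves the
 * NET_SKB_PAD + NET_IP_ALIGN headroom, and IGC_TS_HDR_LEN reserves the
 * 16-byte timestamp header the hardware may prepend to a received packet.
 */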

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGC_RX_BUFFER_WRITE	16 /* Must be power of 2 */

/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
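/* The mask is converted once with cpu_to_le32() instead of byte-swapping the
 * descriptor field on every test; callers only use the result as a boolean,
 * so no conversion back to host order is needed.
 */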

enum igc_state_t {
	__IGC_TESTING,
	__IGC_RESETTING,
	__IGC_DOWN,
	__IGC_PTP_TX_IN_PROGRESS,
};

enum igc_tx_flags {
	/* cmd_type flags */
	IGC_TX_FLAGS_VLAN	= 0x01,
	IGC_TX_FLAGS_TSO	= 0x02,
	IGC_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGC_TX_FLAGS_IPV4	= 0x10,
	IGC_TX_FLAGS_CSUM	= 0x20,
};

enum igc_boards {
	board_base,
};

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGC_MAX_TXD_PWR		15
#define IGC_MAX_DATA_PER_TXD	BIT(IGC_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)
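/* Worked example: a 60000-byte DMA mapping needs
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 32768) = 2 data descriptors,
 * since IGC_MAX_DATA_PER_TXD = BIT(15) = 32768 bytes per descriptor.
 */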

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igc_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igc_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igc_rx_packet_stats {
	u64 ipv4_packets;	/* IPv4 headers processed */
	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
	u64 ipv6_packets;	/* IPv6 headers processed */
	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
	u64 tcp_packets;	/* TCP headers processed */
	u64 udp_packets;	/* UDP headers processed */
	u64 sctp_packets;	/* SCTP headers processed */
	u64 nfs_packets;	/* NFS headers processed */
	u64 other_packets;
};

struct igc_ring_container {
	struct igc_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igc_ring {
	struct igc_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point are written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igc_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct igc_rx_queue_stats rx_stats;
			struct igc_rx_packet_stats pkt_stats;
			struct u64_stats_sync rx_syncp;
			struct sk_buff *skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

struct igc_q_vector {
	struct igc_adapter *adapter;	/* backlink */
	void __iomem *itr_register;
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;
	u8 set_itr;

	struct igc_ring_container rx, tx;

	struct napi_struct napi;

	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];
	struct net_device poll_dev;

	/* for dynamic allocation of rings associated with this q_vector */
	struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};

#define MAX_ETYPE_FILTER		(4 - 1)

enum igc_filter_match_flags {
	IGC_FILTER_FLAG_ETHER_TYPE =	0x1,
	IGC_FILTER_FLAG_VLAN_TCI   =	0x2,
	IGC_FILTER_FLAG_SRC_MAC_ADDR =	0x4,
	IGC_FILTER_FLAG_DST_MAC_ADDR =	0x8,
};

/* RX network flow classification data structure */
struct igc_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};

struct igc_nfc_filter {
	struct hlist_node nfc_node;
	struct igc_nfc_input filter;
	unsigned long cookie;
	u16 etype_reg_index;
	u16 sw_idx;
	u16 action;
};

struct igc_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};

#define IGC_MAC_STATE_DEFAULT		0x1
#define IGC_MAC_STATE_IN_USE		0x2
#define IGC_MAC_STATE_SRC_ADDR		0x4
#define IGC_MAC_STATE_QUEUE_STEERING	0x8

#define IGC_MAX_RXNFC_FILTERS		16

/* Board specific private data structure */
struct igc_adapter {
	struct net_device *netdev;

	unsigned long state;
	unsigned int flags;
	unsigned int num_q_vectors;

	struct msix_entry *msix_entries;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];

	/* RX */
	int num_rx_queues;
	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;

	u16 link_speed;
	u16 link_duplex;

	u8 port_num;

	u8 __iomem *io_addr;
	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;
	bool fc_autoneg;

	u8 tx_timeout_factor;

	int msg_enable;
	u32 max_frame_size;
	u32 min_frame_size;

	/* OS defined structs */
	struct pci_dev *pdev;
	/* lock for statistics */
	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in igc_hw.h */
	struct igc_hw hw;
	struct igc_hw_stats stats;

	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	u16 tx_ring_count;
	u16 rx_ring_count;

	u32 *shadow_vfta;

	u32 rss_queues;
	u32 rss_indir_tbl_init;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;

	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];

	struct igc_mac_addr *mac_table;

	u8 rss_indir_tbl[IGC_RETA_SIZE];

	unsigned long link_check_timeout;
	struct igc_info ei;
};

/* igc_desc_unused - calculate how many descriptors are unused */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
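/* Example of the wrap-around arithmetic: with count = 256, a ring with
 * next_to_clean = 10 and next_to_use = 250 has 256 + 10 - 250 - 1 = 15 free
 * descriptors, while next_to_clean = 250 and next_to_use = 10 gives
 * 250 - 10 - 1 = 239.  The "- 1" keeps one slot unused so a completely full
 * ring can be distinguished from an empty one.
 */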

static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline s32 igc_reset_phy(struct igc_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}

static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}
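/* On 4K-page systems a 3072-byte buffer is too large for the half-page
 * split used when recycling receive pages, so igc_rx_pg_order() moves those
 * rings to order-1 (8K) pages while the default 2048-byte buffers stay on
 * order-0 pages; with 8K or larger pages everything fits at order 0.  (The
 * half-page recycling itself lives in igc_main.c, not in this header.)
 */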

static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
int igc_add_filter(struct igc_adapter *adapter,
		   struct igc_nfc_filter *input);
int igc_erase_filter(struct igc_adapter *adapter,
		     struct igc_nfc_filter *input);

#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_TXD_DCMD	(IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)

#define IGC_RX_DESC(R, i)       \
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i)       \
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i)   \
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
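/* These accessors cast the raw descriptor ring memory to the advanced Rx/Tx
 * descriptor layouts.  Typical (illustrative) use in a clean-up loop:
 *
 *	union igc_adv_rx_desc *rx_desc =
 *		IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */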

#endif /* _IGC_H_ */