1d89f8841SSasha Neftin /* SPDX-License-Identifier: GPL-2.0 */ 2d89f8841SSasha Neftin /* Copyright (c) 2018 Intel Corporation */ 3d89f8841SSasha Neftin 4d89f8841SSasha Neftin #ifndef _IGC_H_ 5d89f8841SSasha Neftin #define _IGC_H_ 6d89f8841SSasha Neftin 7d89f8841SSasha Neftin #include <linux/kobject.h> 8d89f8841SSasha Neftin #include <linux/pci.h> 9d89f8841SSasha Neftin #include <linux/netdevice.h> 10d89f8841SSasha Neftin #include <linux/vmalloc.h> 11d89f8841SSasha Neftin #include <linux/ethtool.h> 12d89f8841SSasha Neftin #include <linux/sctp.h> 13d89f8841SSasha Neftin 14146740f9SSasha Neftin #include "igc_hw.h" 15146740f9SSasha Neftin 168c5ad0daSSasha Neftin /* forward declaration */ 178c5ad0daSSasha Neftin void igc_set_ethtool_ops(struct net_device *); 188c5ad0daSSasha Neftin 198c5ad0daSSasha Neftin struct igc_adapter; 208c5ad0daSSasha Neftin struct igc_ring; 218c5ad0daSSasha Neftin 228c5ad0daSSasha Neftin void igc_up(struct igc_adapter *adapter); 238c5ad0daSSasha Neftin void igc_down(struct igc_adapter *adapter); 248c5ad0daSSasha Neftin int igc_setup_tx_resources(struct igc_ring *ring); 258c5ad0daSSasha Neftin int igc_setup_rx_resources(struct igc_ring *ring); 268c5ad0daSSasha Neftin void igc_free_tx_resources(struct igc_ring *ring); 278c5ad0daSSasha Neftin void igc_free_rx_resources(struct igc_ring *ring); 288c5ad0daSSasha Neftin unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); 298c5ad0daSSasha Neftin void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 308c5ad0daSSasha Neftin const u32 max_rss_queues); 318c5ad0daSSasha Neftin int igc_reinit_queues(struct igc_adapter *adapter); 322121c271SSasha Neftin void igc_write_rss_indir_tbl(struct igc_adapter *adapter); 338c5ad0daSSasha Neftin bool igc_has_link(struct igc_adapter *adapter); 348c5ad0daSSasha Neftin void igc_reset(struct igc_adapter *adapter); 358c5ad0daSSasha Neftin int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); 366245c848SSasha Neftin int 
igc_add_mac_steering_filter(struct igc_adapter *adapter, 376245c848SSasha Neftin const u8 *addr, u8 queue, u8 flags); 386245c848SSasha Neftin int igc_del_mac_steering_filter(struct igc_adapter *adapter, 396245c848SSasha Neftin const u8 *addr, u8 queue, u8 flags); 4036b9fea6SSasha Neftin void igc_update_stats(struct igc_adapter *adapter); 418c5ad0daSSasha Neftin 42d89f8841SSasha Neftin extern char igc_driver_name[]; 43d89f8841SSasha Neftin extern char igc_driver_version[]; 44d89f8841SSasha Neftin 458c5ad0daSSasha Neftin #define IGC_REGS_LEN 740 468c5ad0daSSasha Neftin #define IGC_RETA_SIZE 128 478c5ad0daSSasha Neftin 483df25e4cSSasha Neftin /* Interrupt defines */ 493df25e4cSSasha Neftin #define IGC_START_ITR 648 /* ~6000 ints/sec */ 503df25e4cSSasha Neftin #define IGC_FLAG_HAS_MSI BIT(0) 518c5ad0daSSasha Neftin #define IGC_FLAG_QUEUE_PAIRS BIT(3) 528c5ad0daSSasha Neftin #define IGC_FLAG_DMAC BIT(4) 530507ef8aSSasha Neftin #define IGC_FLAG_NEED_LINK_UPDATE BIT(9) 54208983f0SSasha Neftin #define IGC_FLAG_MEDIA_RESET BIT(10) 55208983f0SSasha Neftin #define IGC_FLAG_MAS_ENABLE BIT(12) 563df25e4cSSasha Neftin #define IGC_FLAG_HAS_MSIX BIT(13) 570507ef8aSSasha Neftin #define IGC_FLAG_VLAN_PROMISC BIT(15) 588c5ad0daSSasha Neftin #define IGC_FLAG_RX_LEGACY BIT(16) 593df25e4cSSasha Neftin 602121c271SSasha Neftin #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) 612121c271SSasha Neftin #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) 622121c271SSasha Neftin 632121c271SSasha Neftin #define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 642121c271SSasha Neftin #define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 652121c271SSasha Neftin #define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 662121c271SSasha Neftin 673df25e4cSSasha Neftin #define IGC_START_ITR 648 /* ~6000 ints/sec */ 683df25e4cSSasha Neftin #define IGC_4K_ITR 980 693df25e4cSSasha Neftin #define IGC_20K_ITR 196 703df25e4cSSasha Neftin #define IGC_70K_ITR 56 713df25e4cSSasha Neftin 720507ef8aSSasha Neftin #define IGC_DEFAULT_ITR 3 /* dynamic */ 
/* Interrupt-throttle bounds (usecs) and MSI-X sizing */
#define IGC_MAX_ITR_USECS	10000
#define IGC_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_MSIX_ENTRIES	10

/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD		256
#define IGC_DEFAULT_TX_WORK	128
#define IGC_MIN_TXD		80
#define IGC_MAX_TXD		4096

#define IGC_DEFAULT_RXD		256
#define IGC_MIN_RXD		80
#define IGC_MAX_RXD		4096

/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES	4
#define IGC_MAX_TX_QUEUES	4

#define MAX_Q_VECTORS		8
#define MAX_STD_JUMBO_FRAME_SIZE	9216

/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256	256
#define IGC_RXBUFFER_2048	2048
#define IGC_RXBUFFER_3072	3072

#define AUTO_ALL_MODES		0
#define IGC_RX_HDR_LEN		IGC_RXBUFFER_256

/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH		8
#define IGC_RX_HTHRESH		8
#define IGC_TX_PTHRESH		8
#define IGC_TX_HTHRESH		1
#define IGC_RX_WTHRESH		4
#define IGC_TX_WTHRESH		16

#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Timestamp header prepended to Rx packets by hardware */
#define IGC_TS_HDR_LEN		16

#define IGC_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
*/ 1360507ef8aSSasha Neftin #define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 1370507ef8aSSasha Neftin 138d3ae3cfbSSasha Neftin /* VLAN info */ 139d3ae3cfbSSasha Neftin #define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 140d3ae3cfbSSasha Neftin 1410507ef8aSSasha Neftin /* igc_test_staterr - tests bits within Rx descriptor status and error fields */ 1420507ef8aSSasha Neftin static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, 1430507ef8aSSasha Neftin const u32 stat_err_bits) 1440507ef8aSSasha Neftin { 1450507ef8aSSasha Neftin return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); 1460507ef8aSSasha Neftin } 1470507ef8aSSasha Neftin 148c9a11c23SSasha Neftin enum igc_state_t { 149c9a11c23SSasha Neftin __IGC_TESTING, 150c9a11c23SSasha Neftin __IGC_RESETTING, 151c9a11c23SSasha Neftin __IGC_DOWN, 152c9a11c23SSasha Neftin __IGC_PTP_TX_IN_PROGRESS, 153c9a11c23SSasha Neftin }; 154c9a11c23SSasha Neftin 1550507ef8aSSasha Neftin enum igc_tx_flags { 1560507ef8aSSasha Neftin /* cmd_type flags */ 1570507ef8aSSasha Neftin IGC_TX_FLAGS_VLAN = 0x01, 1580507ef8aSSasha Neftin IGC_TX_FLAGS_TSO = 0x02, 1590507ef8aSSasha Neftin IGC_TX_FLAGS_TSTAMP = 0x04, 1600507ef8aSSasha Neftin 1610507ef8aSSasha Neftin /* olinfo flags */ 1620507ef8aSSasha Neftin IGC_TX_FLAGS_IPV4 = 0x10, 1630507ef8aSSasha Neftin IGC_TX_FLAGS_CSUM = 0x20, 1640507ef8aSSasha Neftin }; 1650507ef8aSSasha Neftin 166ab405612SSasha Neftin enum igc_boards { 167ab405612SSasha Neftin board_base, 168ab405612SSasha Neftin }; 169ab405612SSasha Neftin 1700507ef8aSSasha Neftin /* The largest size we can write to the descriptor is 65535. In order to 1710507ef8aSSasha Neftin * maintain a power of two alignment we have to limit ourselves to 32K. 
1720507ef8aSSasha Neftin */ 1730507ef8aSSasha Neftin #define IGC_MAX_TXD_PWR 15 1740507ef8aSSasha Neftin #define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) 1750507ef8aSSasha Neftin 1760507ef8aSSasha Neftin /* Tx Descriptors needed, worst case */ 1770507ef8aSSasha Neftin #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) 1780507ef8aSSasha Neftin #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 1790507ef8aSSasha Neftin 18013b5b7fdSSasha Neftin /* wrapper around a pointer to a socket buffer, 18113b5b7fdSSasha Neftin * so a DMA handle can be stored along with the buffer 18213b5b7fdSSasha Neftin */ 18313b5b7fdSSasha Neftin struct igc_tx_buffer { 18413b5b7fdSSasha Neftin union igc_adv_tx_desc *next_to_watch; 18513b5b7fdSSasha Neftin unsigned long time_stamp; 18613b5b7fdSSasha Neftin struct sk_buff *skb; 18713b5b7fdSSasha Neftin unsigned int bytecount; 18813b5b7fdSSasha Neftin u16 gso_segs; 18913b5b7fdSSasha Neftin __be16 protocol; 19013b5b7fdSSasha Neftin 19113b5b7fdSSasha Neftin DEFINE_DMA_UNMAP_ADDR(dma); 19213b5b7fdSSasha Neftin DEFINE_DMA_UNMAP_LEN(len); 19313b5b7fdSSasha Neftin u32 tx_flags; 19413b5b7fdSSasha Neftin }; 19513b5b7fdSSasha Neftin 19613b5b7fdSSasha Neftin struct igc_rx_buffer { 19713b5b7fdSSasha Neftin dma_addr_t dma; 19813b5b7fdSSasha Neftin struct page *page; 19913b5b7fdSSasha Neftin #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) 20013b5b7fdSSasha Neftin __u32 page_offset; 20113b5b7fdSSasha Neftin #else 20213b5b7fdSSasha Neftin __u16 page_offset; 20313b5b7fdSSasha Neftin #endif 20413b5b7fdSSasha Neftin __u16 pagecnt_bias; 20513b5b7fdSSasha Neftin }; 20613b5b7fdSSasha Neftin 2073df25e4cSSasha Neftin struct igc_tx_queue_stats { 2083df25e4cSSasha Neftin u64 packets; 2093df25e4cSSasha Neftin u64 bytes; 2103df25e4cSSasha Neftin u64 restart_queue; 2110507ef8aSSasha Neftin u64 restart_queue2; 2123df25e4cSSasha Neftin }; 2133df25e4cSSasha Neftin 2143df25e4cSSasha Neftin struct igc_rx_queue_stats { 2153df25e4cSSasha Neftin u64 packets; 2163df25e4cSSasha 
Neftin u64 bytes; 2173df25e4cSSasha Neftin u64 drops; 2183df25e4cSSasha Neftin u64 csum_err; 2193df25e4cSSasha Neftin u64 alloc_failed; 2203df25e4cSSasha Neftin }; 2213df25e4cSSasha Neftin 2223df25e4cSSasha Neftin struct igc_rx_packet_stats { 2233df25e4cSSasha Neftin u64 ipv4_packets; /* IPv4 headers processed */ 2243df25e4cSSasha Neftin u64 ipv4e_packets; /* IPv4E headers with extensions processed */ 2253df25e4cSSasha Neftin u64 ipv6_packets; /* IPv6 headers processed */ 2263df25e4cSSasha Neftin u64 ipv6e_packets; /* IPv6E headers with extensions processed */ 2273df25e4cSSasha Neftin u64 tcp_packets; /* TCP headers processed */ 2283df25e4cSSasha Neftin u64 udp_packets; /* UDP headers processed */ 2293df25e4cSSasha Neftin u64 sctp_packets; /* SCTP headers processed */ 2303df25e4cSSasha Neftin u64 nfs_packets; /* NFS headers processe */ 2313df25e4cSSasha Neftin u64 other_packets; 2323df25e4cSSasha Neftin }; 2333df25e4cSSasha Neftin 2343df25e4cSSasha Neftin struct igc_ring_container { 2353df25e4cSSasha Neftin struct igc_ring *ring; /* pointer to linked list of rings */ 2363df25e4cSSasha Neftin unsigned int total_bytes; /* total bytes processed this int */ 2373df25e4cSSasha Neftin unsigned int total_packets; /* total packets processed this int */ 2383df25e4cSSasha Neftin u16 work_limit; /* total work allowed per interrupt */ 2393df25e4cSSasha Neftin u8 count; /* total number of rings in vector */ 2403df25e4cSSasha Neftin u8 itr; /* current ITR setting for ring */ 2413df25e4cSSasha Neftin }; 2423df25e4cSSasha Neftin 2433df25e4cSSasha Neftin struct igc_ring { 2443df25e4cSSasha Neftin struct igc_q_vector *q_vector; /* backlink to q_vector */ 2453df25e4cSSasha Neftin struct net_device *netdev; /* back pointer to net_device */ 2463df25e4cSSasha Neftin struct device *dev; /* device for dma mapping */ 2473df25e4cSSasha Neftin union { /* array of buffer info structs */ 2483df25e4cSSasha Neftin struct igc_tx_buffer *tx_buffer_info; 2493df25e4cSSasha Neftin struct igc_rx_buffer 
*rx_buffer_info; 2503df25e4cSSasha Neftin }; 2513df25e4cSSasha Neftin void *desc; /* descriptor ring memory */ 2523df25e4cSSasha Neftin unsigned long flags; /* ring specific flags */ 2533df25e4cSSasha Neftin void __iomem *tail; /* pointer to ring tail register */ 2543df25e4cSSasha Neftin dma_addr_t dma; /* phys address of the ring */ 2553df25e4cSSasha Neftin unsigned int size; /* length of desc. ring in bytes */ 2563df25e4cSSasha Neftin 2573df25e4cSSasha Neftin u16 count; /* number of desc. in the ring */ 2583df25e4cSSasha Neftin u8 queue_index; /* logical index of the ring*/ 2593df25e4cSSasha Neftin u8 reg_idx; /* physical index of the ring */ 260d3ae3cfbSSasha Neftin bool launchtime_enable; /* true if LaunchTime is enabled */ 2613df25e4cSSasha Neftin 2623df25e4cSSasha Neftin /* everything past this point are written often */ 2633df25e4cSSasha Neftin u16 next_to_clean; 2643df25e4cSSasha Neftin u16 next_to_use; 2653df25e4cSSasha Neftin u16 next_to_alloc; 2663df25e4cSSasha Neftin 2673df25e4cSSasha Neftin union { 2683df25e4cSSasha Neftin /* TX */ 2693df25e4cSSasha Neftin struct { 2703df25e4cSSasha Neftin struct igc_tx_queue_stats tx_stats; 2710507ef8aSSasha Neftin struct u64_stats_sync tx_syncp; 2720507ef8aSSasha Neftin struct u64_stats_sync tx_syncp2; 2733df25e4cSSasha Neftin }; 2743df25e4cSSasha Neftin /* RX */ 2753df25e4cSSasha Neftin struct { 2763df25e4cSSasha Neftin struct igc_rx_queue_stats rx_stats; 2773df25e4cSSasha Neftin struct igc_rx_packet_stats pkt_stats; 2780507ef8aSSasha Neftin struct u64_stats_sync rx_syncp; 2793df25e4cSSasha Neftin struct sk_buff *skb; 2803df25e4cSSasha Neftin }; 2813df25e4cSSasha Neftin }; 2823df25e4cSSasha Neftin } ____cacheline_internodealigned_in_smp; 2833df25e4cSSasha Neftin 284c9a11c23SSasha Neftin struct igc_q_vector { 285c9a11c23SSasha Neftin struct igc_adapter *adapter; /* backlink */ 2863df25e4cSSasha Neftin void __iomem *itr_register; 2873df25e4cSSasha Neftin u32 eims_value; /* EIMS mask value */ 2883df25e4cSSasha Neftin 
2893df25e4cSSasha Neftin u16 itr_val; 2903df25e4cSSasha Neftin u8 set_itr; 2913df25e4cSSasha Neftin 2923df25e4cSSasha Neftin struct igc_ring_container rx, tx; 293c9a11c23SSasha Neftin 294c9a11c23SSasha Neftin struct napi_struct napi; 2953df25e4cSSasha Neftin 2963df25e4cSSasha Neftin struct rcu_head rcu; /* to avoid race with update stats on free */ 2973df25e4cSSasha Neftin char name[IFNAMSIZ + 9]; 2983df25e4cSSasha Neftin struct net_device poll_dev; 2993df25e4cSSasha Neftin 3003df25e4cSSasha Neftin /* for dynamic allocation of rings associated with this q_vector */ 3013df25e4cSSasha Neftin struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; 302c9a11c23SSasha Neftin }; 303c9a11c23SSasha Neftin 3046245c848SSasha Neftin #define MAX_ETYPE_FILTER (4 - 1) 3056245c848SSasha Neftin 3066245c848SSasha Neftin enum igc_filter_match_flags { 3076245c848SSasha Neftin IGC_FILTER_FLAG_ETHER_TYPE = 0x1, 3086245c848SSasha Neftin IGC_FILTER_FLAG_VLAN_TCI = 0x2, 3096245c848SSasha Neftin IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, 3106245c848SSasha Neftin IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, 3116245c848SSasha Neftin }; 3126245c848SSasha Neftin 3136245c848SSasha Neftin /* RX network flow classification data structure */ 3146245c848SSasha Neftin struct igc_nfc_input { 3156245c848SSasha Neftin /* Byte layout in order, all values with MSB first: 3166245c848SSasha Neftin * match_flags - 1 byte 3176245c848SSasha Neftin * etype - 2 bytes 3186245c848SSasha Neftin * vlan_tci - 2 bytes 3196245c848SSasha Neftin */ 3206245c848SSasha Neftin u8 match_flags; 3216245c848SSasha Neftin __be16 etype; 3226245c848SSasha Neftin __be16 vlan_tci; 3236245c848SSasha Neftin u8 src_addr[ETH_ALEN]; 3246245c848SSasha Neftin u8 dst_addr[ETH_ALEN]; 3256245c848SSasha Neftin }; 3266245c848SSasha Neftin 3276245c848SSasha Neftin struct igc_nfc_filter { 3286245c848SSasha Neftin struct hlist_node nfc_node; 3296245c848SSasha Neftin struct igc_nfc_input filter; 3306245c848SSasha Neftin unsigned long cookie; 
3316245c848SSasha Neftin u16 etype_reg_index; 3326245c848SSasha Neftin u16 sw_idx; 3336245c848SSasha Neftin u16 action; 3346245c848SSasha Neftin }; 3356245c848SSasha Neftin 336c9a11c23SSasha Neftin struct igc_mac_addr { 337c9a11c23SSasha Neftin u8 addr[ETH_ALEN]; 338c9a11c23SSasha Neftin u8 queue; 339c9a11c23SSasha Neftin u8 state; /* bitmask */ 340c9a11c23SSasha Neftin }; 341c9a11c23SSasha Neftin 342c9a11c23SSasha Neftin #define IGC_MAC_STATE_DEFAULT 0x1 3436245c848SSasha Neftin #define IGC_MAC_STATE_IN_USE 0x2 3446245c848SSasha Neftin #define IGC_MAC_STATE_SRC_ADDR 0x4 3456245c848SSasha Neftin #define IGC_MAC_STATE_QUEUE_STEERING 0x8 3466245c848SSasha Neftin 3476245c848SSasha Neftin #define IGC_MAX_RXNFC_FILTERS 16 348c9a11c23SSasha Neftin 349146740f9SSasha Neftin /* Board specific private data structure */ 350146740f9SSasha Neftin struct igc_adapter { 351c9a11c23SSasha Neftin struct net_device *netdev; 352c9a11c23SSasha Neftin 353c9a11c23SSasha Neftin unsigned long state; 354c9a11c23SSasha Neftin unsigned int flags; 355c9a11c23SSasha Neftin unsigned int num_q_vectors; 3563df25e4cSSasha Neftin 3573df25e4cSSasha Neftin struct msix_entry *msix_entries; 3583df25e4cSSasha Neftin 3593df25e4cSSasha Neftin /* TX */ 3603df25e4cSSasha Neftin u16 tx_work_limit; 361208983f0SSasha Neftin u32 tx_timeout_count; 3623df25e4cSSasha Neftin int num_tx_queues; 3633df25e4cSSasha Neftin struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; 3643df25e4cSSasha Neftin 3653df25e4cSSasha Neftin /* RX */ 3663df25e4cSSasha Neftin int num_rx_queues; 3673df25e4cSSasha Neftin struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; 3683df25e4cSSasha Neftin 3693df25e4cSSasha Neftin struct timer_list watchdog_timer; 3703df25e4cSSasha Neftin struct timer_list dma_err_timer; 3713df25e4cSSasha Neftin struct timer_list phy_info_timer; 3723df25e4cSSasha Neftin 373c9a11c23SSasha Neftin u16 link_speed; 374c9a11c23SSasha Neftin u16 link_duplex; 375c9a11c23SSasha Neftin 376c9a11c23SSasha Neftin u8 port_num; 377c9a11c23SSasha 
Neftin 378146740f9SSasha Neftin u8 __iomem *io_addr; 3793df25e4cSSasha Neftin /* Interrupt Throttle Rate */ 3803df25e4cSSasha Neftin u32 rx_itr_setting; 3813df25e4cSSasha Neftin u32 tx_itr_setting; 3823df25e4cSSasha Neftin 3833df25e4cSSasha Neftin struct work_struct reset_task; 384c9a11c23SSasha Neftin struct work_struct watchdog_task; 3853df25e4cSSasha Neftin struct work_struct dma_err_task; 3864eb80801SSasha Neftin bool fc_autoneg; 387c9a11c23SSasha Neftin 3880507ef8aSSasha Neftin u8 tx_timeout_factor; 3890507ef8aSSasha Neftin 390c9a11c23SSasha Neftin int msg_enable; 391c9a11c23SSasha Neftin u32 max_frame_size; 3920507ef8aSSasha Neftin u32 min_frame_size; 393146740f9SSasha Neftin 394146740f9SSasha Neftin /* OS defined structs */ 395146740f9SSasha Neftin struct pci_dev *pdev; 3960507ef8aSSasha Neftin /* lock for statistics */ 3970507ef8aSSasha Neftin spinlock_t stats64_lock; 3980507ef8aSSasha Neftin struct rtnl_link_stats64 stats64; 399146740f9SSasha Neftin 400146740f9SSasha Neftin /* structs defined in igc_hw.h */ 401146740f9SSasha Neftin struct igc_hw hw; 4023df25e4cSSasha Neftin struct igc_hw_stats stats; 403c9a11c23SSasha Neftin 404c9a11c23SSasha Neftin struct igc_q_vector *q_vector[MAX_Q_VECTORS]; 4053df25e4cSSasha Neftin u32 eims_enable_mask; 4063df25e4cSSasha Neftin u32 eims_other; 4073df25e4cSSasha Neftin 4083df25e4cSSasha Neftin u16 tx_ring_count; 4093df25e4cSSasha Neftin u16 rx_ring_count; 4103df25e4cSSasha Neftin 41136b9fea6SSasha Neftin u32 tx_hwtstamp_timeouts; 41236b9fea6SSasha Neftin u32 tx_hwtstamp_skipped; 41336b9fea6SSasha Neftin u32 rx_hwtstamp_cleared; 4140507ef8aSSasha Neftin u32 *shadow_vfta; 4150507ef8aSSasha Neftin 4163df25e4cSSasha Neftin u32 rss_queues; 4172121c271SSasha Neftin u32 rss_indir_tbl_init; 418c9a11c23SSasha Neftin 4196245c848SSasha Neftin /* RX network flow classification support */ 4206245c848SSasha Neftin struct hlist_head nfc_filter_list; 4216245c848SSasha Neftin struct hlist_head cls_flower_list; 4226245c848SSasha Neftin 
unsigned int nfc_filter_count; 4236245c848SSasha Neftin 4240507ef8aSSasha Neftin /* lock for RX network flow classification filter */ 4250507ef8aSSasha Neftin spinlock_t nfc_lock; 4266245c848SSasha Neftin bool etype_bitmap[MAX_ETYPE_FILTER]; 4270507ef8aSSasha Neftin 428c9a11c23SSasha Neftin struct igc_mac_addr *mac_table; 429ab405612SSasha Neftin 4308c5ad0daSSasha Neftin u8 rss_indir_tbl[IGC_RETA_SIZE]; 4318c5ad0daSSasha Neftin 432208983f0SSasha Neftin unsigned long link_check_timeout; 433ab405612SSasha Neftin struct igc_info ei; 434146740f9SSasha Neftin }; 435146740f9SSasha Neftin 43613b5b7fdSSasha Neftin /* igc_desc_unused - calculate if we have unused descriptors */ 43713b5b7fdSSasha Neftin static inline u16 igc_desc_unused(const struct igc_ring *ring) 43813b5b7fdSSasha Neftin { 43913b5b7fdSSasha Neftin u16 ntc = ring->next_to_clean; 44013b5b7fdSSasha Neftin u16 ntu = ring->next_to_use; 44113b5b7fdSSasha Neftin 44213b5b7fdSSasha Neftin return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; 44313b5b7fdSSasha Neftin } 44413b5b7fdSSasha Neftin 4455586838fSSasha Neftin static inline s32 igc_get_phy_info(struct igc_hw *hw) 4465586838fSSasha Neftin { 4475586838fSSasha Neftin if (hw->phy.ops.get_phy_info) 4485586838fSSasha Neftin return hw->phy.ops.get_phy_info(hw); 4495586838fSSasha Neftin 4505586838fSSasha Neftin return 0; 4515586838fSSasha Neftin } 4525586838fSSasha Neftin 4535586838fSSasha Neftin static inline s32 igc_reset_phy(struct igc_hw *hw) 4545586838fSSasha Neftin { 4555586838fSSasha Neftin if (hw->phy.ops.reset) 4565586838fSSasha Neftin return hw->phy.ops.reset(hw); 4575586838fSSasha Neftin 4585586838fSSasha Neftin return 0; 4595586838fSSasha Neftin } 4605586838fSSasha Neftin 46113b5b7fdSSasha Neftin static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) 46213b5b7fdSSasha Neftin { 46313b5b7fdSSasha Neftin return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); 46413b5b7fdSSasha Neftin } 46513b5b7fdSSasha Neftin 
46613b5b7fdSSasha Neftin enum igc_ring_flags_t { 46713b5b7fdSSasha Neftin IGC_RING_FLAG_RX_3K_BUFFER, 46813b5b7fdSSasha Neftin IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, 46913b5b7fdSSasha Neftin IGC_RING_FLAG_RX_SCTP_CSUM, 47013b5b7fdSSasha Neftin IGC_RING_FLAG_RX_LB_VLAN_BSWAP, 47113b5b7fdSSasha Neftin IGC_RING_FLAG_TX_CTX_IDX, 47213b5b7fdSSasha Neftin IGC_RING_FLAG_TX_DETECT_HANG 47313b5b7fdSSasha Neftin }; 47413b5b7fdSSasha Neftin 47513b5b7fdSSasha Neftin #define ring_uses_large_buffer(ring) \ 47613b5b7fdSSasha Neftin test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) 47713b5b7fdSSasha Neftin 47813b5b7fdSSasha Neftin #define ring_uses_build_skb(ring) \ 47913b5b7fdSSasha Neftin test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) 48013b5b7fdSSasha Neftin 48113b5b7fdSSasha Neftin static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) 48213b5b7fdSSasha Neftin { 48313b5b7fdSSasha Neftin #if (PAGE_SIZE < 8192) 48413b5b7fdSSasha Neftin if (ring_uses_large_buffer(ring)) 48513b5b7fdSSasha Neftin return IGC_RXBUFFER_3072; 48613b5b7fdSSasha Neftin 48713b5b7fdSSasha Neftin if (ring_uses_build_skb(ring)) 48813b5b7fdSSasha Neftin return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; 48913b5b7fdSSasha Neftin #endif 49013b5b7fdSSasha Neftin return IGC_RXBUFFER_2048; 49113b5b7fdSSasha Neftin } 49213b5b7fdSSasha Neftin 49313b5b7fdSSasha Neftin static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) 49413b5b7fdSSasha Neftin { 49513b5b7fdSSasha Neftin #if (PAGE_SIZE < 8192) 49613b5b7fdSSasha Neftin if (ring_uses_large_buffer(ring)) 49713b5b7fdSSasha Neftin return 1; 49813b5b7fdSSasha Neftin #endif 49913b5b7fdSSasha Neftin return 0; 50013b5b7fdSSasha Neftin } 50113b5b7fdSSasha Neftin 502208983f0SSasha Neftin static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) 503208983f0SSasha Neftin { 504208983f0SSasha Neftin if (hw->phy.ops.read_reg) 505208983f0SSasha Neftin return hw->phy.ops.read_reg(hw, offset, data); 506208983f0SSasha Neftin 
507208983f0SSasha Neftin return 0; 508208983f0SSasha Neftin } 509208983f0SSasha Neftin 5108c5ad0daSSasha Neftin /* forward declaration */ 5118c5ad0daSSasha Neftin void igc_reinit_locked(struct igc_adapter *); 5126245c848SSasha Neftin int igc_add_filter(struct igc_adapter *adapter, 5136245c848SSasha Neftin struct igc_nfc_filter *input); 5146245c848SSasha Neftin int igc_erase_filter(struct igc_adapter *adapter, 5156245c848SSasha Neftin struct igc_nfc_filter *input); 5168c5ad0daSSasha Neftin 51713b5b7fdSSasha Neftin #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) 51813b5b7fdSSasha Neftin 5190507ef8aSSasha Neftin #define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) 5200507ef8aSSasha Neftin 52113b5b7fdSSasha Neftin #define IGC_RX_DESC(R, i) \ 52213b5b7fdSSasha Neftin (&(((union igc_adv_rx_desc *)((R)->desc))[i])) 52313b5b7fdSSasha Neftin #define IGC_TX_DESC(R, i) \ 52413b5b7fdSSasha Neftin (&(((union igc_adv_tx_desc *)((R)->desc))[i])) 52513b5b7fdSSasha Neftin #define IGC_TX_CTXTDESC(R, i) \ 52613b5b7fdSSasha Neftin (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) 52713b5b7fdSSasha Neftin 528d89f8841SSasha Neftin #endif /* _IGC_H_ */ 529