1d89f8841SSasha Neftin /* SPDX-License-Identifier: GPL-2.0 */ 2d89f8841SSasha Neftin /* Copyright (c) 2018 Intel Corporation */ 3d89f8841SSasha Neftin 4d89f8841SSasha Neftin #ifndef _IGC_H_ 5d89f8841SSasha Neftin #define _IGC_H_ 6d89f8841SSasha Neftin 7d89f8841SSasha Neftin #include <linux/kobject.h> 8d89f8841SSasha Neftin 9d89f8841SSasha Neftin #include <linux/pci.h> 10d89f8841SSasha Neftin #include <linux/netdevice.h> 11d89f8841SSasha Neftin #include <linux/vmalloc.h> 12d89f8841SSasha Neftin 13d89f8841SSasha Neftin #include <linux/ethtool.h> 14d89f8841SSasha Neftin 15d89f8841SSasha Neftin #include <linux/sctp.h> 16d89f8841SSasha Neftin 17d89f8841SSasha Neftin #define IGC_ERR(args...) pr_err("igc: " args) 18d89f8841SSasha Neftin 19d89f8841SSasha Neftin #define PFX "igc: " 20d89f8841SSasha Neftin 21d89f8841SSasha Neftin #include <linux/timecounter.h> 22d89f8841SSasha Neftin #include <linux/net_tstamp.h> 23d89f8841SSasha Neftin #include <linux/ptp_clock_kernel.h> 24d89f8841SSasha Neftin 25146740f9SSasha Neftin #include "igc_hw.h" 26146740f9SSasha Neftin 27d89f8841SSasha Neftin /* main */ 28d89f8841SSasha Neftin extern char igc_driver_name[]; 29d89f8841SSasha Neftin extern char igc_driver_version[]; 30d89f8841SSasha Neftin 313df25e4cSSasha Neftin /* Interrupt defines */ 323df25e4cSSasha Neftin #define IGC_START_ITR 648 /* ~6000 ints/sec */ 333df25e4cSSasha Neftin #define IGC_FLAG_HAS_MSI BIT(0) 343df25e4cSSasha Neftin #define IGC_FLAG_QUEUE_PAIRS BIT(4) 350507ef8aSSasha Neftin #define IGC_FLAG_NEED_LINK_UPDATE BIT(9) 363df25e4cSSasha Neftin #define IGC_FLAG_HAS_MSIX BIT(13) 370507ef8aSSasha Neftin #define IGC_FLAG_VLAN_PROMISC BIT(15) 383df25e4cSSasha Neftin 393df25e4cSSasha Neftin #define IGC_START_ITR 648 /* ~6000 ints/sec */ 403df25e4cSSasha Neftin #define IGC_4K_ITR 980 413df25e4cSSasha Neftin #define IGC_20K_ITR 196 423df25e4cSSasha Neftin #define IGC_70K_ITR 56 433df25e4cSSasha Neftin 440507ef8aSSasha Neftin #define IGC_DEFAULT_ITR 3 /* dynamic */ 
/* ITR bounds in microseconds and MSI-X vector accounting */
#define IGC_MAX_ITR_USECS	10000
#define IGC_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_MSIX_ENTRIES	10

/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD		256
#define IGC_DEFAULT_TX_WORK	128
#define IGC_MIN_TXD		80
#define IGC_MAX_TXD		4096

#define IGC_DEFAULT_RXD		256
#define IGC_MIN_RXD		80
#define IGC_MAX_RXD		4096

/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES	4
#define IGC_MAX_TX_QUEUES	4

#define MAX_Q_VECTORS		8
#define MAX_STD_JUMBO_FRAME_SIZE	9216

/* Supported Rx Buffer Sizes (bytes) */
#define IGC_RXBUFFER_256	256
#define IGC_RXBUFFER_2048	2048
#define IGC_RXBUFFER_3072	3072

#define IGC_RX_HDR_LEN		IGC_RXBUFFER_256

/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH	8
#define IGC_RX_HTHRESH	8
#define IGC_TX_PTHRESH	8
#define IGC_TX_HTHRESH	1
#define IGC_RX_WTHRESH	4
#define IGC_TX_WTHRESH	16

/* DMA attributes used when mapping Rx buffers */
#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Length of the hardware timestamp header prepended to Rx packets */
#define IGC_TS_HDR_LEN		16

#define IGC_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)

/* Largest frame that fits in a build_skb Rx buffer; on small-page systems
 * the skb_shared_info overhead and padding must be subtracted.
 */
#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
*/ 1070507ef8aSSasha Neftin #define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 1080507ef8aSSasha Neftin 1090507ef8aSSasha Neftin /* igc_test_staterr - tests bits within Rx descriptor status and error fields */ 1100507ef8aSSasha Neftin static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, 1110507ef8aSSasha Neftin const u32 stat_err_bits) 1120507ef8aSSasha Neftin { 1130507ef8aSSasha Neftin return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); 1140507ef8aSSasha Neftin } 1150507ef8aSSasha Neftin 116c9a11c23SSasha Neftin enum igc_state_t { 117c9a11c23SSasha Neftin __IGC_TESTING, 118c9a11c23SSasha Neftin __IGC_RESETTING, 119c9a11c23SSasha Neftin __IGC_DOWN, 120c9a11c23SSasha Neftin __IGC_PTP_TX_IN_PROGRESS, 121c9a11c23SSasha Neftin }; 122c9a11c23SSasha Neftin 1230507ef8aSSasha Neftin enum igc_tx_flags { 1240507ef8aSSasha Neftin /* cmd_type flags */ 1250507ef8aSSasha Neftin IGC_TX_FLAGS_VLAN = 0x01, 1260507ef8aSSasha Neftin IGC_TX_FLAGS_TSO = 0x02, 1270507ef8aSSasha Neftin IGC_TX_FLAGS_TSTAMP = 0x04, 1280507ef8aSSasha Neftin 1290507ef8aSSasha Neftin /* olinfo flags */ 1300507ef8aSSasha Neftin IGC_TX_FLAGS_IPV4 = 0x10, 1310507ef8aSSasha Neftin IGC_TX_FLAGS_CSUM = 0x20, 1320507ef8aSSasha Neftin }; 1330507ef8aSSasha Neftin 1340507ef8aSSasha Neftin /* The largest size we can write to the descriptor is 65535. In order to 1350507ef8aSSasha Neftin * maintain a power of two alignment we have to limit ourselves to 32K. 
1360507ef8aSSasha Neftin */ 1370507ef8aSSasha Neftin #define IGC_MAX_TXD_PWR 15 1380507ef8aSSasha Neftin #define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) 1390507ef8aSSasha Neftin 1400507ef8aSSasha Neftin /* Tx Descriptors needed, worst case */ 1410507ef8aSSasha Neftin #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) 1420507ef8aSSasha Neftin #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 1430507ef8aSSasha Neftin 14413b5b7fdSSasha Neftin /* wrapper around a pointer to a socket buffer, 14513b5b7fdSSasha Neftin * so a DMA handle can be stored along with the buffer 14613b5b7fdSSasha Neftin */ 14713b5b7fdSSasha Neftin struct igc_tx_buffer { 14813b5b7fdSSasha Neftin union igc_adv_tx_desc *next_to_watch; 14913b5b7fdSSasha Neftin unsigned long time_stamp; 15013b5b7fdSSasha Neftin struct sk_buff *skb; 15113b5b7fdSSasha Neftin unsigned int bytecount; 15213b5b7fdSSasha Neftin u16 gso_segs; 15313b5b7fdSSasha Neftin __be16 protocol; 15413b5b7fdSSasha Neftin 15513b5b7fdSSasha Neftin DEFINE_DMA_UNMAP_ADDR(dma); 15613b5b7fdSSasha Neftin DEFINE_DMA_UNMAP_LEN(len); 15713b5b7fdSSasha Neftin u32 tx_flags; 15813b5b7fdSSasha Neftin }; 15913b5b7fdSSasha Neftin 16013b5b7fdSSasha Neftin struct igc_rx_buffer { 16113b5b7fdSSasha Neftin dma_addr_t dma; 16213b5b7fdSSasha Neftin struct page *page; 16313b5b7fdSSasha Neftin #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) 16413b5b7fdSSasha Neftin __u32 page_offset; 16513b5b7fdSSasha Neftin #else 16613b5b7fdSSasha Neftin __u16 page_offset; 16713b5b7fdSSasha Neftin #endif 16813b5b7fdSSasha Neftin __u16 pagecnt_bias; 16913b5b7fdSSasha Neftin }; 17013b5b7fdSSasha Neftin 1713df25e4cSSasha Neftin struct igc_tx_queue_stats { 1723df25e4cSSasha Neftin u64 packets; 1733df25e4cSSasha Neftin u64 bytes; 1743df25e4cSSasha Neftin u64 restart_queue; 1750507ef8aSSasha Neftin u64 restart_queue2; 1763df25e4cSSasha Neftin }; 1773df25e4cSSasha Neftin 1783df25e4cSSasha Neftin struct igc_rx_queue_stats { 1793df25e4cSSasha Neftin u64 packets; 1803df25e4cSSasha 
Neftin u64 bytes; 1813df25e4cSSasha Neftin u64 drops; 1823df25e4cSSasha Neftin u64 csum_err; 1833df25e4cSSasha Neftin u64 alloc_failed; 1843df25e4cSSasha Neftin }; 1853df25e4cSSasha Neftin 1863df25e4cSSasha Neftin struct igc_rx_packet_stats { 1873df25e4cSSasha Neftin u64 ipv4_packets; /* IPv4 headers processed */ 1883df25e4cSSasha Neftin u64 ipv4e_packets; /* IPv4E headers with extensions processed */ 1893df25e4cSSasha Neftin u64 ipv6_packets; /* IPv6 headers processed */ 1903df25e4cSSasha Neftin u64 ipv6e_packets; /* IPv6E headers with extensions processed */ 1913df25e4cSSasha Neftin u64 tcp_packets; /* TCP headers processed */ 1923df25e4cSSasha Neftin u64 udp_packets; /* UDP headers processed */ 1933df25e4cSSasha Neftin u64 sctp_packets; /* SCTP headers processed */ 1943df25e4cSSasha Neftin u64 nfs_packets; /* NFS headers processe */ 1953df25e4cSSasha Neftin u64 other_packets; 1963df25e4cSSasha Neftin }; 1973df25e4cSSasha Neftin 1983df25e4cSSasha Neftin struct igc_ring_container { 1993df25e4cSSasha Neftin struct igc_ring *ring; /* pointer to linked list of rings */ 2003df25e4cSSasha Neftin unsigned int total_bytes; /* total bytes processed this int */ 2013df25e4cSSasha Neftin unsigned int total_packets; /* total packets processed this int */ 2023df25e4cSSasha Neftin u16 work_limit; /* total work allowed per interrupt */ 2033df25e4cSSasha Neftin u8 count; /* total number of rings in vector */ 2043df25e4cSSasha Neftin u8 itr; /* current ITR setting for ring */ 2053df25e4cSSasha Neftin }; 2063df25e4cSSasha Neftin 2073df25e4cSSasha Neftin struct igc_ring { 2083df25e4cSSasha Neftin struct igc_q_vector *q_vector; /* backlink to q_vector */ 2093df25e4cSSasha Neftin struct net_device *netdev; /* back pointer to net_device */ 2103df25e4cSSasha Neftin struct device *dev; /* device for dma mapping */ 2113df25e4cSSasha Neftin union { /* array of buffer info structs */ 2123df25e4cSSasha Neftin struct igc_tx_buffer *tx_buffer_info; 2133df25e4cSSasha Neftin struct igc_rx_buffer 
*rx_buffer_info; 2143df25e4cSSasha Neftin }; 2153df25e4cSSasha Neftin void *desc; /* descriptor ring memory */ 2163df25e4cSSasha Neftin unsigned long flags; /* ring specific flags */ 2173df25e4cSSasha Neftin void __iomem *tail; /* pointer to ring tail register */ 2183df25e4cSSasha Neftin dma_addr_t dma; /* phys address of the ring */ 2193df25e4cSSasha Neftin unsigned int size; /* length of desc. ring in bytes */ 2203df25e4cSSasha Neftin 2213df25e4cSSasha Neftin u16 count; /* number of desc. in the ring */ 2223df25e4cSSasha Neftin u8 queue_index; /* logical index of the ring*/ 2233df25e4cSSasha Neftin u8 reg_idx; /* physical index of the ring */ 2243df25e4cSSasha Neftin 2253df25e4cSSasha Neftin /* everything past this point are written often */ 2263df25e4cSSasha Neftin u16 next_to_clean; 2273df25e4cSSasha Neftin u16 next_to_use; 2283df25e4cSSasha Neftin u16 next_to_alloc; 2293df25e4cSSasha Neftin 2303df25e4cSSasha Neftin union { 2313df25e4cSSasha Neftin /* TX */ 2323df25e4cSSasha Neftin struct { 2333df25e4cSSasha Neftin struct igc_tx_queue_stats tx_stats; 2340507ef8aSSasha Neftin struct u64_stats_sync tx_syncp; 2350507ef8aSSasha Neftin struct u64_stats_sync tx_syncp2; 2363df25e4cSSasha Neftin }; 2373df25e4cSSasha Neftin /* RX */ 2383df25e4cSSasha Neftin struct { 2393df25e4cSSasha Neftin struct igc_rx_queue_stats rx_stats; 2403df25e4cSSasha Neftin struct igc_rx_packet_stats pkt_stats; 2410507ef8aSSasha Neftin struct u64_stats_sync rx_syncp; 2423df25e4cSSasha Neftin struct sk_buff *skb; 2433df25e4cSSasha Neftin }; 2443df25e4cSSasha Neftin }; 2453df25e4cSSasha Neftin } ____cacheline_internodealigned_in_smp; 2463df25e4cSSasha Neftin 247c9a11c23SSasha Neftin struct igc_q_vector { 248c9a11c23SSasha Neftin struct igc_adapter *adapter; /* backlink */ 2493df25e4cSSasha Neftin void __iomem *itr_register; 2503df25e4cSSasha Neftin u32 eims_value; /* EIMS mask value */ 2513df25e4cSSasha Neftin 2523df25e4cSSasha Neftin u16 itr_val; 2533df25e4cSSasha Neftin u8 set_itr; 
2543df25e4cSSasha Neftin 2553df25e4cSSasha Neftin struct igc_ring_container rx, tx; 256c9a11c23SSasha Neftin 257c9a11c23SSasha Neftin struct napi_struct napi; 2583df25e4cSSasha Neftin 2593df25e4cSSasha Neftin struct rcu_head rcu; /* to avoid race with update stats on free */ 2603df25e4cSSasha Neftin char name[IFNAMSIZ + 9]; 2613df25e4cSSasha Neftin struct net_device poll_dev; 2623df25e4cSSasha Neftin 2633df25e4cSSasha Neftin /* for dynamic allocation of rings associated with this q_vector */ 2643df25e4cSSasha Neftin struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; 265c9a11c23SSasha Neftin }; 266c9a11c23SSasha Neftin 267c9a11c23SSasha Neftin struct igc_mac_addr { 268c9a11c23SSasha Neftin u8 addr[ETH_ALEN]; 269c9a11c23SSasha Neftin u8 queue; 270c9a11c23SSasha Neftin u8 state; /* bitmask */ 271c9a11c23SSasha Neftin }; 272c9a11c23SSasha Neftin 273c9a11c23SSasha Neftin #define IGC_MAC_STATE_DEFAULT 0x1 274c9a11c23SSasha Neftin #define IGC_MAC_STATE_MODIFIED 0x2 275c9a11c23SSasha Neftin #define IGC_MAC_STATE_IN_USE 0x4 276c9a11c23SSasha Neftin 277146740f9SSasha Neftin /* Board specific private data structure */ 278146740f9SSasha Neftin struct igc_adapter { 279c9a11c23SSasha Neftin struct net_device *netdev; 280c9a11c23SSasha Neftin 281c9a11c23SSasha Neftin unsigned long state; 282c9a11c23SSasha Neftin unsigned int flags; 283c9a11c23SSasha Neftin unsigned int num_q_vectors; 2843df25e4cSSasha Neftin 2853df25e4cSSasha Neftin struct msix_entry *msix_entries; 2863df25e4cSSasha Neftin 2873df25e4cSSasha Neftin /* TX */ 2883df25e4cSSasha Neftin u16 tx_work_limit; 2893df25e4cSSasha Neftin int num_tx_queues; 2903df25e4cSSasha Neftin struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; 2913df25e4cSSasha Neftin 2923df25e4cSSasha Neftin /* RX */ 2933df25e4cSSasha Neftin int num_rx_queues; 2943df25e4cSSasha Neftin struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; 2953df25e4cSSasha Neftin 2963df25e4cSSasha Neftin struct timer_list watchdog_timer; 2973df25e4cSSasha Neftin struct 
timer_list dma_err_timer; 2983df25e4cSSasha Neftin struct timer_list phy_info_timer; 2993df25e4cSSasha Neftin 300c9a11c23SSasha Neftin u16 link_speed; 301c9a11c23SSasha Neftin u16 link_duplex; 302c9a11c23SSasha Neftin 303c9a11c23SSasha Neftin u8 port_num; 304c9a11c23SSasha Neftin 305146740f9SSasha Neftin u8 __iomem *io_addr; 3063df25e4cSSasha Neftin /* Interrupt Throttle Rate */ 3073df25e4cSSasha Neftin u32 rx_itr_setting; 3083df25e4cSSasha Neftin u32 tx_itr_setting; 3093df25e4cSSasha Neftin 3103df25e4cSSasha Neftin struct work_struct reset_task; 311c9a11c23SSasha Neftin struct work_struct watchdog_task; 3123df25e4cSSasha Neftin struct work_struct dma_err_task; 313c9a11c23SSasha Neftin 3140507ef8aSSasha Neftin u8 tx_timeout_factor; 3150507ef8aSSasha Neftin 316c9a11c23SSasha Neftin int msg_enable; 317c9a11c23SSasha Neftin u32 max_frame_size; 3180507ef8aSSasha Neftin u32 min_frame_size; 319146740f9SSasha Neftin 320146740f9SSasha Neftin /* OS defined structs */ 321146740f9SSasha Neftin struct pci_dev *pdev; 3220507ef8aSSasha Neftin /* lock for statistics */ 3230507ef8aSSasha Neftin spinlock_t stats64_lock; 3240507ef8aSSasha Neftin struct rtnl_link_stats64 stats64; 325146740f9SSasha Neftin 326146740f9SSasha Neftin /* structs defined in igc_hw.h */ 327146740f9SSasha Neftin struct igc_hw hw; 3283df25e4cSSasha Neftin struct igc_hw_stats stats; 329c9a11c23SSasha Neftin 330c9a11c23SSasha Neftin struct igc_q_vector *q_vector[MAX_Q_VECTORS]; 3313df25e4cSSasha Neftin u32 eims_enable_mask; 3323df25e4cSSasha Neftin u32 eims_other; 3333df25e4cSSasha Neftin 3343df25e4cSSasha Neftin u16 tx_ring_count; 3353df25e4cSSasha Neftin u16 rx_ring_count; 3363df25e4cSSasha Neftin 3370507ef8aSSasha Neftin u32 *shadow_vfta; 3380507ef8aSSasha Neftin 3393df25e4cSSasha Neftin u32 rss_queues; 340c9a11c23SSasha Neftin 3410507ef8aSSasha Neftin /* lock for RX network flow classification filter */ 3420507ef8aSSasha Neftin spinlock_t nfc_lock; 3430507ef8aSSasha Neftin 344c9a11c23SSasha Neftin struct 
igc_mac_addr *mac_table; 345146740f9SSasha Neftin }; 346146740f9SSasha Neftin 34713b5b7fdSSasha Neftin /* igc_desc_unused - calculate if we have unused descriptors */ 34813b5b7fdSSasha Neftin static inline u16 igc_desc_unused(const struct igc_ring *ring) 34913b5b7fdSSasha Neftin { 35013b5b7fdSSasha Neftin u16 ntc = ring->next_to_clean; 35113b5b7fdSSasha Neftin u16 ntu = ring->next_to_use; 35213b5b7fdSSasha Neftin 35313b5b7fdSSasha Neftin return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; 35413b5b7fdSSasha Neftin } 35513b5b7fdSSasha Neftin 35613b5b7fdSSasha Neftin static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) 35713b5b7fdSSasha Neftin { 35813b5b7fdSSasha Neftin return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); 35913b5b7fdSSasha Neftin } 36013b5b7fdSSasha Neftin 36113b5b7fdSSasha Neftin enum igc_ring_flags_t { 36213b5b7fdSSasha Neftin IGC_RING_FLAG_RX_3K_BUFFER, 36313b5b7fdSSasha Neftin IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, 36413b5b7fdSSasha Neftin IGC_RING_FLAG_RX_SCTP_CSUM, 36513b5b7fdSSasha Neftin IGC_RING_FLAG_RX_LB_VLAN_BSWAP, 36613b5b7fdSSasha Neftin IGC_RING_FLAG_TX_CTX_IDX, 36713b5b7fdSSasha Neftin IGC_RING_FLAG_TX_DETECT_HANG 36813b5b7fdSSasha Neftin }; 36913b5b7fdSSasha Neftin 37013b5b7fdSSasha Neftin #define ring_uses_large_buffer(ring) \ 37113b5b7fdSSasha Neftin test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) 37213b5b7fdSSasha Neftin 37313b5b7fdSSasha Neftin #define ring_uses_build_skb(ring) \ 37413b5b7fdSSasha Neftin test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) 37513b5b7fdSSasha Neftin 37613b5b7fdSSasha Neftin static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) 37713b5b7fdSSasha Neftin { 37813b5b7fdSSasha Neftin #if (PAGE_SIZE < 8192) 37913b5b7fdSSasha Neftin if (ring_uses_large_buffer(ring)) 38013b5b7fdSSasha Neftin return IGC_RXBUFFER_3072; 38113b5b7fdSSasha Neftin 38213b5b7fdSSasha Neftin if (ring_uses_build_skb(ring)) 38313b5b7fdSSasha Neftin return 
IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; 38413b5b7fdSSasha Neftin #endif 38513b5b7fdSSasha Neftin return IGC_RXBUFFER_2048; 38613b5b7fdSSasha Neftin } 38713b5b7fdSSasha Neftin 38813b5b7fdSSasha Neftin static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) 38913b5b7fdSSasha Neftin { 39013b5b7fdSSasha Neftin #if (PAGE_SIZE < 8192) 39113b5b7fdSSasha Neftin if (ring_uses_large_buffer(ring)) 39213b5b7fdSSasha Neftin return 1; 39313b5b7fdSSasha Neftin #endif 39413b5b7fdSSasha Neftin return 0; 39513b5b7fdSSasha Neftin } 39613b5b7fdSSasha Neftin 39713b5b7fdSSasha Neftin #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) 39813b5b7fdSSasha Neftin 3990507ef8aSSasha Neftin #define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) 4000507ef8aSSasha Neftin 40113b5b7fdSSasha Neftin #define IGC_RX_DESC(R, i) \ 40213b5b7fdSSasha Neftin (&(((union igc_adv_rx_desc *)((R)->desc))[i])) 40313b5b7fdSSasha Neftin #define IGC_TX_DESC(R, i) \ 40413b5b7fdSSasha Neftin (&(((union igc_adv_tx_desc *)((R)->desc))[i])) 40513b5b7fdSSasha Neftin #define IGC_TX_CTXTDESC(R, i) \ 40613b5b7fdSSasha Neftin (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) 40713b5b7fdSSasha Neftin 408d89f8841SSasha Neftin #endif /* _IGC_H_ */ 409