/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER			"10.4u"
#define DRV_NAME		"be2net"
#define BE_NAME			"Emulex BladeEngine2"
#define BE3_NAME		"Emulex BladeEngine3"
#define OC_NAME			"Emulex OneConnect"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"Emulex OneConnect NIC Driver"

#define BE_VENDOR_ID		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6		0x728	/* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1	0xE602
#define OC_SUBSYS_DEVICE_ID2	0xE642
#define OC_SUBSYS_DEVICE_ID3	0xE612
#define OC_SUBSYS_DEVICE_ID4	0xE652

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		128u
#define	BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE2_MAX_RSS_QS		4
#define BE3_MAX_RSS_QS		16
#define BE3_MAX_TX_QS		16
#define BE3_MAX_EVT_QS		16
#define BE3_SRIOV_MAX_EVT_QS	8

#define MAX_RX_QS		32
#define MAX_EVT_QS		32
#define MAX_TX_QS		32

#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	32
#define MIN_MSIX_VECTORS	1
#define BE_TX_BUDGET		256
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32

#define	RSS_INDIR_TABLE_LEN	128
#define RSS_HASH_KEY_LEN	40

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void index_dec(u16 *index, u16 limit)
{
	*index = MODULO((*index - 1), limit);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
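
/* Illustrative sketch (not the actual TX/RX path): the helpers above treat a
 * be_queue_info as a power-of-2 ring, so MODULO() reduces to a mask and the
 * head/tail indices simply wrap.  A producer step might look like this,
 * assuming 'q' points at a created queue whose dma_mem is already allocated:
 *
 *	void *wrb = queue_head_node(q);		(next free slot)
 *	... fill 'wrb' ...
 *	queue_head_inc(q);			(head wraps via MODULO())
 *	atomic_inc(&q->used);
 *
 * and the matching consumer step:
 *
 *	void *done = queue_tail_node(q);	(oldest outstanding slot)
 *	... process 'done' ...
 *	queue_tail_inc(q);
 *	atomic_dec(&q->used);
 */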

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 eqd;		/* configured val when aic is off */
	u32 cur_eqd;		/* in usecs */

	u8 idx;			/* array index */
	u8 msix_idx;
	u16 tx_budget;
	u16 spurious_intr;
	struct napi_struct napi;
	struct be_adapter *adapter;

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BE_EQ_IDLE		0
#define BE_EQ_NAPI		1	/* napi owns this EQ */
#define BE_EQ_POLL		2	/* poll owns this EQ */
#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL)
#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */
#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */
#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD)
	unsigned int state;
	spinlock_t lock;	/* lock to serialize napi and busy-poll */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;

struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	bool enable;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 prev_eqd;		/* in usecs */
	u32 et_eqd;		/* configured val when aic is off */
	ulong jiffies;
	u64 rx_pkts_prev;	/* Used to calculate RX pps */
	u64 tx_reqs_prev;	/* Used to calculate TX pps */
};

enum {
	NAPI_POLLING,
	BUSY_POLLING
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	u32 tx_drv_drops;	/* pkts dropped by driver */
	/* the error counters are described in be_ethtool.c */
	u32 tx_hdr_parse_err;
	u32 tx_dma_err;
	u32 tx_tso_err;
	u32 tx_spoof_check_err;
	u32 tx_qinq_err;
	u32 tx_internal_parity_err;
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};

struct be_tx_obj {
	u32 db_offset;
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	/* set to page-addr for last frag of the page & frag-addr otherwise */
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_frag;		/* last frag of the page */
};

struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	struct u64_stats_sync sync;
};

struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 port;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 qnq;
	u8 pkt_type;
	u8 ip_frag;
	u8 tunneled;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;

struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 eth_red_drops;
	u32 dma_map_errors;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_filtered;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
	u32 rx_roce_bytes_lsd;
	u32 rx_roce_bytes_msd;
	u32 rx_roce_frames;
	u32 roce_drops_payload_len;
	u32 roce_drops_crc;
};

/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
#define BE_RESET_VLAN_TAG_ID	0xFFFF

struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 vlan_tag;
	u32 tx_rate;
	u32 plink_tracking;
};

enum vf_state {
	ENABLED = 0,
	ASSIGNED = 1
};

#define BE_FLAGS_LINK_STATUS_INIT		1
#define BE_FLAGS_SRIOV_ENABLED			(1 << 2)
#define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
#define BE_FLAGS_VLAN_PROMISC			(1 << 4)
#define BE_FLAGS_MCAST_PROMISC			(1 << 5)
#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)
#define BE_FLAGS_SETUP_DONE			(1 << 13)

#define BE_UC_PMAC_COUNT			30
#define BE_VF_UC_PMAC_COUNT			2

/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP			0x1
#define LANCER_DELETE_FW_DUMP			0x2

struct phy_info {
	u8 transceiver;
	u8 autoneg;
	u8 fc_autoneg;
	u8 port_type;
	u16 phy_type;
	u16 interface_type;
	u32 misc_params;
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	int link_speed;
	u32 dac_cable_len;
	u32 advertising;
	u32 supported;
};

struct be_resources {
	u16 max_vfs;		/* Total VFs "really" supported by FW/HW */
	u16 max_mcast_mac;
	u16 max_tx_qs;
	u16 max_rss_qs;
	u16 max_rx_qs;
	u16 max_uc_mac;		/* Max UC MACs programmable */
	u16 max_vlans;		/* Number of vlans supported */
	u16 max_evt_qs;
	u32 if_cap_flags;
	u32 vf_if_cap_flags;	/* VF if capability flags */
};

struct rss_info {
	u64 rss_flags;
	u8 rsstable[RSS_INDIR_TABLE_LEN];
	u8 rss_queue[RSS_INDIR_TABLE_LEN];
	u8 rss_hkey[RSS_HASH_KEY_LEN];
};

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored so it can be freed later */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	u16 cfg_num_qs;		/* configured via set-channels */
	u16 num_evt_qs;
	u16 num_msix_vec;
	struct be_eq_obj eq_obj[MAX_EVT_QS];
	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
	bool isr_registered;

	/* TX Rings */
	u16 num_tx_qs;
	struct be_tx_obj tx_obj[MAX_TX_QS];

	/* Rx rings */
	u16 num_rx_qs;
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;
	struct be_aic_obj aic_obj[MAX_EVT_QS];
	u16 vlans_added;
	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	struct delayed_work func_recovery_work;
	u32 flags;
	u32 cmd_privileges;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	char fw_on_flash[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 *pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_error;
	bool fw_timeout;
	bool hw_error;

	u32 port_num;
	bool promiscuous;
	u8 mc_type;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool stats_cmd_sent;
	struct {
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion et_cmd_compl;

	struct be_resources pool_res;	/* resources available for the port */
	struct be_resources res;	/* resources available for the func */
	u16 num_vfs;			/* Number of VFs provisioned by PF */
	u8 virtfn;
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
	__be16 vxlan_port;
	struct phy_info phy;
	u8 wol_cap;
	bool wol_en;
	u32 uc_macs;		/* Count of secondary UC MAC programmed */
	u16 asic_rev;
	u16 qnq_vid;
	u32 msg_enable;
	int be_get_temp_freq;
	u8 pf_number;
	struct rss_info rss_info;
};

#define be_physfn(adapter)		(!adapter->virtfn)
#define be_virtfn(adapter)		(adapter->virtfn)
#define sriov_enabled(adapter)		(adapter->flags &	\
					 BE_FLAGS_SRIOV_ENABLED)

#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

#define ON				1
#define OFF				0

#define be_max_vlans(adapter)		(adapter->res.max_vlans)
#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
#define be_max_vfs(adapter)		(adapter->pool_res.max_vfs)
#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
#define be_max_eqs(adapter)		(adapter->res.max_evt_qs)
#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)

static inline u16 be_max_qs(struct be_adapter *adapter)
{
	/* If no RSS, we still need at least the one default RXQ */
	u16 num = max_t(u16, be_max_rss(adapter), 1);

	num = min(num, be_max_eqs(adapter));
	return min_t(u16, num, num_online_cpus());
}
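
/* Worked example (illustrative numbers only): on a function reporting
 * be_max_rss() == 8 and be_max_eqs() == 6, with 4 CPUs online, be_max_qs()
 * evaluates min(max(8, 1), 6, 4) == 4, i.e. one queue per online CPU.
 */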

/* Is BE in pvid_tagging mode */
#define be_pvid_tagging_enabled(adapter)	(adapter->pvid)

/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter)		(adapter->function_mode & QNQ_MODE)

#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
				 adapter->pdev->device == OC_DEVICE_ID6)

#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
				 adapter->pdev->device == OC_DEVICE_ID2)

#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
				 adapter->pdev->device == OC_DEVICE_ID1)

#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))

#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
					(adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define num_irqs(adapter)		(msix_enabled(adapter) ?	\
						adapter->num_msix_vec : 1)
#define tx_stats(txo)			(&(txo)->stats)
#define rx_stats(rxo)			(&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)		(&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Skip the default non-rss queue (last one) */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)				\
	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
		i++, eqo++)

#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\
	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
		 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)

#define is_mcc_eqo(eqo)			(eqo->idx == 0)
#define mcc_eqo(adapter)		(&adapter->eq_obj[0])

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
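
/* Worked example (illustrative): a 4096-byte buffer that starts 16 bytes into
 * a 4K page straddles a page boundary, so PAGES_4K_SPANNED(addr, 4096)
 * evaluates to ((16 + 4096 + 4095) >> 12) == 2; the same buffer starting on a
 * 4K boundary yields 1.
 */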

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
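
/* Usage sketch: the amap_* structs in be_hw.h declare each bitfield as a u8
 * array with one element per bit, so sizeof(field) is the field's width in
 * bits and its byte offset within the struct equals its bit offset within the
 * descriptor.  Reading a field from a raw descriptor is then a one-liner,
 * e.g. (assuming the vlan_tag field as defined in be_hw.h):
 *
 *	u32 vtag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
 *
 * The GET_*_BITS()/SET_*_BITS() macros below are thin wrappers over the same
 * helpers for the common descriptor types.
 */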

#define GET_RX_COMPL_V0_BITS(field, ptr)				\
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr)

#define GET_RX_COMPL_V1_BITS(field, ptr)				\
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr)

#define GET_TX_COMPL_BITS(field, ptr)					\
		AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr)

#define SET_TX_WRB_HDR_BITS(field, ptr, val)				\
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val)

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}

#define be_cmd_status(status)		(status > 0 ? -EIO : status)

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}
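
/* Illustrative example: if the PF MAC were aa:bb:cc:dd:ee:ff and jhash()
 * returned 0x00123456, the generated VF MAC would be aa:bb:cc:12:34:56;
 * the OUI is preserved and the low three bytes come from the hash.
 */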

static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}

static inline bool be_hw_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error;
}

static inline void be_clear_all_error(struct be_adapter *adapter)
{
	adapter->eeh_error = false;
	adapter->hw_error = false;
	adapter->fw_timeout = false;
}

static inline bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (!be_physfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* Acquiring the napi lock on the eqo is enough to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}
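
/* Ownership protocol for the helpers above: an EQ is owned either by the napi
 * handler (BE_EQ_NAPI) or by a busy-polling user context (BE_EQ_POLL).  A
 * contender that finds the EQ already locked records the corresponding
 * *_YIELD flag and backs off instead of spinning; be_disable_busy_poll()
 * takes napi ownership so that be_busy_poll() can no longer run on this EQ.
 */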

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		  u16 num_popped);
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0;

	sscanf(fw_ver, "%d.", &fw_major);

	return fw_major;
}

int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);

/*
 * Internal functions to initialize and clean up the RoCE device.
 */
void be_roce_dev_add(struct be_adapter *);
void be_roce_dev_remove(struct be_adapter *);

/*
 * Internal functions to open and close the RoCE device on ifup/ifdown.
 */
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);

#endif				/* BE_H */