/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER			"10.2u"
#define DRV_NAME		"be2net"
#define BE_NAME			"Emulex BladeEngine2"
#define BE3_NAME		"Emulex BladeEngine3"
#define OC_NAME			"Emulex OneConnect"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"Emulex OneConnect NIC Driver"

#define BE_VENDOR_ID 		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6		0x728	/* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1	0xE602
#define OC_SUBSYS_DEVICE_ID2	0xE642
#define OC_SUBSYS_DEVICE_ID3	0xE612
#define OC_SUBSYS_DEVICE_ID4	0xE652

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		128u
#define	BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE2_MAX_RSS_QS		4
#define BE3_MAX_RSS_QS		16
#define BE3_MAX_TX_QS		16
#define BE3_MAX_EVT_QS		16
#define BE3_SRIOV_MAX_EVT_QS	8

#define MAX_RX_QS		32
#define MAX_EVT_QS		32
#define MAX_TX_QS		32

#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	32
#define MIN_MSIX_VECTORS	1
#define BE_TX_BUDGET		256
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void index_dec(u16 *index, u16 limit)
{
	*index = MODULO((*index - 1), limit);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
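
/* Illustrative sketch (not part of the driver): the helpers above implement a
 * power-of-2 ring. A typical consume step reads the entry at 'tail', advances
 * 'tail' with wrap-around and drops the 'used' count. The function name below
 * is hypothetical and only shows how the accessors compose.
 */
static inline void *be_example_queue_consume(struct be_queue_info *q)
{
	void *node = queue_tail_node(q);	/* entry currently at tail */

	queue_tail_inc(q);			/* wraps at q->len (power of 2) */
	atomic_dec(&q->used);			/* one fewer valid element */
	return node;
}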

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 eqd;		/* configured val when aic is off */
	u32 cur_eqd;		/* in usecs */

	u8 idx;			/* array index */
	u8 msix_idx;
	u16 tx_budget;
	u16 spurious_intr;
	struct napi_struct napi;
	struct be_adapter *adapter;

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BE_EQ_IDLE		0
#define BE_EQ_NAPI		1	/* napi owns this EQ */
#define BE_EQ_POLL		2	/* poll owns this EQ */
#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL)
#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */
#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */
#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD)
	unsigned int state;
	spinlock_t lock;	/* lock to serialize napi and busy-poll */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;

struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	bool enable;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 prev_eqd;		/* in usecs */
	u32 et_eqd;		/* configured val when aic is off */
	ulong jiffies;
	u64 rx_pkts_prev;	/* Used to calculate RX pps */
	u64 tx_reqs_prev;	/* Used to calculate TX pps */
};

enum {
	NAPI_POLLING,
	BUSY_POLLING
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	u32 tx_drv_drops;	/* pkts dropped by driver */
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};

struct be_tx_obj {
	u32 db_offset;
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	/* set to page-addr for last frag of the page & frag-addr otherwise */
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_frag;		/* last frag of the page */
};

struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has not fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	struct u64_stats_sync sync;
};

struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 port;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 qnq;
	u8 pkt_type;
	u8 ip_frag;
	u8 tunneled;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;

struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 eth_red_drops;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_filtered;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
	u32 rx_roce_bytes_lsd;
	u32 rx_roce_bytes_msd;
	u32 rx_roce_frames;
	u32 roce_drops_payload_len;
	u32 roce_drops_crc;
};

/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
#define BE_RESET_VLAN_TAG_ID	0xFFFF

struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 vlan_tag;
	u32 tx_rate;
	u32 plink_tracking;
};

enum vf_state {
	ENABLED = 0,
	ASSIGNED = 1
};

#define BE_FLAGS_LINK_STATUS_INIT		1
#define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
#define BE_FLAGS_VLAN_PROMISC			(1 << 4)
#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)

#define BE_UC_PMAC_COUNT			30
#define BE_VF_UC_PMAC_COUNT			2
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP			0x1

struct phy_info {
	u8 transceiver;
	u8 autoneg;
	u8 fc_autoneg;
	u8 port_type;
	u16 phy_type;
	u16 interface_type;
	u32 misc_params;
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	int link_speed;
	u32 dac_cable_len;
	u32 advertising;
	u32 supported;
};

struct be_resources {
	u16 max_vfs;		/* Total VFs "really" supported by FW/HW */
	u16 max_mcast_mac;
	u16 max_tx_qs;
	u16 max_rss_qs;
	u16 max_rx_qs;
	u16 max_uc_mac;		/* Max UC MACs programmable */
	u16 max_vlans;		/* Number of vlans supported */
	u16 max_evt_qs;
	u32 if_cap_flags;
};

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored so it can be freed later */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	u16 cfg_num_qs;		/* configured via set-channels */
	u16 num_evt_qs;
	u16 num_msix_vec;
	struct be_eq_obj eq_obj[MAX_EVT_QS];
	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
	bool isr_registered;

	/* TX Rings */
	u16 num_tx_qs;
	struct be_tx_obj tx_obj[MAX_TX_QS];

	/* Rx rings */
	u16 num_rx_qs;
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;
	struct be_aic_obj aic_obj[MAX_EVT_QS];
	u16 vlans_added;
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	struct delayed_work func_recovery_work;
	u32 flags;
	u32 cmd_privileges;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	char fw_on_flash[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 *pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_error;
	bool fw_timeout;
	bool hw_error;

	u32 port_num;
	bool promiscuous;
	u8 mc_type;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool stats_cmd_sent;
	struct {
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion et_cmd_compl;

	struct be_resources res;	/* resources available for the func */
	u16 num_vfs;			/* Number of VFs provisioned by PF */
	u8 virtfn;
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
	__be16 vxlan_port;
	struct phy_info phy;
	u8 wol_cap;
	bool wol_en;
	u32 uc_macs;		/* Count of secondary UC MAC programmed */
	u16 asic_rev;
	u16 qnq_vid;
	u32 msg_enable;
	int be_get_temp_freq;
	u8 pf_number;
	u64 rss_flags;
};

#define be_physfn(adapter)		(!adapter->virtfn)
#define be_virtfn(adapter)		(adapter->virtfn)
#define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define sriov_want(adapter)		(be_physfn(adapter) &&	\
					 (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

#define ON				1
#define OFF				0

#define be_max_vlans(adapter)		(adapter->res.max_vlans)
#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
#define be_max_vfs(adapter)		(adapter->res.max_vfs)
#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
#define be_max_eqs(adapter)		(adapter->res.max_evt_qs)
#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)

static inline u16 be_max_qs(struct be_adapter *adapter)
{
	/* If there is no RSS, we need at least the one default RXQ */
	u16 num = max_t(u16, be_max_rss(adapter), 1);

	num = min(num, be_max_eqs(adapter));
	return min_t(u16, num, num_online_cpus());
}

/* Is BE in pvid_tagging mode */
#define be_pvid_tagging_enabled(adapter)	(adapter->pvid)

/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter)		(adapter->mc_type == FLEX10 ||  \
					 adapter->mc_type == vNIC1 ||	\
					 adapter->mc_type == UFP)

#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
				 adapter->pdev->device == OC_DEVICE_ID6)

#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
				 adapter->pdev->device == OC_DEVICE_ID2)

#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
				 adapter->pdev->device == OC_DEVICE_ID1)

#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))

#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
					(adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define num_irqs(adapter)		(msix_enabled(adapter) ?	\
						adapter->num_msix_vec : 1)
#define tx_stats(txo)			(&(txo)->stats)
#define rx_stats(rxo)			(&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)		(&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Skip the default non-rss queue (last one) */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)				\
	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
		i++, eqo++)

#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\
	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
		 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
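/* Illustrative note: the macro above stripes RX queues across event queues.
 * With (hypothetically) num_rx_qs = 8 and num_evt_qs = 4, the EQ whose idx
 * is 1 services rx_obj[1] and rx_obj[5].
 */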

#define is_mcc_eqo(eqo)			(eqo->idx == 0)
#define mcc_eqo(adapter)		(&adapter->eq_obj[0])

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
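/* Worked example (illustrative): for data starting at offset 0xFF0 within a
 * 4K page and a size of 0x20 bytes, the macro yields
 * (0xFF0 + 0x20 + 0xFFF) >> 12 = 2, i.e. the data straddles two 4K pages.
 */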

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
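/* Illustrative sketch: the amap_* layouts in be_hw.h declare every hardware
 * bit-field as a u8 member (or u8 array) whose size is the field width in
 * bits, so sizeof() gives the width and offsetof() the bit position. With a
 * hypothetical layout such as
 *
 *	struct amap_example { u8 valid; u8 rsvd0[21]; u8 frag_index[10]; };
 *
 * a field is written to and read back from a descriptor at dma_va with
 *
 *	AMAP_SET_BITS(struct amap_example, frag_index, dma_va, 5);
 *	idx = AMAP_GET_BITS(struct amap_example, frag_index, dma_va);
 *
 * 'struct amap_example' and 'dma_va' are made up for this comment; real
 * layouts live in be_hw.h, and descriptors are typically converted to CPU
 * endianness first (see be_dws_le_to_cpu() below).
 */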

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}
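
/* Note (illustrative): on little-endian hosts the two wrappers above are
 * no-ops; on big-endian hosts each 32-bit dword of the WRB/completion is
 * byte-swapped in place, and the same swap serves both directions.
 */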

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}

static inline bool be_hw_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error;
}

static inline void be_clear_all_error(struct be_adapter *adapter)
{
	adapter->eeh_error = false;
	adapter->hw_error = false;
	adapter->fw_timeout = false;
}

static inline bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (!be_physfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		  u16 num_popped);
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0;

	sscanf(fw_ver, "%d.", &fw_major);

	return fw_major;
}

int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);

/*
 * internal function to initialize-cleanup roce device.
 */
void be_roce_dev_add(struct be_adapter *);
void be_roce_dev_remove(struct be_adapter *);

/*
 * internal function to open-close roce device during ifup-ifdown.
 */
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);

#endif				/* BE_H */