19aebddd1SJeff Kirsher /*
2c7bb15a6SVasundhara Volam  * Copyright (C) 2005 - 2013 Emulex
39aebddd1SJeff Kirsher  * All rights reserved.
49aebddd1SJeff Kirsher  *
59aebddd1SJeff Kirsher  * This program is free software; you can redistribute it and/or
69aebddd1SJeff Kirsher  * modify it under the terms of the GNU General Public License version 2
79aebddd1SJeff Kirsher  * as published by the Free Software Foundation.  The full GNU General
89aebddd1SJeff Kirsher  * Public License is included in this distribution in the file called COPYING.
99aebddd1SJeff Kirsher  *
109aebddd1SJeff Kirsher  * Contact Information:
119aebddd1SJeff Kirsher  * linux-drivers@emulex.com
129aebddd1SJeff Kirsher  *
139aebddd1SJeff Kirsher  * Emulex
149aebddd1SJeff Kirsher  * 3333 Susan Street
159aebddd1SJeff Kirsher  * Costa Mesa, CA 92626
169aebddd1SJeff Kirsher  */
179aebddd1SJeff Kirsher 
189aebddd1SJeff Kirsher #ifndef BE_H
199aebddd1SJeff Kirsher #define BE_H
209aebddd1SJeff Kirsher 
219aebddd1SJeff Kirsher #include <linux/pci.h>
229aebddd1SJeff Kirsher #include <linux/etherdevice.h>
239aebddd1SJeff Kirsher #include <linux/delay.h>
249aebddd1SJeff Kirsher #include <net/tcp.h>
259aebddd1SJeff Kirsher #include <net/ip.h>
269aebddd1SJeff Kirsher #include <net/ipv6.h>
279aebddd1SJeff Kirsher #include <linux/if_vlan.h>
289aebddd1SJeff Kirsher #include <linux/workqueue.h>
299aebddd1SJeff Kirsher #include <linux/interrupt.h>
309aebddd1SJeff Kirsher #include <linux/firmware.h>
319aebddd1SJeff Kirsher #include <linux/slab.h>
329aebddd1SJeff Kirsher #include <linux/u64_stats_sync.h>
339aebddd1SJeff Kirsher 
349aebddd1SJeff Kirsher #include "be_hw.h"
35045508a8SParav Pandit #include "be_roce.h"
369aebddd1SJeff Kirsher 
/* Driver version and the human-readable adapter names reported to users */
#define DRV_VER			"4.9.224.0u"
#define DRV_NAME		"be2net"
#define BE_NAME			"Emulex BladeEngine2"
#define BE3_NAME		"Emulex BladeEngine3"
#define OC_NAME			"Emulex OneConnect"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"Emulex OneConnect NIC Driver"

/* PCI vendor and device ids for all supported ASIC generations */
#define BE_VENDOR_ID 		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6		0x728   /* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1	0xE602
#define OC_SUBSYS_DEVICE_ID2	0xE642
#define OC_SUBSYS_DEVICE_ID3	0xE612
#define OC_SUBSYS_DEVICE_ID4	0xE652
619aebddd1SJeff Kirsher 
629aebddd1SJeff Kirsher static inline char *nic_name(struct pci_dev *pdev)
639aebddd1SJeff Kirsher {
649aebddd1SJeff Kirsher 	switch (pdev->device) {
659aebddd1SJeff Kirsher 	case OC_DEVICE_ID1:
669aebddd1SJeff Kirsher 		return OC_NAME;
679aebddd1SJeff Kirsher 	case OC_DEVICE_ID2:
689aebddd1SJeff Kirsher 		return OC_NAME_BE;
699aebddd1SJeff Kirsher 	case OC_DEVICE_ID3:
709aebddd1SJeff Kirsher 	case OC_DEVICE_ID4:
719aebddd1SJeff Kirsher 		return OC_NAME_LANCER;
729aebddd1SJeff Kirsher 	case BE_DEVICE_ID2:
739aebddd1SJeff Kirsher 		return BE3_NAME;
74ecedb6aeSAjit Khaparde 	case OC_DEVICE_ID5:
7576b73530SPadmanabh Ratnakar 	case OC_DEVICE_ID6:
76ecedb6aeSAjit Khaparde 		return OC_NAME_SH;
779aebddd1SJeff Kirsher 	default:
789aebddd1SJeff Kirsher 		return BE_NAME;
799aebddd1SJeff Kirsher 	}
809aebddd1SJeff Kirsher }
819aebddd1SJeff Kirsher 
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_UMC_NUM_VLANS_SUPPORTED	15
#define BE_MAX_EQD		128u
#define	BE_MAX_TX_FRAG_COUNT	30

/* Ring depths; RX_Q_LEN is fixed by HW */
#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

/* Per-chip-generation queue-count limits */
#define BE2_MAX_RSS_QS		4
#define BE3_MAX_RSS_QS		16
#define BE3_MAX_TX_QS		16
#define BE3_MAX_EVT_QS		16
#define BE3_SRIOV_MAX_EVT_QS	8

/* Array-size upper bounds used in struct be_adapter */
#define MAX_RX_QS		32
#define MAX_EVT_QS		32
#define MAX_TX_QS		32

#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	32
#define MIN_MSIX_VECTORS	1
#define BE_TX_BUDGET		256
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32
1239aebddd1SJeff Kirsher 
/* A DMA-coherent buffer: CPU virtual address, bus address and length */
struct be_dma_mem {
	void *va;	/* CPU-side virtual address */
	dma_addr_t dma;	/* bus address programmed into the HW */
	u32 size;	/* size in bytes */
};

/* Generic ring (EQ/CQ/TXQ/RXQ/MCCQ) backed by one DMA buffer */
struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;	/* number of entries; power of 2 (see MODULO) */
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};
1399aebddd1SJeff Kirsher 
1409aebddd1SJeff Kirsher static inline u32 MODULO(u16 val, u16 limit)
1419aebddd1SJeff Kirsher {
1429aebddd1SJeff Kirsher 	BUG_ON(limit & (limit - 1));
1439aebddd1SJeff Kirsher 	return val & (limit - 1);
1449aebddd1SJeff Kirsher }
1459aebddd1SJeff Kirsher 
1469aebddd1SJeff Kirsher static inline void index_adv(u16 *index, u16 val, u16 limit)
1479aebddd1SJeff Kirsher {
1489aebddd1SJeff Kirsher 	*index = MODULO((*index + val), limit);
1499aebddd1SJeff Kirsher }
1509aebddd1SJeff Kirsher 
1519aebddd1SJeff Kirsher static inline void index_inc(u16 *index, u16 limit)
1529aebddd1SJeff Kirsher {
1539aebddd1SJeff Kirsher 	*index = MODULO((*index + 1), limit);
1549aebddd1SJeff Kirsher }
1559aebddd1SJeff Kirsher 
1569aebddd1SJeff Kirsher static inline void *queue_head_node(struct be_queue_info *q)
1579aebddd1SJeff Kirsher {
1589aebddd1SJeff Kirsher 	return q->dma_mem.va + q->head * q->entry_size;
1599aebddd1SJeff Kirsher }
1609aebddd1SJeff Kirsher 
1619aebddd1SJeff Kirsher static inline void *queue_tail_node(struct be_queue_info *q)
1629aebddd1SJeff Kirsher {
1639aebddd1SJeff Kirsher 	return q->dma_mem.va + q->tail * q->entry_size;
1649aebddd1SJeff Kirsher }
1659aebddd1SJeff Kirsher 
1663de09455SSomnath Kotur static inline void *queue_index_node(struct be_queue_info *q, u16 index)
1673de09455SSomnath Kotur {
1683de09455SSomnath Kotur 	return q->dma_mem.va + index * q->entry_size;
1693de09455SSomnath Kotur }
1703de09455SSomnath Kotur 
1719aebddd1SJeff Kirsher static inline void queue_head_inc(struct be_queue_info *q)
1729aebddd1SJeff Kirsher {
1739aebddd1SJeff Kirsher 	index_inc(&q->head, q->len);
1749aebddd1SJeff Kirsher }
1759aebddd1SJeff Kirsher 
176652bf646SPadmanabh Ratnakar static inline void index_dec(u16 *index, u16 limit)
177652bf646SPadmanabh Ratnakar {
178652bf646SPadmanabh Ratnakar 	*index = MODULO((*index - 1), limit);
179652bf646SPadmanabh Ratnakar }
180652bf646SPadmanabh Ratnakar 
1819aebddd1SJeff Kirsher static inline void queue_tail_inc(struct be_queue_info *q)
1829aebddd1SJeff Kirsher {
1839aebddd1SJeff Kirsher 	index_inc(&q->tail, q->len);
1849aebddd1SJeff Kirsher }
1859aebddd1SJeff Kirsher 
/* Per-vector event queue state; cacheline-aligned to limit false
 * sharing between vectors on SMP.
 */
struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 eqd;		/* configured val when aic is off */
	u32 cur_eqd;		/* in usecs */

	u8 idx;			/* array index */
	u8 msix_idx;		/* MSI-X vector index */
	u16 tx_budget;
	u16 spurious_intr;
	struct napi_struct napi;
	struct be_adapter *adapter;

#ifdef CONFIG_NET_RX_BUSY_POLL
/* 'state' bits: who currently owns the EQ and who had to yield it */
#define BE_EQ_IDLE		0
#define BE_EQ_NAPI		1	/* napi owns this EQ */
#define BE_EQ_POLL		2	/* poll owns this EQ */
#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL)
#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */
#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */
#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD)
	unsigned int state;
	spinlock_t lock;	/* lock to serialize napi and busy-poll */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
2179aebddd1SJeff Kirsher 
struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	bool enable;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 prev_eqd;		/* in usecs */
	u32 et_eqd;		/* configured val when aic is off */
	ulong jiffies;
	u64 rx_pkts_prev;	/* Used to calculate RX pps */
	u64 tx_reqs_prev;	/* Used to calculate TX pps */
};

/* Context in which completion processing is running */
enum {
	NAPI_POLLING,
	BUSY_POLLING
};

/* MCC (management command channel) queue pair */
struct be_mcc_obj {
	struct be_queue_info q;		/* command queue */
	struct be_queue_info cq;	/* completion queue */
	bool rearm_cq;
};
2399aebddd1SJeff Kirsher 
/* SW TX statistics; the u64_stats_sync members guard 64-bit reads */
struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	u32 tx_drv_drops;	/* pkts dropped by driver */
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};

/* One TX queue: work queue, completion queue and in-flight skb list */
struct be_tx_obj {
	u32 db_offset;		/* doorbell offset for this queue */
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;
2619aebddd1SJeff Kirsher 
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);	/* DMA address kept for unmapping */
	u16 page_offset;
	bool last_page_user;
};

/* SW RX statistics; 'sync' guards 64-bit counter reads */
struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	struct u64_stats_sync sync;
};
2819aebddd1SJeff Kirsher 
/* Decoded fields of one RX completion descriptor */
struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 port;
	u8 vlanf;	/* VLAN tag present */
	u8 num_rcvd;
	u8 err;		/* completion reported an error */
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 vtm;
	u8 pkt_type;
	u8 ip_frag;
};

/* One RX queue: ring, completion queue and posted-page table */
struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;	/* scratch for the compl in progress */
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;
3119aebddd1SJeff Kirsher 
/* HW/FW statistics mirrored by the driver (mostly drop/error counters) */
struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 eth_red_drops;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_filtered;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
	/* RoCE counters */
	u32 rx_roce_bytes_lsd;
	u32 rx_roce_bytes_msd;
	u32 rx_roce_frames;
	u32 roce_drops_payload_len;
	u32 roce_drops_crc;
};
3529aebddd1SJeff Kirsher 
/* Per-VF configuration kept by the PF */
struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 def_vid;	/* default VLAN id */
	u16 vlan_tag;
	u32 tx_rate;
};

enum vf_state {
	ENABLED = 0,
	ASSIGNED = 1
};

/* adapter->flags bits (not every bit position is used here) */
#define BE_FLAGS_LINK_STATUS_INIT		1
#define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
#define BE_FLAGS_VLAN_PROMISC			(1 << 4)
#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
#define BE_UC_PMAC_COUNT		30
#define BE_VF_UC_PMAC_COUNT		2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)

/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP			0x1
3775c510811SSomnath Kotur 
/* PHY/transceiver capabilities and current link parameters */
struct phy_info {
	u8 transceiver;
	u8 autoneg;
	u8 fc_autoneg;		/* flow-control autonegotiation */
	u8 port_type;
	u16 phy_type;
	u16 interface_type;
	u32 misc_params;
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	int link_speed;
	u32 dac_cable_len;
	u32 advertising;
	u32 supported;
};
39342f11cf2SAjit Khaparde 
/* Resource limits provisioned to this function by FW */
struct be_resources {
	u16 max_vfs;		/* Total VFs "really" supported by FW/HW */
	u16 max_mcast_mac;
	u16 max_tx_qs;
	u16 max_rss_qs;
	u16 max_rx_qs;
	u16 max_uc_mac;		/* Max UC MACs programmable */
	u16 max_vlans;		/* Number of vlans supported */
	u16 max_evt_qs;
	u32 if_cap_flags;
};
40592bf14abSSathya Perla 
/* Per-function (PCI device) driver state */
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	/* Event queues / interrupt vectors */
	u16 cfg_num_qs;		/* configured via set-channels */
	u16 num_evt_qs;
	u16 num_msix_vec;
	struct be_eq_obj eq_obj[MAX_EVT_QS];
	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
	bool isr_registered;

	/* TX Rings */
	u16 num_tx_qs;
	struct be_tx_obj tx_obj[MAX_TX_QS];

	/* Rx rings */
	u16 num_rx_qs;
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;
	struct be_aic_obj aic_obj[MAX_EVT_QS];
	u16 vlans_added;
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	struct delayed_work func_recovery_work;
	u32 flags;		/* BE_FLAGS_* bits */
	u32 cmd_privileges;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	char fw_on_flash[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 *pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	/* Error-state flags */
	bool eeh_error;
	bool fw_timeout;
	bool hw_error;

	u32 port_num;
	bool promiscuous;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool stats_cmd_sent;
	/* RoCE doorbell window shared with the ocrdma driver */
	struct {
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion et_cmd_compl;

	struct be_resources res;	/* resources available for the func */
	u16 num_vfs;			/* Number of VFs provisioned by PF */
	u8 virtfn;			/* non-zero when this function is a VF */
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
	struct phy_info phy;
	u8 wol_cap;
	bool wol_en;
	u32 uc_macs;		/* Count of secondary UC MAC programmed */
	u16 asic_rev;
	u16 qnq_vid;
	u32 msg_enable;
	int be_get_temp_freq;
	u8 pf_number;
	u64 rss_flags;
};
5049aebddd1SJeff Kirsher 
/* adapter->virtfn is zero on the physical function */
#define be_physfn(adapter)		(!adapter->virtfn)
#define be_virtfn(adapter)		(adapter->virtfn)
#define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define sriov_want(adapter)             (be_physfn(adapter) &&	\
					 (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

#define ON				1
#define OFF				0

/* Accessors for the FW-provisioned limits in struct be_resources */
#define be_max_vlans(adapter)		(adapter->res.max_vlans)
#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
#define be_max_vfs(adapter)		(adapter->res.max_vfs)
#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
/* NOTE(review): struct be_resources in this file has no max_prio_tx_qs
 * member; this macro only compiles while it stays unused -- verify.
 */
#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
#define be_max_eqs(adapter)		(adapter->res.max_evt_qs)
#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)
52792bf14abSSathya Perla 
52892bf14abSSathya Perla static inline u16 be_max_qs(struct be_adapter *adapter)
52992bf14abSSathya Perla {
53092bf14abSSathya Perla 	/* If no RSS, need atleast the one def RXQ */
53192bf14abSSathya Perla 	u16 num = max_t(u16, be_max_rss(adapter), 1);
53292bf14abSSathya Perla 
53392bf14abSSathya Perla 	num = min(num, be_max_eqs(adapter));
53492bf14abSSathya Perla 	return min_t(u16, num, num_online_cpus());
53592bf14abSSathya Perla }
53692bf14abSSathya Perla 
/* Chip-generation tests based on the PCI device id */
#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
				 adapter->pdev->device == OC_DEVICE_ID6)

#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
				 adapter->pdev->device == OC_DEVICE_ID2)

#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
				 adapter->pdev->device == OC_DEVICE_ID1)

#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))

/* RoCE is available only on Skyhawk with RDMA enabled in FW */
#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
					(adapter->function_mode & RDMA_ENABLED))
553045508a8SParav Pandit 
extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define num_irqs(adapter)		(msix_enabled(adapter) ?	\
						adapter->num_msix_vec : 1)
#define tx_stats(txo)			(&(txo)->stats)
#define rx_stats(rxo)			(&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)		(&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Skip the default non-rss queue (last one)*/
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)				\
	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
		i++, eqo++)

/* RXQs are spread across EQs with stride num_evt_qs, starting at eqo->idx */
#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\
	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
		 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)

/* MCC completions are serviced by EQ 0 */
#define is_mcc_eqo(eqo)			(eqo->idx == 0)
#define mcc_eqo(adapter)		(&adapter->eq_obj[0])
58810ef9ab4SSathya Perla 
#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of 4K pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)
6009aebddd1SJeff Kirsher 
6019aebddd1SJeff Kirsher /* Returns the bit mask of the field that is NOT shifted into location. */
6029aebddd1SJeff Kirsher static inline u32 amap_mask(u32 bitsize)
6039aebddd1SJeff Kirsher {
6049aebddd1SJeff Kirsher 	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
6059aebddd1SJeff Kirsher }
6069aebddd1SJeff Kirsher 
6079aebddd1SJeff Kirsher static inline void
6089aebddd1SJeff Kirsher amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
6099aebddd1SJeff Kirsher {
6109aebddd1SJeff Kirsher 	u32 *dw = (u32 *) ptr + dw_offset;
6119aebddd1SJeff Kirsher 	*dw &= ~(mask << offset);
6129aebddd1SJeff Kirsher 	*dw |= (mask & value) << offset;
6139aebddd1SJeff Kirsher }
6149aebddd1SJeff Kirsher 
/* Set a named bitfield in an AMAP-described HW descriptor */
#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)
6219aebddd1SJeff Kirsher 
6229aebddd1SJeff Kirsher static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
6239aebddd1SJeff Kirsher {
6249aebddd1SJeff Kirsher 	u32 *dw = (u32 *) ptr;
6259aebddd1SJeff Kirsher 	return mask & (*(dw + dw_offset) >> offset);
6269aebddd1SJeff Kirsher }
6279aebddd1SJeff Kirsher 
/* Read bitfield 'field' of the AMAP-style '_struct' at 'ptr' */
#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
6339aebddd1SJeff Kirsher 
/* Convert each 32-bit word of a buffer between CPU and little-endian
 * (hardware) byte order, in place.  The swap is its own inverse, so one
 * helper serves both directions; on little-endian hosts it is a no-op.
 * @len is in bytes and must be a multiple of 4.
 */
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;

	BUG_ON(len % 4);
	/* Use while (not do-while) so that len == 0 is a harmless no-op
	 * instead of an out-of-bounds write followed by an underflowing,
	 * never-terminating loop.
	 */
	while (len) {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	}
#endif				/* __BIG_ENDIAN */
}
6489aebddd1SJeff Kirsher 
6499aebddd1SJeff Kirsher static inline u8 is_tcp_pkt(struct sk_buff *skb)
6509aebddd1SJeff Kirsher {
6519aebddd1SJeff Kirsher 	u8 val = 0;
6529aebddd1SJeff Kirsher 
6539aebddd1SJeff Kirsher 	if (ip_hdr(skb)->version == 4)
6549aebddd1SJeff Kirsher 		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
6559aebddd1SJeff Kirsher 	else if (ip_hdr(skb)->version == 6)
6569aebddd1SJeff Kirsher 		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
6579aebddd1SJeff Kirsher 
6589aebddd1SJeff Kirsher 	return val;
6599aebddd1SJeff Kirsher }
6609aebddd1SJeff Kirsher 
6619aebddd1SJeff Kirsher static inline u8 is_udp_pkt(struct sk_buff *skb)
6629aebddd1SJeff Kirsher {
6639aebddd1SJeff Kirsher 	u8 val = 0;
6649aebddd1SJeff Kirsher 
6659aebddd1SJeff Kirsher 	if (ip_hdr(skb)->version == 4)
6669aebddd1SJeff Kirsher 		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
6679aebddd1SJeff Kirsher 	else if (ip_hdr(skb)->version == 6)
6689aebddd1SJeff Kirsher 		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
6699aebddd1SJeff Kirsher 
6709aebddd1SJeff Kirsher 	return val;
6719aebddd1SJeff Kirsher }
6729aebddd1SJeff Kirsher 
67393040ae5SSomnath Kotur static inline bool is_ipv4_pkt(struct sk_buff *skb)
67493040ae5SSomnath Kotur {
675e8efcec5SLi RongQing 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
67693040ae5SSomnath Kotur }
67793040ae5SSomnath Kotur 
6789aebddd1SJeff Kirsher static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
6799aebddd1SJeff Kirsher {
6809aebddd1SJeff Kirsher 	u32 addr;
6819aebddd1SJeff Kirsher 
6829aebddd1SJeff Kirsher 	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
6839aebddd1SJeff Kirsher 
6849aebddd1SJeff Kirsher 	mac[5] = (u8)(addr & 0xFF);
6859aebddd1SJeff Kirsher 	mac[4] = (u8)((addr >> 8) & 0xFF);
6869aebddd1SJeff Kirsher 	mac[3] = (u8)((addr >> 16) & 0xFF);
6879aebddd1SJeff Kirsher 	/* Use the OUI from the current MAC address */
6889aebddd1SJeff Kirsher 	memcpy(mac, adapter->netdev->dev_addr, 3);
6899aebddd1SJeff Kirsher }
6909aebddd1SJeff Kirsher 
6919aebddd1SJeff Kirsher static inline bool be_multi_rxq(const struct be_adapter *adapter)
6929aebddd1SJeff Kirsher {
6939aebddd1SJeff Kirsher 	return adapter->num_rx_qs > 1;
6949aebddd1SJeff Kirsher }
6959aebddd1SJeff Kirsher 
6966589ade0SSathya Perla static inline bool be_error(struct be_adapter *adapter)
6976589ade0SSathya Perla {
698f67ef7baSPadmanabh Ratnakar 	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
699f67ef7baSPadmanabh Ratnakar }
700f67ef7baSPadmanabh Ratnakar 
701d23e946cSSathya Perla static inline bool be_hw_error(struct be_adapter *adapter)
702f67ef7baSPadmanabh Ratnakar {
703f67ef7baSPadmanabh Ratnakar 	return adapter->eeh_error || adapter->hw_error;
704f67ef7baSPadmanabh Ratnakar }
705f67ef7baSPadmanabh Ratnakar 
706f67ef7baSPadmanabh Ratnakar static inline void  be_clear_all_error(struct be_adapter *adapter)
707f67ef7baSPadmanabh Ratnakar {
708f67ef7baSPadmanabh Ratnakar 	adapter->eeh_error = false;
709f67ef7baSPadmanabh Ratnakar 	adapter->hw_error = false;
710f67ef7baSPadmanabh Ratnakar 	adapter->fw_timeout = false;
7116589ade0SSathya Perla }
7126589ade0SSathya Perla 
7134762f6ceSAjit Khaparde static inline bool be_is_wol_excluded(struct be_adapter *adapter)
7144762f6ceSAjit Khaparde {
7154762f6ceSAjit Khaparde 	struct pci_dev *pdev = adapter->pdev;
7164762f6ceSAjit Khaparde 
7174762f6ceSAjit Khaparde 	if (!be_physfn(adapter))
7184762f6ceSAjit Khaparde 		return true;
7194762f6ceSAjit Khaparde 
7204762f6ceSAjit Khaparde 	switch (pdev->subsystem_device) {
7214762f6ceSAjit Khaparde 	case OC_SUBSYS_DEVICE_ID1:
7224762f6ceSAjit Khaparde 	case OC_SUBSYS_DEVICE_ID2:
7234762f6ceSAjit Khaparde 	case OC_SUBSYS_DEVICE_ID3:
7244762f6ceSAjit Khaparde 	case OC_SUBSYS_DEVICE_ID4:
7254762f6ceSAjit Khaparde 		return true;
7264762f6ceSAjit Khaparde 	default:
7274762f6ceSAjit Khaparde 		return false;
7284762f6ceSAjit Khaparde 	}
7294762f6ceSAjit Khaparde }
7304762f6ceSAjit Khaparde 
/* Nonzero once BE_FLAGS_QNQ_ASYNC_EVT_RCVD has been set on the adapter */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
735bc0c3405SAjit Khaparde 
#ifdef CONFIG_NET_RX_BUSY_POLL
/* The eqo->state bits arbitrate ownership of an EQ between the NAPI
 * context and the busy-poll context, which must not process the EQ's
 * queues concurrently.  All transitions happen under eqo->lock.
 */

/* Try to take the EQ for NAPI processing.  Returns false (after recording
 * a NAPI yield) if busy-poll currently owns the EQ.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for busy-poll processing.  Returns false (after
 * recording a poll yield) if NAPI currently owns the EQ.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* (Re)initialize the EQ's busy-poll lock and mark it idle */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Spin (with BHs disabled) until busy-poll can no longer run on this EQ */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Busy-poll is compiled out: NAPI always gets the EQ and the remaining
 * helpers are no-ops.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
8356384a4d0SSathya Perla 
83631886e87SJoe Perches void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
8379aebddd1SJeff Kirsher 		  u16 num_popped);
83831886e87SJoe Perches void be_link_status_update(struct be_adapter *adapter, u8 link_status);
83931886e87SJoe Perches void be_parse_stats(struct be_adapter *adapter);
84031886e87SJoe Perches int be_load_fw(struct be_adapter *adapter, u8 *func);
84131886e87SJoe Perches bool be_is_wol_supported(struct be_adapter *adapter);
84231886e87SJoe Perches bool be_pause_supported(struct be_adapter *adapter);
84331886e87SJoe Perches u32 be_get_fw_log_level(struct be_adapter *adapter);
844394efd19SDavid S. Miller 
/* Parse the major number out of a "major.minor..." firmware version
 * string.  Returns 0 when the string does not start with a decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
853e9e2a904SSomnath Kotur 
85468d7bdcbSSathya Perla int be_update_queues(struct be_adapter *adapter);
85568d7bdcbSSathya Perla int be_poll(struct napi_struct *napi, int budget);
856941a77d5SSomnath Kotur 
857045508a8SParav Pandit /*
858045508a8SParav Pandit  * internal function to initialize-cleanup roce device.
859045508a8SParav Pandit  */
86031886e87SJoe Perches void be_roce_dev_add(struct be_adapter *);
86131886e87SJoe Perches void be_roce_dev_remove(struct be_adapter *);
862045508a8SParav Pandit 
863045508a8SParav Pandit /*
864045508a8SParav Pandit  * internal function to open-close roce device during ifup-ifdown.
865045508a8SParav Pandit  */
86631886e87SJoe Perches void be_roce_dev_open(struct be_adapter *);
86731886e87SJoe Perches void be_roce_dev_close(struct be_adapter *);
868045508a8SParav Pandit 
8699aebddd1SJeff Kirsher #endif				/* BE_H */
870