/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x09
#define T4FW_VERSION_MICRO 0x17
#define T4FW_VERSION_BUILD 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x09
#define T5FW_VERSION_MICRO 0x17
#define T5FW_VERSION_BUILD 0x00

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_BASE_T5  = 0x54000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct tp_params {
	unsigned int ntxchan;        /* # of Tx channels */
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
				     /* channel map */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */

	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};
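
/* Illustrative sketch only (not taken from the driver itself): a caller
 * that wants to place a protocol value into a Compressed Filter Tuple
 * would first check that the field is actually present, e.g.
 *
 *	if (tp->protocol_shift >= 0)
 *		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 *
 * Fields whose shift is -1 are simply skipped because TP_VLAN_PRI_MAP
 * does not select them.
 */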

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4		0x4
#define CHELSIO_T5		0x5

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV	= T4_A1,
	T4_LAST_REV	= T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV	= T5_A0,
	T5_LAST_REV	= T5_A1,
};
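
/* Worked example (for illustration only): CHELSIO_CHIP_CODE(CHELSIO_T5, 1)
 * evaluates to (0x5 << 4) | 1 == 0x51, so T5_A1 == 0x51,
 * CHELSIO_CHIP_VERSION(T5_A1) == 0x5 and CHELSIO_CHIP_RELEASE(T5_A1) == 0x1.
 */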

struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */
	unsigned int sf_fw_start;         /* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;               /* chip code */
	unsigned char offload;

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
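
/* For illustration: FW_VERSION(T4) combines the T4FW_VERSION_* constants
 * above (0x01.0x09.0x17.0x00) into a single 32-bit firmware version word,
 * which can then be compared against the version reported by the adapter
 * (e.g. adapter_params.fw_vers).  FW_VERSION(T5) does the same with the
 * T5FW_VERSION_* constants.
 */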

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};


struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};

enum {
	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};

struct adapter;
struct sge_rspq;

struct port_info {
	struct adapter *adapter;
	u16    viid;
	s16    xact_addr_filt;        /* index of exact MAC address filter */
	u16    rss_size;              /* size of VI's RSS table slice */
	s8     mdio_addr;
	u8     port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;                 /* associated offload logical port */
	u8     nqsets;                /* # of qsets */
	u8     first_qset;            /* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	USING_MSI          = (1 << 1),
	USING_MSIX         = (1 << 2),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	u64 udb;
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */
	unsigned int starve_thres;
	u8 idma_state[2];
	unsigned int egr_start;
	unsigned int ingr_start;
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)

struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int fn;
	unsigned int flags;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];                   /* channel -> port map */

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;
	struct list_head rcu_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
};

/* Defined bit widths of user-definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field
 * matching rules are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
	uint32_t pfvf_vld:1;                    /* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;     /* count filter hits in TCB */
	uint32_t prio:1;        /* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;      /* drop, pass, switch */

	uint32_t rpttid:1;      /* report TID in RSS hash field */

	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;         /* ingress queue */

	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/*             1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;       /* egress port to switch packet out */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
	uint8_t smac[ETH_ALEN]; /* new source MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};
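
/* Purely illustrative sketch of how a caller might fill in a filter
 * specification (the field values here are hypothetical): drop IPv4 TCP
 * traffic destined to local port 80, regardless of ingress port:
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.type = 0;				(IPv4)
 *	fs.action = FILTER_DROP;
 *	fs.val.proto = IPPROTO_TCP;	fs.mask.proto = 0xff;
 *	fs.val.lport = 80;		fs.mask.lport = 0xffff;
 *
 * Any field left as a (0, 0) value/mask pair acts as a wildcard, per the
 * matching rules described above.
 */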
754f2b7e78dSVipul Pandya 
7552422d9a3SSantosh Rastapur static inline int is_t5(enum chip_type chip)
7562422d9a3SSantosh Rastapur {
757d14807ddSHariprasad Shenai 	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
7582422d9a3SSantosh Rastapur }
7592422d9a3SSantosh Rastapur 
7602422d9a3SSantosh Rastapur static inline int is_t4(enum chip_type chip)
7612422d9a3SSantosh Rastapur {
762d14807ddSHariprasad Shenai 	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
7632422d9a3SSantosh Rastapur }
7642422d9a3SSantosh Rastapur 
765f7917c00SJeff Kirsher static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
766f7917c00SJeff Kirsher {
767f7917c00SJeff Kirsher 	return readl(adap->regs + reg_addr);
768f7917c00SJeff Kirsher }
769f7917c00SJeff Kirsher 
770f7917c00SJeff Kirsher static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
771f7917c00SJeff Kirsher {
772f7917c00SJeff Kirsher 	writel(val, adap->regs + reg_addr);
773f7917c00SJeff Kirsher }
774f7917c00SJeff Kirsher 
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)
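
/* Illustrative use of the iteration helper above (sketch only):
 *
 *	int i;
 *
 *	for_each_port(adap, i) {
 *		struct port_info *pi = adap2pinfo(adap, i);
 *		...
 *	}
 */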

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
		adapter->params.vpd.cclk);
}
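
/* Worked example (assuming, for illustration, a core clock of 250000 kHz in
 * params.vpd.cclk): core_ticks_per_usec() returns 250,
 * us_to_core_ticks(adap, 4) returns 1000, and core_ticks_to_us() rounds to
 * the nearest microsecond by adding cclk/2 before dividing.
 */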

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
		    __be32 *buf);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
	       u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
		u64 *parity);

void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			  unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		int mtu, int promisc, int all_multi, int bcast, int vlanex,
		bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
#endif /* __CXGB4_H__ */