1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #ifndef __CXGB4_H__
36 #define __CXGB4_H__
37 
38 #include "t4_hw.h"
39 
40 #include <linux/bitops.h>
41 #include <linux/cache.h>
42 #include <linux/ethtool.h>
43 #include <linux/interrupt.h>
44 #include <linux/list.h>
45 #include <linux/netdevice.h>
46 #include <linux/pci.h>
47 #include <linux/spinlock.h>
48 #include <linux/timer.h>
49 #include <linux/vmalloc.h>
50 #include <linux/rhashtable.h>
51 #include <linux/etherdevice.h>
52 #include <linux/net_tstamp.h>
53 #include <linux/ptp_clock_kernel.h>
54 #include <linux/ptp_classify.h>
55 #include <linux/crash_dump.h>
56 #include <linux/thermal.h>
57 #include <asm/io.h>
58 #include "t4_chip_type.h"
59 #include "cxgb4_uld.h"
60 #include "t4fw_api.h"
61 
62 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
63 extern struct list_head adapter_list;
64 extern struct list_head uld_list;
65 extern struct mutex uld_mutex;
66 
67 /* Suspend an Ethernet Tx queue with fewer available descriptors than this.
68  * This is the same as calc_tx_descs() for a TSO packet with
69  * nr_frags == MAX_SKB_FRAGS.
70  */
71 #define ETHTXQ_STOP_THRES \
72 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
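
/* Worked example (assuming MAX_SKB_FRAGS == 17, the usual value with 4KB
 * pages): (3 * 17) / 2 + (17 & 1) = 26, and 1 + DIV_ROUND_UP(26, 8) = 5,
 * so the queue is suspended once fewer than 5 Tx descriptors (of 8 flits
 * each, see struct tx_desc below) remain available.
 */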
73 
74 #define FW_PARAM_DEV(param) \
75 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
76 	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
77 
78 #define FW_PARAM_PFVF(param) \
79 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
80 	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) |  \
81 	 FW_PARAMS_PARAM_Y_V(0) | \
82 	 FW_PARAMS_PARAM_Z_V(0))
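
/* Usage sketch (illustrative only): the macros above pack a parameter
 * mnemonic into the 32-bit handle the firmware expects, e.g.
 *
 *	u32 param = FW_PARAM_PFVF(EQ_START);
 *
 * builds a handle whose mnemonic is FW_PARAMS_MNEM_PFVF and whose X index is
 * FW_PARAMS_PARAM_PFVF_EQ_START (from t4fw_api.h), with Y = Z = 0.  Such
 * handles are typically passed in arrays to the firmware parameter
 * query/set mailbox helpers.
 */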
83 
84 enum {
85 	MAX_NPORTS	= 4,     /* max # of ports */
86 	SERNUM_LEN	= 24,    /* Serial # length */
87 	ID_LEN		= 16,    /* ID length */
88 	PN_LEN		= 16,    /* Part Number length */
89 	MACADDR_LEN	= 12,    /* MAC Address length */
90 };
91 
92 enum {
93 	T4_REGMAP_SIZE = (160 * 1024),
94 	T5_REGMAP_SIZE = (332 * 1024),
95 };
96 
97 enum {
98 	MEM_EDC0,
99 	MEM_EDC1,
100 	MEM_MC,
101 	MEM_MC0 = MEM_MC,
102 	MEM_MC1,
103 	MEM_HMA,
104 };
105 
106 enum {
107 	MEMWIN0_APERTURE = 2048,
108 	MEMWIN0_BASE     = 0x1b800,
109 	MEMWIN1_APERTURE = 32768,
110 	MEMWIN1_BASE     = 0x28000,
111 	MEMWIN1_BASE_T5  = 0x52000,
112 	MEMWIN2_APERTURE = 65536,
113 	MEMWIN2_BASE     = 0x30000,
114 	MEMWIN2_APERTURE_T5 = 131072,
115 	MEMWIN2_BASE_T5  = 0x60000,
116 };
117 
118 enum dev_master {
119 	MASTER_CANT,
120 	MASTER_MAY,
121 	MASTER_MUST
122 };
123 
124 enum dev_state {
125 	DEV_STATE_UNINIT,
126 	DEV_STATE_INIT,
127 	DEV_STATE_ERR
128 };
129 
130 enum cc_pause {
131 	PAUSE_RX      = 1 << 0,
132 	PAUSE_TX      = 1 << 1,
133 	PAUSE_AUTONEG = 1 << 2
134 };
135 
136 enum cc_fec {
137 	FEC_AUTO      = 1 << 0,	 /* IEEE 802.3 "automatic" */
138 	FEC_RS        = 1 << 1,  /* Reed-Solomon */
139 	FEC_BASER_RS  = 1 << 2   /* BaseR/Reed-Solomon */
140 };
141 
142 enum {
143 	CXGB4_ETHTOOL_FLASH_FW = 1,
144 	CXGB4_ETHTOOL_FLASH_PHY = 2,
145 	CXGB4_ETHTOOL_FLASH_BOOT = 3,
146 	CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
147 };
148 
149 enum cxgb4_netdev_tls_ops {
150 	CXGB4_TLSDEV_OPS  = 1,
151 	CXGB4_XFRMDEV_OPS
152 };
153 
154 struct cxgb4_bootcfg_data {
155 	__le16 signature;
156 	__u8 reserved[2];
157 };
158 
159 struct cxgb4_pcir_data {
160 	__le32 signature;	/* Signature. The string "PCIR" */
161 	__le16 vendor_id;	/* Vendor Identification */
162 	__le16 device_id;	/* Device Identification */
163 	__u8 vital_product[2];	/* Pointer to Vital Product Data */
164 	__u8 length[2];		/* PCIR Data Structure Length */
165 	__u8 revision;		/* PCIR Data Structure Revision */
166 	__u8 class_code[3];	/* Class Code */
167 	__u8 image_length[2];	/* Image Length. Multiple of 512B */
168 	__u8 code_revision[2];	/* Revision Level of Code/Data */
169 	__u8 code_type;
170 	__u8 indicator;
171 	__u8 reserved[2];
172 };
173 
174 /* BIOS boot headers */
175 struct cxgb4_pci_exp_rom_header {
176 	__le16 signature;	/* ROM Signature. Should be 0xaa55 */
177 	__u8 reserved[22];	/* Reserved per processor Architecture data */
178 	__le16 pcir_offset;	/* Offset to PCI Data Structure */
179 };
180 
181 /* Legacy PCI Expansion ROM Header */
182 struct legacy_pci_rom_hdr {
183 	__u8 signature[2];	/* ROM Signature. Should be 0xaa55 */
184 	__u8 size512;		/* Current Image Size in units of 512 bytes */
185 	__u8 initentry_point[4];
186 	__u8 cksum;		/* Checksum computed on the entire Image */
187 	__u8 reserved[16];	/* Reserved */
	__le16 pcir_offset;	/* Offset to PCI Data Structure */
189 };
190 
191 #define CXGB4_HDR_CODE1 0x00
192 #define CXGB4_HDR_CODE2 0x03
193 #define CXGB4_HDR_INDI 0x80
194 
195 /* BOOT constants */
196 enum {
197 	BOOT_CFG_SIG = 0x4243,
198 	BOOT_SIZE_INC = 512,
199 	BOOT_SIGNATURE = 0xaa55,
200 	BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header),
201 	BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC,
202 	PCIR_SIGNATURE = 0x52494350
203 };
204 
205 struct port_stats {
206 	u64 tx_octets;            /* total # of octets in good frames */
207 	u64 tx_frames;            /* all good frames */
208 	u64 tx_bcast_frames;      /* all broadcast frames */
209 	u64 tx_mcast_frames;      /* all multicast frames */
210 	u64 tx_ucast_frames;      /* all unicast frames */
211 	u64 tx_error_frames;      /* all error frames */
212 
213 	u64 tx_frames_64;         /* # of Tx frames in a particular range */
214 	u64 tx_frames_65_127;
215 	u64 tx_frames_128_255;
216 	u64 tx_frames_256_511;
217 	u64 tx_frames_512_1023;
218 	u64 tx_frames_1024_1518;
219 	u64 tx_frames_1519_max;
220 
221 	u64 tx_drop;              /* # of dropped Tx frames */
222 	u64 tx_pause;             /* # of transmitted pause frames */
223 	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
224 	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
225 	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
226 	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
227 	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
228 	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
229 	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
230 	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */
231 
232 	u64 rx_octets;            /* total # of octets in good frames */
233 	u64 rx_frames;            /* all good frames */
234 	u64 rx_bcast_frames;      /* all broadcast frames */
235 	u64 rx_mcast_frames;      /* all multicast frames */
236 	u64 rx_ucast_frames;      /* all unicast frames */
237 	u64 rx_too_long;          /* # of frames exceeding MTU */
238 	u64 rx_jabber;            /* # of jabber frames */
239 	u64 rx_fcs_err;           /* # of received frames with bad FCS */
240 	u64 rx_len_err;           /* # of received frames with length error */
241 	u64 rx_symbol_err;        /* symbol errors */
242 	u64 rx_runt;              /* # of short frames */
243 
244 	u64 rx_frames_64;         /* # of Rx frames in a particular range */
245 	u64 rx_frames_65_127;
246 	u64 rx_frames_128_255;
247 	u64 rx_frames_256_511;
248 	u64 rx_frames_512_1023;
249 	u64 rx_frames_1024_1518;
250 	u64 rx_frames_1519_max;
251 
252 	u64 rx_pause;             /* # of received pause frames */
253 	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
254 	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
255 	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
256 	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
257 	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
258 	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
259 	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
260 	u64 rx_ppp7;              /* # of received PPP prio 7 frames */
261 
262 	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
263 	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
264 	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
265 	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
266 	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
267 	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
268 	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
269 	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
270 };
271 
272 struct lb_port_stats {
273 	u64 octets;
274 	u64 frames;
275 	u64 bcast_frames;
276 	u64 mcast_frames;
277 	u64 ucast_frames;
278 	u64 error_frames;
279 
280 	u64 frames_64;
281 	u64 frames_65_127;
282 	u64 frames_128_255;
283 	u64 frames_256_511;
284 	u64 frames_512_1023;
285 	u64 frames_1024_1518;
286 	u64 frames_1519_max;
287 
288 	u64 drop;
289 
290 	u64 ovflow0;
291 	u64 ovflow1;
292 	u64 ovflow2;
293 	u64 ovflow3;
294 	u64 trunc0;
295 	u64 trunc1;
296 	u64 trunc2;
297 	u64 trunc3;
298 };
299 
300 struct tp_tcp_stats {
301 	u32 tcp_out_rsts;
302 	u64 tcp_in_segs;
303 	u64 tcp_out_segs;
304 	u64 tcp_retrans_segs;
305 };
306 
307 struct tp_usm_stats {
308 	u32 frames;
309 	u32 drops;
310 	u64 octets;
311 };
312 
313 struct tp_fcoe_stats {
314 	u32 frames_ddp;
315 	u32 frames_drop;
316 	u64 octets_ddp;
317 };
318 
319 struct tp_err_stats {
320 	u32 mac_in_errs[4];
321 	u32 hdr_in_errs[4];
322 	u32 tcp_in_errs[4];
323 	u32 tnl_cong_drops[4];
324 	u32 ofld_chan_drops[4];
325 	u32 tnl_tx_drops[4];
326 	u32 ofld_vlan_drops[4];
327 	u32 tcp6_in_errs[4];
328 	u32 ofld_no_neigh;
329 	u32 ofld_cong_defer;
330 };
331 
332 struct tp_cpl_stats {
333 	u32 req[4];
334 	u32 rsp[4];
335 };
336 
337 struct tp_rdma_stats {
338 	u32 rqe_dfr_pkt;
339 	u32 rqe_dfr_mod;
340 };
341 
342 struct sge_params {
343 	u32 hps;			/* host page size for our PF/VF */
344 	u32 eq_qpp;			/* egress queues/page for our PF/VF */
	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
346 };
347 
348 struct tp_params {
349 	unsigned int tre;            /* log2 of core clocks per TP tick */
350 	unsigned int la_mask;        /* what events are recorded by TP LA */
351 	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
352 				     /* channel map */
353 
354 	uint32_t dack_re;            /* DACK timer resolution */
355 	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
356 
357 	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
358 	u32 filter_mask;
359 	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
360 
361 	/* cached TP_OUT_CONFIG compressed error vector
362 	 * and passing outer header info for encapsulated packets.
363 	 */
364 	int rx_pkt_encap;
365 
366 	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
367 	 * subset of the set of fields which may be present in the Compressed
368 	 * Filter Tuple portion of filters and TCP TCB connections.  The
369 	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
370 	 * Since a variable number of fields may or may not be present, their
371 	 * shifted field positions within the Compressed Filter Tuple may
372 	 * vary, or not even be present if the field isn't selected in
373 	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
374 	 * places we store their offsets here, or a -1 if the field isn't
375 	 * present.
376 	 */
377 	int fcoe_shift;
378 	int port_shift;
379 	int vnic_shift;
380 	int vlan_shift;
381 	int tos_shift;
382 	int protocol_shift;
383 	int ethertype_shift;
384 	int macmatch_shift;
385 	int matchtype_shift;
386 	int frag_shift;
387 
388 	u64 hash_filter_mask;
389 };
390 
391 struct vpd_params {
392 	unsigned int cclk;
393 	u8 sn[SERNUM_LEN + 1];
394 	u8 id[ID_LEN + 1];
395 	u8 pn[PN_LEN + 1];
396 	u8 na[MACADDR_LEN + 1];
397 };
398 
399 /* Maximum resources provisioned for a PCI PF.
400  */
401 struct pf_resources {
402 	unsigned int nvi;		/* N virtual interfaces */
403 	unsigned int neq;		/* N egress Qs */
404 	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
405 	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
406 	unsigned int niq;		/* N ingress Qs */
407 	unsigned int tc;		/* PCI-E traffic class */
408 	unsigned int pmask;		/* port access rights mask */
409 	unsigned int nexactf;		/* N exact MPS filters */
410 	unsigned int r_caps;		/* read capabilities */
411 	unsigned int wx_caps;		/* write/execute capabilities */
412 };
413 
414 struct pci_params {
415 	unsigned char speed;
416 	unsigned char width;
417 };
418 
419 struct devlog_params {
420 	u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
421 	u32 start;                      /* start of log in firmware memory */
422 	u32 size;                       /* size of log */
423 };
424 
425 /* Stores chip specific parameters */
426 struct arch_specific_params {
427 	u8 nchan;
428 	u8 pm_stats_cnt;
429 	u8 cng_ch_bits_log;		/* congestion channel map bits width */
430 	u16 mps_rplc_size;
431 	u16 vfcount;
432 	u32 sge_fl_db;
433 	u16 mps_tcam_size;
434 };
435 
436 struct adapter_params {
437 	struct sge_params sge;
438 	struct tp_params  tp;
439 	struct vpd_params vpd;
440 	struct pf_resources pfres;
441 	struct pci_params pci;
442 	struct devlog_params devlog;
443 	enum pcie_memwin drv_memwin;
444 
445 	unsigned int cim_la_size;
446 
447 	unsigned int sf_size;             /* serial flash size in bytes */
448 	unsigned int sf_nsec;             /* # of flash sectors */
449 
450 	unsigned int fw_vers;		  /* firmware version */
451 	unsigned int bs_vers;		  /* bootstrap version */
452 	unsigned int tp_vers;		  /* TP microcode version */
453 	unsigned int er_vers;		  /* expansion ROM version */
454 	unsigned int scfg_vers;		  /* Serial Configuration version */
455 	unsigned int vpd_vers;		  /* VPD Version */
456 	u8 api_vers[7];
457 
458 	unsigned short mtus[NMTUS];
459 	unsigned short a_wnd[NCCTRL_WIN];
460 	unsigned short b_wnd[NCCTRL_WIN];
461 
462 	unsigned char nports;             /* # of ethernet ports */
463 	unsigned char portvec;
464 	enum chip_type chip;               /* chip code */
465 	struct arch_specific_params arch;  /* chip specific params */
466 	unsigned char offload;
467 	unsigned char crypto;		/* HW capability for crypto */
468 	unsigned char ethofld;		/* QoS support */
469 
470 	unsigned char bypass;
471 	unsigned char hash_filter;
472 
473 	unsigned int ofldq_wr_cred;
474 	bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */
475 
476 	unsigned int nsched_cls;          /* number of traffic classes */
477 	unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
478 	unsigned int max_ird_adapter;     /* Max read depth per adapter */
479 	bool fr_nsmr_tpte_wr_support;	  /* FW support for FR_NSMR_TPTE_WR */
480 	u8 fw_caps_support;		/* 32-bit Port Capabilities */
481 	bool filter2_wr_support;	/* FW support for FILTER2_WR */
482 	unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */
483 
484 	/* MPS Buffer Group Map[per Port].  Bit i is set if buffer group i is
485 	 * used by the Port
486 	 */
487 	u8 mps_bg_map[MAX_NPORTS];	/* MPS Buffer Group Map */
488 	bool write_w_imm_support;       /* FW supports WRITE_WITH_IMMEDIATE */
489 	bool write_cmpl_support;        /* FW supports WRITE_CMPL */
490 };
491 
492 /* State needed to monitor the forward progress of SGE Ingress DMA activities
493  * and possible hangs.
494  */
495 struct sge_idma_monitor_state {
496 	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
497 	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
498 	unsigned int idma_state[2];	/* IDMA Hang detect state */
499 	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
500 	unsigned int idma_warn[2];	/* time to warning in HZ */
501 };
502 
503 /* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
504  * The access and execute times are signed in order to accommodate negative
505  * error returns.
506  */
507 struct mbox_cmd {
508 	u64 cmd[MBOX_LEN / 8];		/* a Firmware Mailbox Command/Reply */
509 	u64 timestamp;			/* OS-dependent timestamp */
510 	u32 seqno;			/* sequence number */
511 	s16 access;			/* time (ms) to access mailbox */
512 	s16 execute;			/* time (ms) to execute */
513 };
514 
515 struct mbox_cmd_log {
516 	unsigned int size;		/* number of entries in the log */
517 	unsigned int cursor;		/* next position in the log to write */
518 	u32 seqno;			/* next sequence number */
519 	/* variable length mailbox command log starts here */
520 };
521 
522 /* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
523  * return a pointer to the specified entry.
524  */
525 static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
526 						  unsigned int entry_idx)
527 {
528 	return &((struct mbox_cmd *)&(log)[1])[entry_idx];
529 }
530 
531 #define FW_VERSION(chip) ( \
532 		FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
533 		FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
534 		FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
535 		FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
536 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
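
/* For example, FW_VERSION(T4) token-pastes its argument to pick up the
 * T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD constants (see t4fw_version.h) and
 * combines the four firmware version components into a single 32-bit word.
 * FW_INTFVER() ignores its chip argument and simply selects
 * FW_HDR_INTFVER_<intf>.
 */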
537 
538 struct cxgb4_ethtool_lb_test {
539 	struct completion completion;
540 	int result;
541 	int loopback;
542 };
543 
544 struct fw_info {
545 	u8 chip;
546 	char *fs_name;
547 	char *fw_mod_name;
548 	struct fw_hdr fw_hdr;
549 };
550 
551 struct trace_params {
552 	u32 data[TRACE_LEN / 4];
553 	u32 mask[TRACE_LEN / 4];
554 	unsigned short snap_len;
555 	unsigned short min_len;
556 	unsigned char skip_ofst;
557 	unsigned char skip_len;
558 	unsigned char invert;
559 	unsigned char port;
560 };
561 
562 struct cxgb4_fw_data {
563 	__be32 signature;
564 	__u8 reserved[4];
565 };
566 
567 /* Firmware Port Capabilities types. */
568 
569 typedef u16 fw_port_cap16_t;	/* 16-bit Port Capabilities integral value */
570 typedef u32 fw_port_cap32_t;	/* 32-bit Port Capabilities integral value */
571 
572 enum fw_caps {
573 	FW_CAPS_UNKNOWN	= 0,	/* 0'ed out initial state */
574 	FW_CAPS16	= 1,	/* old Firmware: 16-bit Port Capabilities */
575 	FW_CAPS32	= 2,	/* new Firmware: 32-bit Port Capabilities */
576 };
577 
578 struct link_config {
579 	fw_port_cap32_t pcaps;           /* link capabilities */
580 	fw_port_cap32_t def_acaps;       /* default advertised capabilities */
581 	fw_port_cap32_t acaps;           /* advertised capabilities */
582 	fw_port_cap32_t lpacaps;         /* peer advertised capabilities */
583 
584 	fw_port_cap32_t speed_caps;      /* speed(s) user has requested */
585 	unsigned int   speed;            /* actual link speed (Mb/s) */
586 
587 	enum cc_pause  requested_fc;     /* flow control user has requested */
588 	enum cc_pause  fc;               /* actual link flow control */
589 	enum cc_pause  advertised_fc;    /* actual advertised flow control */
590 
591 	enum cc_fec    requested_fec;	 /* Forward Error Correction: */
592 	enum cc_fec    fec;		 /* requested and actual in use */
593 
594 	unsigned char  autoneg;          /* autonegotiating? */
595 
596 	unsigned char  link_ok;          /* link up? */
597 	unsigned char  link_down_rc;     /* link down reason */
598 
599 	bool new_module;		 /* ->OS Transceiver Module inserted */
600 	bool redo_l1cfg;		 /* ->CC redo current "sticky" L1 CFG */
601 };
602 
603 #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
604 
605 enum {
606 	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
607 	MAX_OFLD_QSETS = 16,          /* # of offload Tx, iscsi Rx queue sets */
608 	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
609 };
610 
611 enum {
612 	MAX_TXQ_ENTRIES      = 16384,
613 	MAX_CTRL_TXQ_ENTRIES = 1024,
614 	MAX_RSPQ_ENTRIES     = 16384,
615 	MAX_RX_BUFFERS       = 16384,
616 	MIN_TXQ_ENTRIES      = 32,
617 	MIN_CTRL_TXQ_ENTRIES = 32,
618 	MIN_RSPQ_ENTRIES     = 128,
619 	MIN_FL_ENTRIES       = 16
620 };
621 
622 enum {
623 	MAX_TXQ_DESC_SIZE      = 64,
624 	MAX_RXQ_DESC_SIZE      = 128,
625 	MAX_FL_DESC_SIZE       = 8,
626 	MAX_CTRL_TXQ_DESC_SIZE = 64,
627 };
628 
629 enum {
630 	INGQ_EXTRAS = 2,        /* firmware event queue and */
631 				/*   forwarded interrupts */
632 	MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
633 };
634 
635 enum {
636 	PRIV_FLAG_PORT_TX_VM_BIT,
637 };
638 
639 #define PRIV_FLAG_PORT_TX_VM		BIT(PRIV_FLAG_PORT_TX_VM_BIT)
640 
641 #define PRIV_FLAGS_ADAP			0
642 #define PRIV_FLAGS_PORT			PRIV_FLAG_PORT_TX_VM
643 
644 struct adapter;
645 struct sge_rspq;
646 
647 #include "cxgb4_dcb.h"
648 
649 #ifdef CONFIG_CHELSIO_T4_FCOE
650 #include "cxgb4_fcoe.h"
651 #endif /* CONFIG_CHELSIO_T4_FCOE */
652 
653 struct port_info {
654 	struct adapter *adapter;
655 	u16    viid;
656 	int    xact_addr_filt;        /* index of exact MAC address filter */
657 	u16    rss_size;              /* size of VI's RSS table slice */
658 	s8     mdio_addr;
659 	enum fw_port_type port_type;
660 	u8     mod_type;
661 	u8     port_id;
662 	u8     tx_chan;
663 	u8     lport;                 /* associated offload logical port */
664 	u8     nqsets;                /* # of qsets */
665 	u8     first_qset;            /* index of first qset */
666 	u8     rss_mode;
667 	struct link_config link_cfg;
668 	u16   *rss;
669 	struct port_stats stats_base;
670 #ifdef CONFIG_CHELSIO_T4_DCB
671 	struct port_dcb_info dcb;     /* Data Center Bridging support */
672 #endif
673 #ifdef CONFIG_CHELSIO_T4_FCOE
674 	struct cxgb_fcoe fcoe;
675 #endif /* CONFIG_CHELSIO_T4_FCOE */
676 	bool rxtstamp;  /* Enable TS */
677 	struct hwtstamp_config tstamp_config;
678 	bool ptp_enable;
679 	struct sched_table *sched_tbl;
680 	u32 eth_flags;
681 
	/* VIN, VIVLD and SMT index are either returned by the firmware
	 * or decoded by the driver from the VIID.
	 */
685 	u8 vin;
686 	u8 vivld;
687 	u8 smt_idx;
688 	u8 rx_cchan;
689 
690 	bool tc_block_shared;
691 
692 	/* Mirror VI information */
693 	u16 viid_mirror;
694 	u16 nmirrorqsets;
695 	u32 vi_mirror_count;
696 	struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */
697 	struct cxgb4_ethtool_lb_test ethtool_lb;
698 };
699 
700 struct dentry;
701 struct work_struct;
702 
703 enum {                                 /* adapter flags */
704 	CXGB4_FULL_INIT_DONE		= (1 << 0),
705 	CXGB4_DEV_ENABLED		= (1 << 1),
706 	CXGB4_USING_MSI			= (1 << 2),
707 	CXGB4_USING_MSIX		= (1 << 3),
708 	CXGB4_FW_OK			= (1 << 4),
709 	CXGB4_RSS_TNLALLLOOKUP		= (1 << 5),
710 	CXGB4_USING_SOFT_PARAMS		= (1 << 6),
711 	CXGB4_MASTER_PF			= (1 << 7),
712 	CXGB4_FW_OFLD_CONN		= (1 << 9),
713 	CXGB4_ROOT_NO_RELAXED_ORDERING	= (1 << 10),
714 	CXGB4_SHUTTING_DOWN		= (1 << 11),
715 	CXGB4_SGE_DBQ_TIMER		= (1 << 12),
716 };
717 
718 enum {
719 	ULP_CRYPTO_LOOKASIDE = 1 << 0,
720 	ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
721 	ULP_CRYPTO_KTLS_INLINE  = 1 << 3,
722 };
723 
724 #define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024
725 #define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64
726 #define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5
727 #define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8
728 
729 #define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72
730 
731 struct rx_sw_desc;
732 
733 struct sge_fl {                     /* SGE free-buffer queue state */
734 	unsigned int avail;         /* # of available Rx buffers */
735 	unsigned int pend_cred;     /* new buffers since last FL DB ring */
736 	unsigned int cidx;          /* consumer index */
737 	unsigned int pidx;          /* producer index */
738 	unsigned long alloc_failed; /* # of times buffer allocation failed */
739 	unsigned long large_alloc_failed;
740 	unsigned long mapping_err;  /* # of RX Buffer DMA Mapping failures */
741 	unsigned long low;          /* # of times momentarily starving */
742 	unsigned long starving;
743 	/* RO fields */
744 	unsigned int cntxt_id;      /* SGE context id for the free list */
745 	unsigned int size;          /* capacity of free list */
746 	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
747 	__be64 *desc;               /* address of HW Rx descriptor ring */
748 	dma_addr_t addr;            /* bus address of HW ring start */
749 	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
750 	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
751 };
752 
753 /* A packet gather list */
754 struct pkt_gl {
755 	u64 sgetstamp;		    /* SGE Time Stamp for Ingress Packet */
756 	struct page_frag frags[MAX_SKB_FRAGS];
757 	void *va;                         /* virtual address of first byte */
758 	unsigned int nfrags;              /* # of fragments */
759 	unsigned int tot_len;             /* total length of fragments */
760 };
761 
762 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
763 			      const struct pkt_gl *gl);
764 typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
765 /* LRO related declarations for ULD */
766 struct t4_lro_mgr {
767 #define MAX_LRO_SESSIONS		64
768 	u8 lro_session_cnt;         /* # of sessions to aggregate */
769 	unsigned long lro_pkts;     /* # of LRO super packets */
770 	unsigned long lro_merged;   /* # of wire packets merged by LRO */
771 	struct sk_buff_head lroq;   /* list of aggregated sessions */
772 };
773 
774 struct sge_rspq {                   /* state for an SGE response queue */
775 	struct napi_struct napi;
776 	const __be64 *cur_desc;     /* current descriptor in queue */
777 	unsigned int cidx;          /* consumer index */
778 	u8 gen;                     /* current generation bit */
779 	u8 intr_params;             /* interrupt holdoff parameters */
780 	u8 next_intr_params;        /* holdoff params for next interrupt */
781 	u8 adaptive_rx;
782 	u8 pktcnt_idx;              /* interrupt packet threshold */
783 	u8 uld;                     /* ULD handling this queue */
784 	u8 idx;                     /* queue index within its group */
785 	int offset;                 /* offset into current Rx buffer */
786 	u16 cntxt_id;               /* SGE context id for the response q */
787 	u16 abs_id;                 /* absolute SGE id for the response q */
788 	__be64 *desc;               /* address of HW response ring */
789 	dma_addr_t phys_addr;       /* physical address of the ring */
790 	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
791 	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
792 	unsigned int iqe_len;       /* entry size */
793 	unsigned int size;          /* capacity of response queue */
794 	struct adapter *adap;
795 	struct net_device *netdev;  /* associated net device */
796 	rspq_handler_t handler;
797 	rspq_flush_handler_t flush_handler;
798 	struct t4_lro_mgr lro_mgr;
799 };
800 
801 struct sge_eth_stats {              /* Ethernet queue statistics */
802 	unsigned long pkts;         /* # of ethernet packets */
803 	unsigned long lro_pkts;     /* # of LRO super packets */
804 	unsigned long lro_merged;   /* # of wire packets merged by LRO */
805 	unsigned long rx_cso;       /* # of Rx checksum offloads */
806 	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
807 	unsigned long rx_drops;     /* # of packets dropped due to no mem */
808 	unsigned long bad_rx_pkts;  /* # of packets with err_vec!=0 */
809 };
810 
811 struct sge_eth_rxq {                /* SW Ethernet Rx queue */
812 	struct sge_rspq rspq;
813 	struct sge_fl fl;
814 	struct sge_eth_stats stats;
815 	struct msix_info *msix;
816 } ____cacheline_aligned_in_smp;
817 
818 struct sge_ofld_stats {             /* offload queue statistics */
819 	unsigned long pkts;         /* # of packets */
820 	unsigned long imm;          /* # of immediate-data packets */
821 	unsigned long an;           /* # of asynchronous notifications */
822 	unsigned long nomem;        /* # of responses deferred due to no mem */
823 };
824 
825 struct sge_ofld_rxq {               /* SW offload Rx queue */
826 	struct sge_rspq rspq;
827 	struct sge_fl fl;
828 	struct sge_ofld_stats stats;
829 	struct msix_info *msix;
830 } ____cacheline_aligned_in_smp;
831 
832 struct tx_desc {
833 	__be64 flit[8];
834 };
835 
836 struct ulptx_sgl;
837 
838 struct tx_sw_desc {
839 	struct sk_buff *skb; /* SKB to free after getting completion */
840 	dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
841 };
842 
843 struct sge_txq {
844 	unsigned int  in_use;       /* # of in-use Tx descriptors */
845 	unsigned int  q_type;	    /* Q type Eth/Ctrl/Ofld */
846 	unsigned int  size;         /* # of descriptors */
847 	unsigned int  cidx;         /* SW consumer index */
848 	unsigned int  pidx;         /* producer index */
849 	unsigned long stops;        /* # of times q has been stopped */
850 	unsigned long restarts;     /* # of queue restarts */
851 	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
852 	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
853 	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
854 	struct sge_qstat *stat;     /* queue status entry */
855 	dma_addr_t    phys_addr;    /* physical address of the ring */
856 	spinlock_t db_lock;
857 	int db_disabled;
858 	unsigned short db_pidx;
859 	unsigned short db_pidx_inc;
860 	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
861 	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
862 };
863 
864 struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
865 	struct sge_txq q;
866 	struct netdev_queue *txq;   /* associated netdev TX queue */
867 #ifdef CONFIG_CHELSIO_T4_DCB
868 	u8 dcb_prio;		    /* DCB Priority bound to queue */
869 #endif
870 	u8 dbqt;                    /* SGE Doorbell Queue Timer in use */
871 	unsigned int dbqtimerix;    /* SGE Doorbell Queue Timer Index */
872 	unsigned long tso;          /* # of TSO requests */
873 	unsigned long uso;          /* # of USO requests */
874 	unsigned long tx_cso;       /* # of Tx checksum offloads */
875 	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
876 	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
877 } ____cacheline_aligned_in_smp;
878 
879 struct sge_uld_txq {               /* state for an SGE offload Tx queue */
880 	struct sge_txq q;
881 	struct adapter *adap;
882 	struct sk_buff_head sendq;  /* list of backpressured packets */
883 	struct tasklet_struct qresume_tsk; /* restarts the queue */
884 	bool service_ofldq_running; /* service_ofldq() is processing sendq */
885 	u8 full;                    /* the Tx ring is full */
886 	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
887 } ____cacheline_aligned_in_smp;
888 
889 struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
890 	struct sge_txq q;
891 	struct adapter *adap;
892 	struct sk_buff_head sendq;  /* list of backpressured packets */
893 	struct tasklet_struct qresume_tsk; /* restarts the queue */
894 	u8 full;                    /* the Tx ring is full */
895 } ____cacheline_aligned_in_smp;
896 
897 struct sge_uld_rxq_info {
898 	char name[IFNAMSIZ];	/* name of ULD driver */
899 	struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
900 	u16 *rspq_id;		/* response queue id's of rxq */
901 	u16 nrxq;		/* # of ingress uld queues */
902 	u16 nciq;		/* # of completion queues */
903 	u8 uld;			/* uld type */
904 };
905 
906 struct sge_uld_txq_info {
907 	struct sge_uld_txq *uldtxq; /* Txq's for ULD */
908 	atomic_t users;		/* num users */
909 	u16 ntxq;		/* # of egress uld queues */
910 };
911 
912 /* struct to maintain ULD list to reallocate ULD resources on hotplug */
913 struct cxgb4_uld_list {
914 	struct cxgb4_uld_info uld_info;
915 	struct list_head list_node;
916 	enum cxgb4_uld uld_type;
917 };
918 
919 enum sge_eosw_state {
920 	CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
921 	CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
922 	CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
923 	CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
924 	CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
925 	CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
926 };
927 
928 struct sge_eosw_txq {
929 	spinlock_t lock; /* Per queue lock to synchronize completions */
930 	enum sge_eosw_state state; /* Current ETHOFLD State */
931 	struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
932 	u32 ndesc; /* Number of descriptors */
933 	u32 pidx; /* Current Producer Index */
934 	u32 last_pidx; /* Last successfully transmitted Producer Index */
935 	u32 cidx; /* Current Consumer Index */
936 	u32 last_cidx; /* Last successfully reclaimed Consumer Index */
937 	u32 flowc_idx; /* Descriptor containing a FLOWC request */
938 	u32 inuse; /* Number of packets held in ring */
939 
940 	u32 cred; /* Current available credits */
941 	u32 ncompl; /* # of completions posted */
942 	u32 last_compl; /* # of credits consumed since last completion req */
943 
944 	u32 eotid; /* Index into EOTID table in software */
945 	u32 hwtid; /* Hardware EOTID index */
946 
947 	u32 hwqid; /* Underlying hardware queue index */
948 	struct net_device *netdev; /* Pointer to netdevice */
949 	struct tasklet_struct qresume_tsk; /* Restarts the queue */
950 	struct completion completion; /* completion for FLOWC rendezvous */
951 };
952 
953 struct sge_eohw_txq {
954 	spinlock_t lock; /* Per queue lock */
955 	struct sge_txq q; /* HW Txq */
956 	struct adapter *adap; /* Backpointer to adapter */
957 	unsigned long tso; /* # of TSO requests */
958 	unsigned long uso; /* # of USO requests */
959 	unsigned long tx_cso; /* # of Tx checksum offloads */
960 	unsigned long vlan_ins; /* # of Tx VLAN insertions */
961 	unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
962 };
963 
964 struct sge {
965 	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
966 	struct sge_eth_txq ptptxq;
967 	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
968 
969 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
970 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
971 	struct sge_uld_rxq_info **uld_rxq_info;
972 	struct sge_uld_txq_info **uld_txq_info;
973 
974 	struct sge_rspq intrq ____cacheline_aligned_in_smp;
975 	spinlock_t intrq_lock;
976 
977 	struct sge_eohw_txq *eohw_txq;
978 	struct sge_ofld_rxq *eohw_rxq;
979 
980 	struct sge_eth_rxq *mirror_rxq[NCHAN];
981 
982 	u16 max_ethqsets;           /* # of available Ethernet queue sets */
983 	u16 ethqsets;               /* # of active Ethernet queue sets */
984 	u16 ethtxq_rover;           /* Tx queue to clean up next */
985 	u16 ofldqsets;              /* # of active ofld queue sets */
986 	u16 nqs_per_uld;	    /* # of Rx queues per ULD */
987 	u16 eoqsets;                /* # of ETHOFLD queues */
988 	u16 mirrorqsets;            /* # of Mirror queues */
989 
990 	u16 timer_val[SGE_NTIMERS];
991 	u8 counter_val[SGE_NCOUNTERS];
992 	u16 dbqtimer_tick;
993 	u16 dbqtimer_val[SGE_NDBQTIMERS];
994 	u32 fl_pg_order;            /* large page allocation size */
995 	u32 stat_len;               /* length of status page at ring end */
996 	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* free-list buffer alignment */
998 	u32 fl_starve_thres;        /* Free List starvation threshold */
999 
1000 	struct sge_idma_monitor_state idma_monitor;
1001 	unsigned int egr_start;
1002 	unsigned int egr_sz;
1003 	unsigned int ingr_start;
1004 	unsigned int ingr_sz;
1005 	void **egr_map;    /* qid->queue egress queue map */
1006 	struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
1007 	unsigned long *starving_fl;
1008 	unsigned long *txq_maperr;
1009 	unsigned long *blocked_fl;
1010 	struct timer_list rx_timer; /* refills starving FLs */
1011 	struct timer_list tx_timer; /* checks Tx queues */
1012 
1013 	int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
1014 	int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
1015 };
1016 
1017 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
1018 #define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
1019 
1020 struct l2t_data;
1021 
1022 #ifdef CONFIG_PCI_IOV
1023 
1024 /* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
1025  * Configuration initialization for T5 only has SR-IOV functionality enabled
1026  * on PF0-3 in order to simplify everything.
1027  */
1028 #define NUM_OF_PF_WITH_SRIOV 4
1029 
1030 #endif
1031 
1032 struct doorbell_stats {
1033 	u32 db_drop;
1034 	u32 db_empty;
1035 	u32 db_full;
1036 };
1037 
1038 struct hash_mac_addr {
1039 	struct list_head list;
1040 	u8 addr[ETH_ALEN];
1041 	unsigned int iface_mac;
1042 };
1043 
1044 struct msix_bmap {
1045 	unsigned long *msix_bmap;
1046 	unsigned int mapsize;
1047 	spinlock_t lock; /* lock for acquiring bitmap */
1048 };
1049 
1050 struct msix_info {
1051 	unsigned short vec;
1052 	char desc[IFNAMSIZ + 10];
1053 	unsigned int idx;
1054 	cpumask_var_t aff_mask;
1055 };
1056 
1057 struct vf_info {
1058 	unsigned char vf_mac_addr[ETH_ALEN];
1059 	unsigned int tx_rate;
1060 	bool pf_set_mac;
1061 	u16 vlan;
1062 	int link_state;
1063 };
1064 
1065 enum {
1066 	HMA_DMA_MAPPED_FLAG = 1
1067 };
1068 
1069 struct hma_data {
1070 	unsigned char flags;
1071 	struct sg_table *sgt;
1072 	dma_addr_t *phy_addr;	/* physical address of the page */
1073 };
1074 
1075 struct mbox_list {
1076 	struct list_head list;
1077 };
1078 
1079 #if IS_ENABLED(CONFIG_THERMAL)
1080 struct ch_thermal {
1081 	struct thermal_zone_device *tzdev;
1082 	int trip_temp;
1083 	int trip_type;
1084 };
1085 #endif
1086 
1087 struct mps_entries_ref {
1088 	struct list_head list;
1089 	u8 addr[ETH_ALEN];
1090 	u8 mask[ETH_ALEN];
1091 	u16 idx;
1092 	refcount_t refcnt;
1093 };
1094 
1095 struct cxgb4_ethtool_filter_info {
1096 	u32 *loc_array; /* Array holding the actual TIDs set to filters */
1097 	unsigned long *bmap; /* Bitmap for managing filters in use */
1098 	u32 in_use; /* # of filters in use */
1099 };
1100 
1101 struct cxgb4_ethtool_filter {
1102 	u32 nentries; /* Adapter wide number of supported filters */
1103 	struct cxgb4_ethtool_filter_info *port; /* Per port entry */
1104 };
1105 
1106 struct adapter {
1107 	void __iomem *regs;
1108 	void __iomem *bar2;
1109 	u32 t4_bar0;
1110 	struct pci_dev *pdev;
1111 	struct device *pdev_dev;
1112 	const char *name;
1113 	unsigned int mbox;
1114 	unsigned int pf;
1115 	unsigned int flags;
1116 	unsigned int adap_idx;
1117 	enum chip_type chip;
1118 	u32 eth_flags;
1119 
1120 	int msg_enable;
1121 	__be16 vxlan_port;
1122 	__be16 geneve_port;
1123 
1124 	struct adapter_params params;
1125 	struct cxgb4_virt_res vres;
1126 	unsigned int swintr;
1127 
1128 	/* MSI-X Info for NIC and OFLD queues */
1129 	struct msix_info *msix_info;
1130 	struct msix_bmap msix_bmap;
1131 
1132 	struct doorbell_stats db_stats;
1133 	struct sge sge;
1134 
1135 	struct net_device *port[MAX_NPORTS];
1136 	u8 chan_map[NCHAN];                   /* channel -> port map */
1137 
1138 	struct vf_info *vfinfo;
1139 	u8 num_vfs;
1140 
1141 	u32 filter_mode;
1142 	unsigned int l2t_start;
1143 	unsigned int l2t_end;
1144 	struct l2t_data *l2t;
1145 	unsigned int clipt_start;
1146 	unsigned int clipt_end;
1147 	struct clip_tbl *clipt;
1148 	unsigned int rawf_start;
1149 	unsigned int rawf_cnt;
1150 	struct smt_data *smt;
1151 	struct cxgb4_uld_info *uld;
1152 	void *uld_handle[CXGB4_ULD_MAX];
1153 	unsigned int num_uld;
1154 	unsigned int num_ofld_uld;
1155 	struct list_head list_node;
1156 	struct list_head rcu_node;
1157 	struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
1158 	struct list_head mps_ref;
1159 	spinlock_t mps_ref_lock; /* lock for syncing mps ref/def activities */
1160 
1161 	void *iscsi_ppm;
1162 
1163 	struct tid_info tids;
1164 	void **tid_release_head;
1165 	spinlock_t tid_release_lock;
1166 	struct workqueue_struct *workq;
1167 	struct work_struct tid_release_task;
1168 	struct work_struct db_full_task;
1169 	struct work_struct db_drop_task;
1170 	struct work_struct fatal_err_notify_task;
1171 	bool tid_release_task_busy;
1172 
1173 	/* lock for mailbox cmd list */
1174 	spinlock_t mbox_lock;
1175 	struct mbox_list mlist;
1176 
1177 	/* support for mailbox command/reply logging */
1178 #define T4_OS_LOG_MBOX_CMDS 256
1179 	struct mbox_cmd_log *mbox_log;
1180 
1181 	struct mutex uld_mutex;
1182 
1183 	struct dentry *debugfs_root;
1184 	bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
	bool trace_rss;	/* true implies that a different RSS flit is used for
			 * each tracing filter; false means the default RSS
			 * flit is used for all 4 filters.
			 */
1189 
1190 	struct ptp_clock *ptp_clock;
1191 	struct ptp_clock_info ptp_clock_info;
1192 	struct sk_buff *ptp_tx_skb;
1193 	/* ptp lock */
1194 	spinlock_t ptp_lock;
1195 	spinlock_t stats_lock;
1196 	spinlock_t win0_lock ____cacheline_aligned_in_smp;
1197 
1198 	/* TC u32 offload */
1199 	struct cxgb4_tc_u32_table *tc_u32;
1200 	struct chcr_ktls chcr_ktls;
1201 	struct chcr_stats_debug chcr_stats;
1202 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
1203 	struct ch_ktls_stats_debug ch_ktls_stats;
1204 #endif
1205 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
1206 	struct ch_ipsec_stats_debug ch_ipsec_stats;
1207 #endif
1208 
1209 	/* TC flower offload */
1210 	bool tc_flower_initialized;
1211 	struct rhashtable flower_tbl;
1212 	struct rhashtable_params flower_ht_params;
1213 	struct timer_list flower_stats_timer;
1214 	struct work_struct flower_stats_work;
1215 
1216 	/* Ethtool Dump */
1217 	struct ethtool_dump eth_dump;
1218 
1219 	/* HMA */
1220 	struct hma_data hma;
1221 
1222 	struct srq_data *srq;
1223 
1224 	/* Dump buffer for collecting logs in kdump kernel */
1225 	struct vmcoredd_data vmcoredd;
1226 #if IS_ENABLED(CONFIG_THERMAL)
1227 	struct ch_thermal ch_thermal;
1228 #endif
1229 
1230 	/* TC MQPRIO offload */
1231 	struct cxgb4_tc_mqprio *tc_mqprio;
1232 
1233 	/* TC MATCHALL classifier offload */
1234 	struct cxgb4_tc_matchall *tc_matchall;
1235 
1236 	/* Ethtool n-tuple */
1237 	struct cxgb4_ethtool_filter *ethtool_filters;
1238 };
1239 
1240 /* Support for "sched-class" command to allow a TX Scheduling Class to be
1241  * programmed with various parameters.
1242  */
1243 struct ch_sched_params {
1244 	u8   type;                     /* packet or flow */
1245 	union {
1246 		struct {
1247 			u8   level;    /* scheduler hierarchy level */
1248 			u8   mode;     /* per-class or per-flow */
1249 			u8   rateunit; /* bit or packet rate */
1250 			u8   ratemode; /* %port relative or kbps absolute */
1251 			u8   channel;  /* scheduler channel [0..N] */
1252 			u8   class;    /* scheduler class [0..N] */
1253 			u32  minrate;  /* minimum rate */
1254 			u32  maxrate;  /* maximum rate */
1255 			u16  weight;   /* percent weight */
1256 			u16  pktsize;  /* average packet size */
1257 			u16  burstsize;  /* burst buffer size */
1258 		} params;
1259 	} u;
1260 };
1261 
1262 enum {
1263 	SCHED_CLASS_TYPE_PACKET = 0,    /* class type */
1264 };
1265 
1266 enum {
1267 	SCHED_CLASS_LEVEL_CL_RL = 0,    /* class rate limiter */
1268 	SCHED_CLASS_LEVEL_CH_RL = 2,    /* channel rate limiter */
1269 };
1270 
1271 enum {
1272 	SCHED_CLASS_MODE_CLASS = 0,     /* per-class scheduling */
1273 	SCHED_CLASS_MODE_FLOW,          /* per-flow scheduling */
1274 };
1275 
1276 enum {
1277 	SCHED_CLASS_RATEUNIT_BITS = 0,  /* bit rate scheduling */
1278 };
1279 
1280 enum {
1281 	SCHED_CLASS_RATEMODE_ABS = 1,   /* Kb/s */
1282 };
1283 
1284 /* Support for "sched_queue" command to allow one or more NIC TX Queues
1285  * to be bound to a TX Scheduling Class.
1286  */
1287 struct ch_sched_queue {
1288 	s8   queue;    /* queue index */
1289 	s8   class;    /* class index */
1290 };
1291 
1292 /* Support for "sched_flowc" command to allow one or more FLOWC
1293  * to be bound to a TX Scheduling Class.
1294  */
1295 struct ch_sched_flowc {
1296 	s32 tid;   /* TID to bind */
1297 	s8  class; /* class index */
1298 };
1299 
1300 /* Defined bit width of user definable filter tuples
1301  */
1302 #define ETHTYPE_BITWIDTH 16
1303 #define FRAG_BITWIDTH 1
1304 #define MACIDX_BITWIDTH 9
1305 #define FCOE_BITWIDTH 1
1306 #define IPORT_BITWIDTH 3
1307 #define MATCHTYPE_BITWIDTH 3
1308 #define PROTO_BITWIDTH 8
1309 #define TOS_BITWIDTH 8
1310 #define PF_BITWIDTH 8
1311 #define VF_BITWIDTH 8
1312 #define IVLAN_BITWIDTH 16
1313 #define OVLAN_BITWIDTH 16
1314 #define ENCAP_VNI_BITWIDTH 24
1315 
1316 /* Filter matching rules.  These consist of a set of ingress packet field
1317  * (value, mask) tuples.  The associated ingress packet field matches the
1318  * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
1319  * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field
1321  * matching rules are true.
1322  *
1323  * Partial field masks are always valid, however, while it may be easy to
1324  * understand their meanings for some fields (e.g. IP address to match a
1325  * subnet), for others making sensible partial masks is less intuitive (e.g.
1326  * MPS match type) ...
1327  *
1328  * Most of the following data structures are modeled on T4 capabilities.
1329  * Drivers for earlier chips use the subsets which make sense for those chips.
1330  * We really need to come up with a hardware-independent mechanism to
1331  * represent hardware filter capabilities ...
1332  */
1333 struct ch_filter_tuple {
1334 	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
1335 	 * register selects which of these fields will participate in the
1336 	 * filter match rules -- up to a maximum of 36 bits.  Because
1337 	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
1338 	 * set of fields.
1339 	 */
1340 	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
1341 	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
1342 	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
1343 	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
1344 	uint32_t pfvf_vld:1;                    /* PF/VF valid */
1345 	uint32_t encap_vld:1;			/* Encapsulation valid */
1346 	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
1347 	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
1348 	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
1349 	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
1350 	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
1351 	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
1352 	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
1353 	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
1354 	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
1355 	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */
1356 	uint32_t vni:ENCAP_VNI_BITWIDTH;	/* VNI of tunnel */
1357 
1358 	/* Uncompressed header matching field rules.  These are always
1359 	 * available for field rules.
1360 	 */
1361 	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
1362 	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
1363 	uint16_t lport;         /* local port */
1364 	uint16_t fport;         /* foreign port */
1365 };
1366 
1367 /* A filter ioctl command.
1368  */
1369 struct ch_filter_specification {
1370 	/* Administrative fields for filter.
1371 	 */
1372 	uint32_t hitcnts:1;     /* count filter hits in TCB */
1373 	uint32_t prio:1;        /* filter has priority over active/server */
1374 
1375 	/* Fundamental filter typing.  This is the one element of filter
1376 	 * matching that doesn't exist as a (value, mask) tuple.
1377 	 */
1378 	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */
1379 	u32 hash:1;		/* 0 => wild-card, 1 => exact-match */
1380 
1381 	/* Packet dispatch information.  Ingress packets which match the
1382 	 * filter rules will be dropped, passed to the host or switched back
1383 	 * out as egress packets.
1384 	 */
1385 	uint32_t action:2;      /* drop, pass, switch */
1386 
1387 	uint32_t rpttid:1;      /* report TID in RSS hash field */
1388 
1389 	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
1390 	uint32_t iq:10;         /* ingress queue */
1391 
1392 	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
1393 	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
1394 				/*             1 => TCB contains IQ ID */
1395 
1396 	/* Switch proxy/rewrite fields.  An ingress packet which matches a
1397 	 * filter with "switch" set will be looped back out as an egress
1398 	 * packet -- potentially with some Ethernet header rewriting.
1399 	 */
1400 	uint32_t eport:2;       /* egress port to switch packet out */
1401 	uint32_t newdmac:1;     /* rewrite destination MAC address */
1402 	uint32_t newsmac:1;     /* rewrite source MAC address */
1403 	uint32_t newvlan:2;     /* rewrite VLAN Tag */
1404 	uint32_t nat_mode:3;    /* specify NAT operation mode */
1405 	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
1406 	uint8_t smac[ETH_ALEN]; /* new source MAC address */
1407 	uint16_t vlan;          /* VLAN Tag to insert */
1408 
1409 	u8 nat_lip[16];		/* local IP to use after NAT'ing */
1410 	u8 nat_fip[16];		/* foreign IP to use after NAT'ing */
1411 	u16 nat_lport;		/* local port to use after NAT'ing */
1412 	u16 nat_fport;		/* foreign port to use after NAT'ing */
1413 
1414 	u32 tc_prio;		/* TC's filter priority index */
1415 	u64 tc_cookie;		/* Unique cookie identifying TC rules */
1416 
1417 	/* reservation for future additions */
1418 	u8 rsvd[12];
1419 
1420 	/* Filter rule value/mask pairs.
1421 	 */
1422 	struct ch_filter_tuple val;
1423 	struct ch_filter_tuple mask;
1424 };
1425 
1426 enum {
1427 	FILTER_PASS = 0,        /* default */
1428 	FILTER_DROP,
1429 	FILTER_SWITCH
1430 };
1431 
1432 enum {
1433 	VLAN_NOCHANGE = 0,      /* default */
1434 	VLAN_REMOVE,
1435 	VLAN_INSERT,
1436 	VLAN_REWRITE
1437 };
1438 
1439 enum {
1440 	NAT_MODE_NONE = 0,	/* No NAT performed */
1441 	NAT_MODE_DIP,		/* NAT on Dst IP */
1442 	NAT_MODE_DIP_DP,	/* NAT on Dst IP, Dst Port */
1443 	NAT_MODE_DIP_DP_SIP,	/* NAT on Dst IP, Dst Port and Src IP */
1444 	NAT_MODE_DIP_DP_SP,	/* NAT on Dst IP, Dst Port and Src Port */
1445 	NAT_MODE_SIP_SP,	/* NAT on Src IP and Src Port */
1446 	NAT_MODE_DIP_SIP_SP,	/* NAT on Dst IP, Src IP and Src Port */
1447 	NAT_MODE_ALL		/* NAT on entire 4-tuple */
1448 };
1449 
1450 #define CXGB4_FILTER_TYPE_MAX 2
1451 
1452 /* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
1454  * firmware command.  The use of bit-field structure elements is purely to
1455  * remind ourselves of the field size limitations and save memory in the case
1456  * where the filter table is large.
1457  */
1458 struct filter_entry {
1459 	/* Administrative fields for filter. */
1460 	u32 valid:1;            /* filter allocated and valid */
1461 	u32 locked:1;           /* filter is administratively locked */
1462 
1463 	u32 pending:1;          /* filter action is pending firmware reply */
1464 	struct filter_ctx *ctx; /* Caller's completion hook */
1465 	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
1466 	struct smt_entry *smt;  /* Source Mac Table entry for smac */
1467 	struct net_device *dev; /* Associated net device */
1468 	u32 tid;                /* This will store the actual tid */
1469 
1470 	/* The filter itself.  Most of this is a straight copy of information
1471 	 * provided by the extended ioctl().  Some fields are translated to
1472 	 * internal forms -- for instance the Ingress Queue ID passed in from
1473 	 * the ioctl() is translated into the Absolute Ingress Queue ID.
1474 	 */
1475 	struct ch_filter_specification fs;
1476 };
1477 
1478 static inline int is_offload(const struct adapter *adap)
1479 {
1480 	return adap->params.offload;
1481 }
1482 
1483 static inline int is_hashfilter(const struct adapter *adap)
1484 {
1485 	return adap->params.hash_filter;
1486 }
1487 
1488 static inline int is_pci_uld(const struct adapter *adap)
1489 {
1490 	return adap->params.crypto;
1491 }
1492 
1493 static inline int is_uld(const struct adapter *adap)
1494 {
1495 	return (adap->params.offload || adap->params.crypto);
1496 }
1497 
1498 static inline int is_ethofld(const struct adapter *adap)
1499 {
1500 	return adap->params.ethofld;
1501 }
1502 
1503 static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
1504 {
1505 	return readl(adap->regs + reg_addr);
1506 }
1507 
1508 static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
1509 {
1510 	writel(val, adap->regs + reg_addr);
1511 }
1512 
1513 #ifndef readq
1514 static inline u64 readq(const volatile void __iomem *addr)
1515 {
1516 	return readl(addr) + ((u64)readl(addr + 4) << 32);
1517 }
1518 
1519 static inline void writeq(u64 val, volatile void __iomem *addr)
1520 {
1521 	writel(val, addr);
1522 	writel(val >> 32, addr + 4);
1523 }
1524 #endif
1525 
1526 static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
1527 {
1528 	return readq(adap->regs + reg_addr);
1529 }
1530 
1531 static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
1532 {
1533 	writeq(val, adap->regs + reg_addr);
1534 }
1535 
1536 /**
1537  * t4_set_hw_addr - store a port's MAC address in SW
1538  * @adapter: the adapter
1539  * @port_idx: the port index
1540  * @hw_addr: the Ethernet address
1541  *
1542  * Store the Ethernet address of the given port in SW.  Called by the common
1543  * code when it retrieves a port's Ethernet address from EEPROM.
1544  */
1545 static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
1546 				  u8 hw_addr[])
1547 {
1548 	ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
1549 	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
1550 }
1551 
1552 /**
1553  * netdev2pinfo - return the port_info structure associated with a net_device
1554  * @dev: the netdev
1555  *
1556  * Return the struct port_info associated with a net_device
1557  */
1558 static inline struct port_info *netdev2pinfo(const struct net_device *dev)
1559 {
1560 	return netdev_priv(dev);
1561 }
1562 
1563 /**
1564  * adap2pinfo - return the port_info of a port
1565  * @adap: the adapter
1566  * @idx: the port index
1567  *
1568  * Return the port_info structure for the port of the given index.
1569  */
1570 static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
1571 {
1572 	return netdev_priv(adap->port[idx]);
1573 }
1574 
1575 /**
1576  * netdev2adap - return the adapter structure associated with a net_device
1577  * @dev: the netdev
1578  *
1579  * Return the struct adapter associated with a net_device
1580  */
1581 static inline struct adapter *netdev2adap(const struct net_device *dev)
1582 {
1583 	return netdev2pinfo(dev)->adapter;
1584 }
1585 
1586 /* Return a version number to identify the type of adapter.  The scheme is:
1587  * - bits 0..9: chip version
1588  * - bits 10..15: chip revision
1589  * - bits 16..23: register dump version
1590  */
1591 static inline unsigned int mk_adap_vers(struct adapter *ap)
1592 {
1593 	return CHELSIO_CHIP_VERSION(ap->params.chip) |
1594 		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1595 }
1596 
1597 /* Return a queue's interrupt hold-off time in us.  0 means no timer. */
1598 static inline unsigned int qtimer_val(const struct adapter *adap,
1599 				      const struct sge_rspq *q)
1600 {
1601 	unsigned int idx = q->intr_params >> 1;
1602 
1603 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1604 }
1605 
1606 /* driver name used for ethtool_drvinfo */
1607 extern char cxgb4_driver_name[];
1608 
1609 void t4_os_portmod_changed(struct adapter *adap, int port_id);
1610 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
1611 
1612 void t4_free_sge_resources(struct adapter *adap);
1613 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
1614 irq_handler_t t4_intr_handler(struct adapter *adap);
1615 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
1616 int cxgb4_selftest_lb_pkt(struct net_device *netdev);
1617 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1618 		     const struct pkt_gl *gl);
1619 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
1620 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
1621 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1622 		     struct net_device *dev, int intr_idx,
1623 		     struct sge_fl *fl, rspq_handler_t hnd,
1624 		     rspq_flush_handler_t flush_handler, int cong);
1625 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1626 			 struct net_device *dev, struct netdev_queue *netdevq,
1627 			 unsigned int iqid, u8 dbqt);
1628 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
1629 			  struct net_device *dev, unsigned int iqid,
1630 			  unsigned int cmplqid);
1631 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
1632 			unsigned int cmplqid);
1633 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
1634 			 struct net_device *dev, unsigned int iqid,
1635 			 unsigned int uld_type);
1636 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
1637 			     struct net_device *dev, u32 iqid);
1638 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
1639 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
1640 int t4_sge_init(struct adapter *adap);
1641 void t4_sge_start(struct adapter *adap);
1642 void t4_sge_stop(struct adapter *adap);
1643 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
1644 				 int maxreclaim);
1645 void cxgb4_set_ethtool_ops(struct net_device *netdev);
1646 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
1647 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
1648 extern int dbfifo_int_thresh;
1649 
1650 #define for_each_port(adapter, iter) \
1651 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
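/* Illustrative use of for_each_port(), e.g. to take every port's net_device
 * carrier down:
 *
 *	int i;
 *
 *	for_each_port(adap, i)
 *		netif_carrier_off(adap->port[i]);
 */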
1652 
1653 static inline int is_bypass(struct adapter *adap)
1654 {
1655 	return adap->params.bypass;
1656 }
1657 
1658 static inline int is_bypass_device(int device)
1659 {
1660 	/* this should be set based upon device capabilities */
1661 	switch (device) {
1662 	case 0x440b:
1663 	case 0x440c:
1664 		return 1;
1665 	default:
1666 		return 0;
1667 	}
1668 }
1669 
1670 static inline int is_10gbt_device(int device)
1671 {
1672 	/* this should be set based upon device capabilities */
1673 	switch (device) {
1674 	case 0x4409:
1675 	case 0x4486:
1676 		return 1;
1677 
1678 	default:
1679 		return 0;
1680 	}
1681 }
1682 
1683 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
1684 {
1685 	return adap->params.vpd.cclk / 1000;
1686 }
1687 
1688 static inline unsigned int us_to_core_ticks(const struct adapter *adap,
1689 					    unsigned int us)
1690 {
1691 	return (us * adap->params.vpd.cclk) / 1000;
1692 }
1693 
1694 static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
1695 					    unsigned int ticks)
1696 {
1697 	/* add Core Clock / 2 to round ticks to the nearest us */
1698 	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
1699 		adapter->params.vpd.cclk);
1700 }
1701 
1702 static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
1703 					      unsigned int ticks)
1704 {
1705 	return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
1706 }
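/* The conversions above assume params.vpd.cclk holds the core clock in kHz.
 * For example, with a 500 MHz core clock (cclk == 500000),
 * core_ticks_per_usec() returns 500 and us_to_core_ticks(adap, 10) returns
 * 5000 ticks.
 */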
1707 
1708 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
1709 		      u32 val);
1710 
1711 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1712 			    int size, void *rpl, bool sleep_ok, int timeout);
1713 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
1714 		    void *rpl, bool sleep_ok);
1715 
1716 static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
1717 				     const void *cmd, int size, void *rpl,
1718 				     int timeout)
1719 {
1720 	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
1721 				       timeout);
1722 }
1723 
1724 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
1725 			     int size, void *rpl)
1726 {
1727 	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
1728 }
1729 
1730 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
1731 				int size, void *rpl)
1732 {
1733 	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
1734 }
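/* Illustrative sketch of issuing a firmware command synchronously through a
 * mailbox, mirroring what t4_fw_reset() does internally (field and flag
 * names as used by the firmware API; shown here as an example only):
 *
 *	struct fw_reset_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
 *				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 *	c.val = cpu_to_be32(PIORSTMODE_F | PIORST_F);
 *	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 */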
1735 
1736 /**
1737  *	hash_mac_addr - return the hash value of a MAC address
1738  *	@addr: the 48-bit Ethernet MAC address
1739  *
1740  *	Hashes a MAC address according to the hash function used by HW inexact
1741  *	(hash) address matching.
1742  */
1743 static inline int hash_mac_addr(const u8 *addr)
1744 {
1745 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1746 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1747 
1748 	a ^= b;
1749 	a ^= (a >> 12);
1750 	a ^= (a >> 6);
1751 	return a & 0x3f;
1752 }
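/* Illustrative sketch: folding a net_device's multicast list into the 64-bit
 * hash vector consumed by t4_set_addr_hash() (declared later in this file):
 *
 *	const struct netdev_hw_addr *ha;
 *	u64 vec = 0;
 *	int ret;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		vec |= 1ULL << hash_mac_addr(ha->addr);
 *	ret = t4_set_addr_hash(adap, adap->mbox, pi->viid, false, vec, false);
 */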
1753 
1754 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
1755 			       unsigned int cnt);
1756 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
1757 			     unsigned int us, unsigned int cnt,
1758 			     unsigned int size, unsigned int iqe_size)
1759 {
1760 	q->adap = adap;
1761 	cxgb4_set_rspq_intr_params(q, us, cnt);
1762 	q->iqe_len = iqe_size;
1763 	q->size = size;
1764 }
1765 
1766 /**
1767  *     t4_is_inserted_mod_type - check for a plugged-in Firmware Module Type
1768  *     @fw_mod_type: the Firmware Module Type
1769  *
1770  *     Return whether the Firmware Module Type represents a real Transceiver
1771  *     Module/Cable Module Type which has been inserted.
1772  */
1773 static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
1774 {
1775 	return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
1776 		fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
1777 		fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
1778 		fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
1779 }
1780 
1781 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
1782 		       unsigned int data_reg, const u32 *vals,
1783 		       unsigned int nregs, unsigned int start_idx);
1784 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
1785 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
1786 		      unsigned int start_idx);
1787 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
1788 
1789 struct fw_filter_wr;
1790 
1791 void t4_intr_enable(struct adapter *adapter);
1792 void t4_intr_disable(struct adapter *adapter);
1793 int t4_slow_intr_handler(struct adapter *adapter);
1794 
1795 int t4_wait_dev_ready(void __iomem *regs);
1796 
1797 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
1798 			      struct link_config *lc);
1799 int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
1800 		       unsigned int port, struct link_config *lc,
1801 		       u8 sleep_ok, int timeout);
1802 
1803 static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
1804 				unsigned int port, struct link_config *lc)
1805 {
1806 	return t4_link_l1cfg_core(adapter, mbox, port, lc,
1807 				  true, FW_CMD_MAX_TIMEOUT);
1808 }
1809 
1810 static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox,
1811 				   unsigned int port, struct link_config *lc)
1812 {
1813 	return t4_link_l1cfg_core(adapter, mbox, port, lc,
1814 				  false, FW_CMD_MAX_TIMEOUT);
1815 }
1816 
1817 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1818 
1819 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
1820 u32 t4_get_util_window(struct adapter *adap);
1821 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
1822 
1823 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
1824 		      u32 *mem_base, u32 *mem_aperture);
1825 void t4_memory_update_win(struct adapter *adap, int win, u32 addr);
1826 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
1827 			   int dir);
1828 #define T4_MEMORY_WRITE	0
1829 #define T4_MEMORY_READ	1
1830 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1831 		 void *buf, int dir);
1832 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1833 				  u32 len, __be32 *buf)
1834 {
1835 	return t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE);
1836 }
1837 
1838 unsigned int t4_get_regs_len(struct adapter *adapter);
1839 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
1840 
1841 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
1842 int t4_seeprom_wp(struct adapter *adapter, bool enable);
1843 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
1844 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1845 int t4_get_pfres(struct adapter *adapter);
1846 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1847 		  unsigned int nwords, u32 *data, int byte_oriented);
1848 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1849 int t4_load_phy_fw(struct adapter *adap, int win,
1850 		   int (*phy_fw_version)(const u8 *, size_t),
1851 		   const u8 *phy_fw_data, size_t phy_fw_size);
1852 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
1853 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
1854 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1855 		  const u8 *fw_data, unsigned int size, int force);
1856 int t4_fl_pkt_align(struct adapter *adap);
1857 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1858 int t4_check_fw_version(struct adapter *adap);
1859 int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
1860 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1861 int t4_get_bs_version(struct adapter *adapter, u32 *vers);
1862 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1863 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
1864 int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
1865 int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
1866 int t4_get_version_info(struct adapter *adapter);
1867 void t4_dump_version_info(struct adapter *adapter);
1868 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1869 	       const u8 *fw_data, unsigned int fw_size,
1870 	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
1871 int t4_prep_adapter(struct adapter *adapter);
1872 int t4_shutdown_adapter(struct adapter *adapter);
1873 
1874 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
1875 int t4_bar2_sge_qregs(struct adapter *adapter,
1876 		      unsigned int qid,
1877 		      enum t4_bar2_qtype qtype,
1878 		      int user,
1879 		      u64 *pbar2_qoffset,
1880 		      unsigned int *pbar2_qid);
1881 
1885 int t4_init_devlog_params(struct adapter *adapter);
1886 int t4_init_sge_params(struct adapter *adapter);
1887 int t4_init_tp_params(struct adapter *adap, bool sleep_ok);
1888 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
1889 int t4_init_rss_mode(struct adapter *adap, int mbox);
1890 int t4_init_portinfo(struct port_info *pi, int mbox,
1891 		     int port, int pf, int vf, u8 mac[]);
1892 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
1893 int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
1894 			u16 *mirror_viid);
1895 void t4_fatal_err(struct adapter *adapter);
1896 unsigned int t4_chip_rss_size(struct adapter *adapter);
1897 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1898 			int start, int n, const u16 *rspq, unsigned int nrspq);
1899 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1900 		       unsigned int flags);
1901 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
1902 		     unsigned int flags, unsigned int defq);
1903 int t4_read_rss(struct adapter *adapter, u16 *entries);
1904 void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
1905 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
1906 		      bool sleep_ok);
1907 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
1908 			   u32 *valp, bool sleep_ok);
1909 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
1910 			   u32 *vfl, u32 *vfh, bool sleep_ok);
1911 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
1912 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);
1913 
1914 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
1915 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
1916 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1917 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1918 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
1919 		    size_t n);
1920 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
1921 		    size_t n);
1922 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1923 		unsigned int *valp);
1924 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1925 		 const unsigned int *valp);
1926 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
1927 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1928 			unsigned int *pif_req_wrptr,
1929 			unsigned int *pif_rsp_wrptr);
1930 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
1931 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
1932 const char *t4_get_port_type_description(enum fw_port_type port_type);
1933 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1934 void t4_get_port_stats_offset(struct adapter *adap, int idx,
1935 			      struct port_stats *stats,
1936 			      struct port_stats *offset);
1937 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
1938 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1939 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
1940 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1941 			    unsigned int mask, unsigned int val);
1942 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
1943 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
1944 			 bool sleep_ok);
1945 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
1946 			 bool sleep_ok);
1947 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
1948 			  bool sleep_ok);
1949 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
1950 		      bool sleep_ok);
1951 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1952 			 struct tp_tcp_stats *v6, bool sleep_ok);
1953 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
1954 		       struct tp_fcoe_stats *st, bool sleep_ok);
1955 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1956 		  const unsigned short *alpha, const unsigned short *beta);
1957 
1958 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
1959 
1960 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
1961 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1962 
1963 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1964 			 const u8 *addr);
1965 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1966 		      u64 mask0, u64 mask1, unsigned int crc, bool enable);
1967 
1968 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
1969 		enum dev_master master, enum dev_state *state);
1970 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
1971 int t4_early_init(struct adapter *adap, unsigned int mbox);
1972 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
1973 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
1974 			  unsigned int cache_line_size);
1975 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
1976 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1977 		    unsigned int vf, unsigned int nparams, const u32 *params,
1978 		    u32 *val);
1979 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
1980 		       unsigned int vf, unsigned int nparams, const u32 *params,
1981 		       u32 *val);
1982 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
1983 		       unsigned int vf, unsigned int nparams, const u32 *params,
1984 		       u32 *val, int rw, bool sleep_ok);
1985 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
1986 			  unsigned int pf, unsigned int vf,
1987 			  unsigned int nparams, const u32 *params,
1988 			  const u32 *val, int timeout);
1989 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1990 		  unsigned int vf, unsigned int nparams, const u32 *params,
1991 		  const u32 *val);
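/* Illustrative sketch of querying a single device parameter, here the core
 * clock in kHz, using the FW_PARAM_DEV() helper defined near the top of this
 * file (example only):
 *
 *	u32 param = FW_PARAM_DEV(CCLK), val;
 *	int ret;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 *	if (!ret)
 *		adap->params.vpd.cclk = val;
 */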
1992 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
1993 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
1994 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
1995 		unsigned int vi, unsigned int cmask, unsigned int pmask,
1996 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
1997 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
1998 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
1999 		unsigned int *rss_size, u8 *vivld, u8 *vin);
2000 int t4_free_vi(struct adapter *adap, unsigned int mbox,
2001 	       unsigned int pf, unsigned int vf,
2002 	       unsigned int viid);
2003 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2004 		  unsigned int viid_mirror, int mtu, int promisc, int all_multi,
2005 		  int bcast, int vlanex, bool sleep_ok);
2006 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
2007 			 const u8 *addr, const u8 *mask, unsigned int idx,
2008 			 u8 lookup_type, u8 port_id, bool sleep_ok);
2009 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
2010 			   bool sleep_ok);
2011 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
2012 			    const u8 *addr, const u8 *mask, unsigned int vni,
2013 			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
2014 			    bool sleep_ok);
2015 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
2016 			  const u8 *addr, const u8 *mask, unsigned int idx,
2017 			  u8 lookup_type, u8 port_id, bool sleep_ok);
2018 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2019 		      unsigned int viid, bool free, unsigned int naddr,
2020 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
2021 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
2022 		     unsigned int viid, unsigned int naddr,
2023 		     const u8 **addr, bool sleep_ok);
2024 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2025 		  int idx, const u8 *addr, bool persist, u8 *smt_idx);
2026 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2027 		     bool ucast, u64 vec, bool sleep_ok);
2028 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
2029 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
2030 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
2031 			struct port_info *pi,
2032 			bool rx_en, bool tx_en, bool dcb_en);
2033 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2034 		 bool rx_en, bool tx_en);
2035 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2036 		     unsigned int nblinks);
2037 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2038 	       unsigned int mmd, unsigned int reg, u16 *valp);
2039 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2040 	       unsigned int mmd, unsigned int reg, u16 val);
2041 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
2042 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
2043 	       unsigned int fl0id, unsigned int fl1id);
2044 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2045 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
2046 	       unsigned int fl0id, unsigned int fl1id);
2047 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2048 		   unsigned int vf, unsigned int eqid);
2049 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2050 		    unsigned int vf, unsigned int eqid);
2051 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2052 		    unsigned int vf, unsigned int eqid);
2053 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
2054 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
2055 			  u16 *dbqtimers);
2056 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
2057 int t4_update_port_info(struct port_info *pi);
2058 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
2059 		       unsigned int *speedp, unsigned int *mtup);
2060 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
2061 void t4_db_full(struct adapter *adapter);
2062 void t4_db_dropped(struct adapter *adapter);
2063 int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
2064 			int filter_index, int enable);
2065 void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
2066 			 int filter_index, int *enabled);
2067 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2068 			 u32 addr, u32 val);
2069 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
2070 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
2071 		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
2072 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
2073 		   enum ctxt_type ctype, u32 *data);
2074 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
2075 		      enum ctxt_type ctype, u32 *data);
2076 int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
2077 		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
2078 		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
2079 		    u16 burstsize);
2080 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
2081 void t4_idma_monitor_init(struct adapter *adapter,
2082 			  struct sge_idma_monitor_state *idma);
2083 void t4_idma_monitor(struct adapter *adapter,
2084 		     struct sge_idma_monitor_state *idma,
2085 		     int hz, int ticks);
2086 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
2087 		      unsigned int naddr, u8 *addr);
2088 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
2089 		    u32 start_index, bool sleep_ok);
2090 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
2091 		       u32 start_index, bool sleep_ok);
2092 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
2093 		    u32 start_index, bool sleep_ok);
2094 
2095 void t4_uld_mem_free(struct adapter *adap);
2096 int t4_uld_mem_alloc(struct adapter *adap);
2097 void t4_uld_clean_up(struct adapter *adap);
2098 void t4_register_netevent_notifier(void);
2099 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
2100 	      unsigned int devid, unsigned int offset,
2101 	      unsigned int len, u8 *buf);
2102 int t4_load_boot(struct adapter *adap, u8 *boot_data,
2103 		 unsigned int boot_addr, unsigned int size);
2104 int t4_load_bootcfg(struct adapter *adap,
2105 		    const u8 *cfg_data, unsigned int size);
2106 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
2107 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
2108 		  unsigned int n, bool unmap);
2109 void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
2110 			      u32 ndesc);
2111 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
2112 void cxgb4_ethofld_restart(struct tasklet_struct *t);
2113 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
2114 			     const struct pkt_gl *si);
2115 void free_txq(struct adapter *adap, struct sge_txq *q);
2116 void cxgb4_reclaim_completed_tx(struct adapter *adap,
2117 				struct sge_txq *q, bool unmap);
2118 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
2119 		  dma_addr_t *addr);
2120 void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
2121 			 void *pos);
2122 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
2123 		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
2124 		     const dma_addr_t *addr);
2125 void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
2126 			     struct ulptx_sgl *sgl, u64 *end,
2127 			     const dma_addr_t *addr, u32 start, u32 send_len);
2128 void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
2129 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
2130 		    u16 vlan);
2131 int cxgb4_dcb_enabled(const struct net_device *dev);
2132 
2133 int cxgb4_thermal_init(struct adapter *adap);
2134 int cxgb4_thermal_remove(struct adapter *adap);
2135 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
2136 		       cpumask_var_t *aff_mask, int idx);
2137 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
2138 
2139 int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
2140 		     int *tcam_idx, const u8 *addr,
2141 		     bool persistent, u8 *smt_idx);
2142 
2143 int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
2144 			 bool free, unsigned int naddr,
2145 			 const u8 **addr, u16 *idx,
2146 			 u64 *hash, bool sleep_ok);
2147 int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
2148 			unsigned int naddr, const u8 **addr, bool sleep_ok);
2149 int cxgb4_init_mps_ref_entries(struct adapter *adap);
2150 void cxgb4_free_mps_ref_entries(struct adapter *adap);
2151 int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
2152 			       const u8 *addr, const u8 *mask,
2153 			       unsigned int vni, unsigned int vni_mask,
2154 			       u8 dip_hit, u8 lookup_type, bool sleep_ok);
2155 int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
2156 			      int idx, bool sleep_ok);
2157 int cxgb4_free_raw_mac_filt(struct adapter *adap,
2158 			    unsigned int viid,
2159 			    const u8 *addr,
2160 			    const u8 *mask,
2161 			    unsigned int idx,
2162 			    u8 lookup_type,
2163 			    u8 port_id,
2164 			    bool sleep_ok);
2165 int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
2166 			     unsigned int viid,
2167 			     const u8 *addr,
2168 			     const u8 *mask,
2169 			     unsigned int idx,
2170 			     u8 lookup_type,
2171 			     u8 port_id,
2172 			     bool sleep_ok);
2173 int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
2174 			  int *tcam_idx, const u8 *addr,
2175 			  bool persistent, u8 *smt_idx);
2176 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
2177 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
2178 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
2179 void cxgb4_quiesce_rx(struct sge_rspq *q);
2180 int cxgb4_port_mirror_alloc(struct net_device *dev);
2181 void cxgb4_port_mirror_free(struct net_device *dev);
2182 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
2183 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
2184 #endif
2185 #endif /* __CXGB4_H__ */
2186