/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

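/*
 * Example (illustrative sketch only -- STATUS_REG and DONE_F below are
 * hypothetical names, not registers defined by this driver): poll a status
 * register until its "done" bit goes high, checking up to 10 times with
 * 5 usecs between polls and capturing the register value at completion:
 *
 *	u32 val;
 *	int ret = t4_wait_op_done_val(adap, STATUS_REG, DONE_F, 1, 10, 5,
 *				      &val);
 *	if (ret == -EAGAIN)
 *		;	// operation did not complete within 10 polls
 */
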
/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

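/*
 * Example (illustrative sketch only; CTRL_REG is a hypothetical register):
 * update a 4-bit field occupying bits [7:4] of a control register without
 * disturbing the other bits:
 *
 *	t4_set_reg_field(adap, CTRL_REG, 0xf0, 0x30);
 *
 * Only the bits set in the mask (0xf0) are modified, and @val must already
 * be shifted into position within that mask.
 */
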
/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

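/*
 * Example (illustrative sketch only; ADDR_REG and DATA_REG are hypothetical
 * names for an address/data register pair): read four consecutive indirect
 * registers starting at index 8:
 *
 *	u32 vals[4];
 *	t4_read_indirect(adap, ADDR_REG, DATA_REG, vals, 4, 8);
 *
 * Each iteration writes the register index to the address register and then
 * transfers the selected register's value through the data register.
 */
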
/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);

	if (is_t4(adap->params.chip))
		req |= LOCALCFG_F;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	if (pcie_fw & PCIE_FW_ERR_F)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[PCIE_FW_EVAL_G(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}

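/*
 * Example (illustrative sketch only): issuing a firmware command through the
 * mailbox.  Callers normally go through the t4_wr_mbox() wrapper (declared in
 * cxgb4.h), which invokes t4_wr_mbox_meat() with @sleep_ok set.  The exact
 * command fields below are elided since they depend on the command:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	// ... fill in op_to_write, retval_len16 and the command payload ...
 *	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 *
 * The command size must be a multiple of 16 bytes and at most MBOX_LEN.
 */
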
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD_A;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
		mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
		     BIST_CMD_GAP_V(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
		edc_bist_status_rdata =
			 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@buf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY1_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
							mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}

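/*
 * Example (illustrative sketch only): read 128 bytes from EDC0 at a 32-bit
 * aligned offset through memory window 0.  The host buffer is declared
 * __be32 because the data arrives as a raw big-endian byte stream:
 *
 *	__be32 buf[32];
 *	ret = t4_memory_rw(adap, 0, MEM_EDC0, 0x1000, sizeof(buf), buf,
 *			   T4_MEMORY_READ);
 *
 * Any multi-byte integers inside the returned bytes must be byte-swapped
 * by the caller.
 */
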
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall begin with a unique identifier specified by the PCI
	 * SIG.  For Chelsio adapters this is CHELSIO_VPD_UNIQUE_ID (0x82),
	 * which the VPD programming software is expected to place in the
	 * first byte of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA_A);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_DATA_A, val);
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

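/*
 * Example (illustrative sketch only): read 16 32-bit words of raw image data
 * from the start of the FW flash region.  @byte_oriented is set so the words
 * land in the buffer as a big-endian byte stream, suitable for memcmp()
 * against file contents:
 *
 *	u32 buf[16];
 *	ret = t4_read_flash(adap, FLASH_FW_START, ARRAY_SIZE(buf), buf, 1);
 */
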
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_get_exprom_version - return the Expansion ROM version (if any)
 *	@adap: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Expansion ROM header from FLASH and returns the version
 *	number (if present) through the @vers return value pointer.  We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
	return 0;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static bool t4_fw_matches_chip(const struct adapter *adap,
			       const struct fw_hdr *hdr)
{
	/* The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
		return true;

	dev_err(adap->pdev_dev,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return false;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op: the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
			    FW_PARAMS_CMD_PFN_V(adap->fn) |
			    FW_PARAMS_CMD_VFN_V(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

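/*
 * Example (illustrative sketch only -- SOME_ERR_F and EXAMPLE_INT_CAUSE_A
 * are hypothetical names): a minimal interrupt table for the handler above.
 * Each entry names the status bits to test, the message to log, a stat
 * index (-1 for none), a fatal flag, and an optional callback; the table is
 * terminated by a zero mask:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{ SOME_ERR_F, "example parity error", -1, 1 },
 *		{ 0 }
 *	};
 *
 *	if (t4_handle_intr_status(adap, EXAMPLE_INT_CAUSE_A,
 *				  example_intr_info))
 *		t4_fatal_err(adap);
 */
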
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP_F, "RXNP array parity error", -1, 1 },
		{ RPCP_F, "RXPC array parity error", -1, 1 },
		{ RCIP_F, "RXCIF array parity error", -1, 1 },
		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
		{ RFTP_F, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP_F, "TXPC array parity error", -1, 1 },
		{ TNPP_F, "TXNP array parity error", -1, 1 },
		{ TFTP_F, "TXFT array parity error", -1, 1 },
		{ TCAP_F, "TXCA array parity error", -1, 1 },
		{ TCIP_F, "TXCIF array parity error", -1, 1 },
		{ RCAP_F, "RXCA array parity error", -1, 1 },
		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
		{ RDPE_F, "Rx data parity error", -1, 1 },
		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR_F, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
				sysbus_intr_info) +
			t4_handle_intr_status(adapter,
					PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
					pcie_port_intr_info) +
			t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					      pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
				(unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
1652 		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
1653 		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
1654 		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
1655 		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
1656 		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
1657 		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
1658 		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
1659 		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
1660 		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
1661 		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
1662 		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
1663 		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
1664 		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
1665 		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
1666 		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
1667 		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
1668 		{ 0 }
1669 	};
1670 
1671 	int fat;
1672 
1673 	if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
1674 		t4_report_fw_error(adapter);
1675 
1676 	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
1677 				    cim_intr_info) +
1678 	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
1679 				    cim_upintr_info);
1680 	if (fat)
1681 		t4_fatal_err(adapter);
1682 }
1683 
1684 /*
1685  * ULP RX interrupt handler.
1686  */
1687 static void ulprx_intr_handler(struct adapter *adapter)
1688 {
1689 	static const struct intr_info ulprx_intr_info[] = {
1690 		{ 0x1800000, "ULPRX context error", -1, 1 },
1691 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1692 		{ 0 }
1693 	};
1694 
1695 	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
1696 		t4_fatal_err(adapter);
1697 }
1698 
1699 /*
1700  * ULP TX interrupt handler.
1701  */
1702 static void ulptx_intr_handler(struct adapter *adapter)
1703 {
1704 	static const struct intr_info ulptx_intr_info[] = {
1705 		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
1706 		  0 },
1707 		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
1708 		  0 },
1709 		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
1710 		  0 },
1711 		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
1712 		  0 },
1713 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1714 		{ 0 }
1715 	};
1716 
1717 	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
1718 		t4_fatal_err(adapter);
1719 }
1720 
1721 /*
1722  * PM TX interrupt handler.
1723  */
1724 static void pmtx_intr_handler(struct adapter *adapter)
1725 {
1726 	static const struct intr_info pmtx_intr_info[] = {
1727 		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
1728 		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
1729 		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
1730 		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
1731 		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
1732 		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
1733 		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
1734 		  -1, 1 },
1735 		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
1736 		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
1737 		{ 0 }
1738 	};
1739 
1740 	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
1741 		t4_fatal_err(adapter);
1742 }
1743 
1744 /*
1745  * PM RX interrupt handler.
1746  */
1747 static void pmrx_intr_handler(struct adapter *adapter)
1748 {
1749 	static const struct intr_info pmrx_intr_info[] = {
1750 		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
1751 		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
1752 		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
1753 		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
1754 		  -1, 1 },
1755 		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
1756 		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
1757 		{ 0 }
1758 	};
1759 
1760 	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
1761 		t4_fatal_err(adapter);
1762 }
1763 
1764 /*
1765  * CPL switch interrupt handler.
1766  */
1767 static void cplsw_intr_handler(struct adapter *adapter)
1768 {
1769 	static const struct intr_info cplsw_intr_info[] = {
1770 		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
1771 		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
1772 		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
1773 		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
1774 		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
1775 		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
1776 		{ 0 }
1777 	};
1778 
1779 	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
1780 		t4_fatal_err(adapter);
1781 }
1782 
1783 /*
1784  * LE interrupt handler.
1785  */
1786 static void le_intr_handler(struct adapter *adap)
1787 {
1788 	static const struct intr_info le_intr_info[] = {
1789 		{ LIPMISS_F, "LE LIP miss", -1, 0 },
1790 		{ LIP0_F, "LE 0 LIP error", -1, 0 },
1791 		{ PARITYERR_F, "LE parity error", -1, 1 },
1792 		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
1793 		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
1794 		{ 0 }
1795 	};
1796 
1797 	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
1798 		t4_fatal_err(adap);
1799 }
1800 
1801 /*
1802  * MPS interrupt handler.
1803  */
1804 static void mps_intr_handler(struct adapter *adapter)
1805 {
1806 	static const struct intr_info mps_rx_intr_info[] = {
1807 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1808 		{ 0 }
1809 	};
1810 	static const struct intr_info mps_tx_intr_info[] = {
1811 		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
1812 		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1813 		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
1814 		  -1, 1 },
1815 		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
1816 		  -1, 1 },
1817 		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
1818 		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
1819 		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
1820 		{ 0 }
1821 	};
1822 	static const struct intr_info mps_trc_intr_info[] = {
1823 		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
1824 		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
1825 		  -1, 1 },
1826 		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
1827 		{ 0 }
1828 	};
1829 	static const struct intr_info mps_stat_sram_intr_info[] = {
1830 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1831 		{ 0 }
1832 	};
1833 	static const struct intr_info mps_stat_tx_intr_info[] = {
1834 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1835 		{ 0 }
1836 	};
1837 	static const struct intr_info mps_stat_rx_intr_info[] = {
1838 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1839 		{ 0 }
1840 	};
1841 	static const struct intr_info mps_cls_intr_info[] = {
1842 		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
1843 		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
1844 		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
1845 		{ 0 }
1846 	};
1847 
1848 	int fat;
1849 
1850 	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
1851 				    mps_rx_intr_info) +
1852 	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
1853 				    mps_tx_intr_info) +
1854 	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
1855 				    mps_trc_intr_info) +
1856 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
1857 				    mps_stat_sram_intr_info) +
1858 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
1859 				    mps_stat_tx_intr_info) +
1860 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
1861 				    mps_stat_rx_intr_info) +
1862 	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
1863 				    mps_cls_intr_info);
1864 
1865 	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
1866 	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
1867 	if (fat)
1868 		t4_fatal_err(adapter);
1869 }
1870 
1871 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
1872 		      ECC_UE_INT_CAUSE_F)
1873 
1874 /*
1875  * EDC/MC interrupt handler.
1876  */
1877 static void mem_intr_handler(struct adapter *adapter, int idx)
1878 {
1879 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1880 
1881 	unsigned int addr, cnt_addr, v;
1882 
1883 	if (idx <= MEM_EDC1) {
1884 		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
1885 		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
1886 	} else if (idx == MEM_MC) {
1887 		if (is_t4(adapter->params.chip)) {
1888 			addr = MC_INT_CAUSE_A;
1889 			cnt_addr = MC_ECC_STATUS_A;
1890 		} else {
1891 			addr = MC_P_INT_CAUSE_A;
1892 			cnt_addr = MC_P_ECC_STATUS_A;
1893 		}
1894 	} else {
1895 		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
1896 		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
1897 	}
1898 
1899 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1900 	if (v & PERR_INT_CAUSE_F)
1901 		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1902 			  name[idx]);
1903 	if (v & ECC_CE_INT_CAUSE_F) {
1904 		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
1905 
1906 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
1907 		if (printk_ratelimit())
1908 			dev_warn(adapter->pdev_dev,
1909 				 "%u %s correctable ECC data error%s\n",
1910 				 cnt, name[idx], cnt > 1 ? "s" : "");
1911 	}
1912 	if (v & ECC_UE_INT_CAUSE_F)
1913 		dev_alert(adapter->pdev_dev,
1914 			  "%s uncorrectable ECC data error\n", name[idx]);
1915 
1916 	t4_write_reg(adapter, addr, v);
1917 	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
1918 		t4_fatal_err(adapter);
1919 }
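
/*
 * Behavioural note on the correctable-ECC path above (a summary, not new
 * logic): the ECC_CECNT field accumulates correctable-error counts in
 * hardware, so the handler snapshots it, writes the all-ones field value
 * back to reset it (write-one-to-clear, as used here), and rate-limits
 * the warning since correctable errors by themselves are not fatal.
 */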
1920 
1921 /*
1922  * MA interrupt handler.
1923  */
1924 static void ma_intr_handler(struct adapter *adap)
1925 {
1926 	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
1927 
1928 	if (status & MEM_PERR_INT_CAUSE_F) {
1929 		dev_alert(adap->pdev_dev,
1930 			  "MA parity error, parity status %#x\n",
1931 			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
1932 		if (is_t5(adap->params.chip))
1933 			dev_alert(adap->pdev_dev,
1934 				  "MA parity error, parity status %#x\n",
1935 				  t4_read_reg(adap,
1936 					      MA_PARITY_ERROR_STATUS2_A));
1937 	}
1938 	if (status & MEM_WRAP_INT_CAUSE_F) {
1939 		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
1940 		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1941 			  "client %u to address %#x\n",
1942 			  MEM_WRAP_CLIENT_NUM_G(v),
1943 			  MEM_WRAP_ADDRESS_G(v) << 4);
1944 	}
1945 	t4_write_reg(adap, MA_INT_CAUSE_A, status);
1946 	t4_fatal_err(adap);
1947 }
1948 
1949 /*
1950  * SMB interrupt handler.
1951  */
1952 static void smb_intr_handler(struct adapter *adap)
1953 {
1954 	static const struct intr_info smb_intr_info[] = {
1955 		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
1956 		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
1957 		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
1958 		{ 0 }
1959 	};
1960 
1961 	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
1962 		t4_fatal_err(adap);
1963 }
1964 
1965 /*
1966  * NC-SI interrupt handler.
1967  */
1968 static void ncsi_intr_handler(struct adapter *adap)
1969 {
1970 	static const struct intr_info ncsi_intr_info[] = {
1971 		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
1972 		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
1973 		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
1974 		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
1975 		{ 0 }
1976 	};
1977 
1978 	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
1979 		t4_fatal_err(adap);
1980 }
1981 
1982 /*
1983  * XGMAC interrupt handler.
1984  */
1985 static void xgmac_intr_handler(struct adapter *adap, int port)
1986 {
1987 	u32 v, int_cause_reg;
1988 
1989 	if (is_t4(adap->params.chip))
1990 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
1991 	else
1992 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
1993 
1994 	v = t4_read_reg(adap, int_cause_reg);
1995 
1996 	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
1997 	if (!v)
1998 		return;
1999 
2000 	if (v & TXFIFO_PRTY_ERR_F)
2001 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
2002 			  port);
2003 	if (v & RXFIFO_PRTY_ERR_F)
2004 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
2005 			  port);
2006 	t4_write_reg(adap, int_cause_reg, v);
2007 	t4_fatal_err(adap);
2008 }
2009 
2010 /*
2011  * PL interrupt handler.
2012  */
2013 static void pl_intr_handler(struct adapter *adap)
2014 {
2015 	static const struct intr_info pl_intr_info[] = {
2016 		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
2017 		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
2018 		{ 0 }
2019 	};
2020 
2021 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
2022 		t4_fatal_err(adap);
2023 }
2024 
2025 #define PF_INTR_MASK (PFSW_F)
2026 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
2027 		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
2028 		CPL_SWITCH_F | SGE_F | ULP_TX_F)
2029 
2030 /**
2031  *	t4_slow_intr_handler - control path interrupt handler
2032  *	@adapter: the adapter
2033  *
2034  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2035  *	The designation 'slow' is because it involves register reads, while
2036  *	data interrupts typically don't involve any MMIOs.
2037  */
2038 int t4_slow_intr_handler(struct adapter *adapter)
2039 {
2040 	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
2041 
2042 	if (!(cause & GLBL_INTR_MASK))
2043 		return 0;
2044 	if (cause & CIM_F)
2045 		cim_intr_handler(adapter);
2046 	if (cause & MPS_F)
2047 		mps_intr_handler(adapter);
2048 	if (cause & NCSI_F)
2049 		ncsi_intr_handler(adapter);
2050 	if (cause & PL_F)
2051 		pl_intr_handler(adapter);
2052 	if (cause & SMB_F)
2053 		smb_intr_handler(adapter);
2054 	if (cause & XGMAC0_F)
2055 		xgmac_intr_handler(adapter, 0);
2056 	if (cause & XGMAC1_F)
2057 		xgmac_intr_handler(adapter, 1);
2058 	if (cause & XGMAC_KR0_F)
2059 		xgmac_intr_handler(adapter, 2);
2060 	if (cause & XGMAC_KR1_F)
2061 		xgmac_intr_handler(adapter, 3);
2062 	if (cause & PCIE_F)
2063 		pcie_intr_handler(adapter);
2064 	if (cause & MC_F)
2065 		mem_intr_handler(adapter, MEM_MC);
2066 	if (!is_t4(adapter->params.chip) && (cause & MC1_S))
2067 		mem_intr_handler(adapter, MEM_MC1);
2068 	if (cause & EDC0_F)
2069 		mem_intr_handler(adapter, MEM_EDC0);
2070 	if (cause & EDC1_F)
2071 		mem_intr_handler(adapter, MEM_EDC1);
2072 	if (cause & LE_F)
2073 		le_intr_handler(adapter);
2074 	if (cause & TP_F)
2075 		tp_intr_handler(adapter);
2076 	if (cause & MA_F)
2077 		ma_intr_handler(adapter);
2078 	if (cause & PM_TX_F)
2079 		pmtx_intr_handler(adapter);
2080 	if (cause & PM_RX_F)
2081 		pmrx_intr_handler(adapter);
2082 	if (cause & ULP_RX_F)
2083 		ulprx_intr_handler(adapter);
2084 	if (cause & CPL_SWITCH_F)
2085 		cplsw_intr_handler(adapter);
2086 	if (cause & SGE_F)
2087 		sge_intr_handler(adapter);
2088 	if (cause & ULP_TX_F)
2089 		ulptx_intr_handler(adapter);
2090 
2091 	/* Clear the interrupts just processed for which we are the master. */
2092 	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
2093 	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
2094 	return 1;
2095 }
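
/*
 * A minimal caller sketch (illustrative only; example_nondata_intr and
 * the cookie layout are assumptions, not the driver's actual INTx/MSI
 * handler, and <linux/interrupt.h> provides irqreturn_t):
 *
 *	static irqreturn_t example_nondata_intr(int irq, void *cookie)
 *	{
 *		struct adapter *adap = cookie;
 *
 *		if (t4_slow_intr_handler(adap))
 *			return IRQ_HANDLED;
 *		return IRQ_NONE;
 *	}
 */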
2096 
2097 /**
2098  *	t4_intr_enable - enable interrupts
2099  *	@adapter: the adapter whose interrupts should be enabled
2100  *
2101  *	Enable PF-specific interrupts for the calling function and the top-level
2102  *	interrupt concentrator for global interrupts.  Interrupts are already
2103  *	enabled at each module,	here we just enable the roots of the interrupt
2104  *	hierarchies.
2105  *
2106  *	Note: this function should be called only when the driver manages
2107  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2108  *	function at a time should be doing this.
2109  */
2110 void t4_intr_enable(struct adapter *adapter)
2111 {
2112 	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2113 
2114 	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
2115 		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
2116 		     ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
2117 		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2118 		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2119 		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2120 		     ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
2121 		     DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
2122 		     EGRESS_SIZE_ERR_F);
2123 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
2124 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
2125 }
2126 
2127 /**
2128  *	t4_intr_disable - disable interrupts
2129  *	@adapter: the adapter whose interrupts should be disabled
2130  *
2131  *	Disable interrupts.  We only disable the top-level interrupt
2132  *	concentrators.  The caller must be a PCI function managing global
2133  *	interrupts.
2134  */
2135 void t4_intr_disable(struct adapter *adapter)
2136 {
2137 	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2138 
2139 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
2140 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
2141 }
2142 
2143 /**
2144  *	hash_mac_addr - return the hash value of a MAC address
2145  *	@addr: the 48-bit Ethernet MAC address
2146  *
2147  *	Hashes a MAC address according to the hash function used by HW inexact
2148  *	(hash) address matching.
2149  */
2150 static int hash_mac_addr(const u8 *addr)
2151 {
2152 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2153 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2154 	a ^= b;
2155 	a ^= (a >> 12);
2156 	a ^= (a >> 6);
2157 	return a & 0x3f;
2158 }
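
/*
 * Worked example (address chosen arbitrarily): for the MAC
 * 00:07:43:12:34:56,
 *
 *	a = 0x000743, b = 0x123456
 *	a ^= b        ->  a = 0x123315
 *	a ^= a >> 12  ->  a = 0x123236
 *	a ^= a >> 6   ->  a = 0x127afe
 *	a & 0x3f      ->  0x3e
 *
 * so the address falls into bucket 62 of the 64-entry inexact hash.
 */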
2159 
2160 /**
2161  *	t4_config_rss_range - configure a portion of the RSS mapping table
2162  *	@adapter: the adapter
2163  *	@mbox: mbox to use for the FW command
2164  *	@viid: virtual interface whose RSS subtable is to be written
2165  *	@start: start entry in the table to write
2166  *	@n: how many table entries to write
2167  *	@rspq: values for the response queue lookup table
2168  *	@nrspq: number of values in @rspq
2169  *
2170  *	Programs the selected part of the VI's RSS mapping table with the
2171  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2172  *	until the full table range is populated.
2173  *
2174  *	The caller must ensure the values in @rspq are in the range allowed for
2175  *	@viid.
2176  */
2177 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2178 			int start, int n, const u16 *rspq, unsigned int nrspq)
2179 {
2180 	int ret;
2181 	const u16 *rsp = rspq;
2182 	const u16 *rsp_end = rspq + nrspq;
2183 	struct fw_rss_ind_tbl_cmd cmd;
2184 
2185 	memset(&cmd, 0, sizeof(cmd));
2186 	cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2187 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2188 			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
2189 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2190 
2191 	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2192 	while (n > 0) {
2193 		int nq = min(n, 32);
2194 		__be32 *qp = &cmd.iq0_to_iq2;
2195 
2196 		cmd.niqid = htons(nq);
2197 		cmd.startidx = htons(start);
2198 
2199 		start += nq;
2200 		n -= nq;
2201 
2202 		while (nq > 0) {
2203 			unsigned int v;
2204 
2205 			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
2206 			if (++rsp >= rsp_end)
2207 				rsp = rspq;
2208 			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
2209 			if (++rsp >= rsp_end)
2210 				rsp = rspq;
2211 			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
2212 			if (++rsp >= rsp_end)
2213 				rsp = rspq;
2214 
2215 			*qp++ = htonl(v);
2216 			nq -= 3;
2217 		}
2218 
2219 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2220 		if (ret)
2221 			return ret;
2222 	}
2223 	return 0;
2224 }
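
/*
 * Hedged usage sketch: spread four response queues (hypothetical ingress
 * queue IDs 16..19) across a 64-entry slice of a VI's indirection table.
 * Since nrspq < n the IDs repeat, as described above; mbox is the
 * caller's FW mailbox:
 *
 *	static int example_setup_rss(struct adapter *adap, int mbox,
 *				     unsigned int viid)
 *	{
 *		static const u16 rspq[] = { 16, 17, 18, 19 };
 *
 *		return t4_config_rss_range(adap, mbox, viid, 0, 64,
 *					   rspq, ARRAY_SIZE(rspq));
 *	}
 */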
2225 
2226 /**
2227  *	t4_config_glbl_rss - configure the global RSS mode
2228  *	@adapter: the adapter
2229  *	@mbox: mbox to use for the FW command
2230  *	@mode: global RSS mode
2231  *	@flags: mode-specific flags
2232  *
2233  *	Sets the global RSS mode.
2234  */
2235 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2236 		       unsigned int flags)
2237 {
2238 	struct fw_rss_glb_config_cmd c;
2239 
2240 	memset(&c, 0, sizeof(c));
2241 	c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2242 			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
2243 	c.retval_len16 = htonl(FW_LEN16(c));
2244 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2245 		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
2246 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2247 		c.u.basicvirtual.mode_pkd =
2248 			htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
2249 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2250 	} else
2251 		return -EINVAL;
2252 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2253 }
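
/*
 * Illustrative call (the flag combination is an assumption; see
 * t4fw_api.h for the full set): select basic virtual mode with tunnel
 * lookups enabled for all packets:
 *
 *	ret = t4_config_glbl_rss(adap, mbox,
 *				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 *				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
 *				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
 */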
2254 
2255 /* Read an RSS table row */
2256 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2257 {
2258 	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
2259 	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
2260 				   5, 0, val);
2261 }
2262 
2263 /**
2264  *	t4_read_rss - read the contents of the RSS mapping table
2265  *	@adapter: the adapter
2266  *	@map: holds the contents of the RSS mapping table
2267  *
2268  *	Reads the contents of the RSS hash->queue mapping table.
2269  */
2270 int t4_read_rss(struct adapter *adapter, u16 *map)
2271 {
2272 	u32 val;
2273 	int i, ret;
2274 
2275 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2276 		ret = rd_rss_row(adapter, i, &val);
2277 		if (ret)
2278 			return ret;
2279 		*map++ = LKPTBLQUEUE0_G(val);
2280 		*map++ = LKPTBLQUEUE1_G(val);
2281 	}
2282 	return 0;
2283 }
2284 
2285 /**
2286  *	t4_read_rss_key - read the global RSS key
2287  *	@adap: the adapter
2288  *	@key: 10-entry array holding the 320-bit RSS key
2289  *
2290  *	Reads the global 320-bit RSS key.
2291  */
2292 void t4_read_rss_key(struct adapter *adap, u32 *key)
2293 {
2294 	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2295 			 TP_RSS_SECRET_KEY0_A);
2296 }
2297 
2298 /**
2299  *	t4_write_rss_key - program one of the RSS keys
2300  *	@adap: the adapter
2301  *	@key: 10-entry array holding the 320-bit RSS key
2302  *	@idx: which RSS key to write
2303  *
2304  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2305  *	0..15 the corresponding entry in the RSS key table is written,
2306  *	otherwise the global RSS key is written.
2307  */
2308 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2309 {
2310 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2311 			  TP_RSS_SECRET_KEY0_A);
2312 	if (idx >= 0 && idx < 16)
2313 		t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
2314 			     KEYWRADDR_V(idx) | KEYWREN_F);
2315 }
2316 
2317 /**
2318  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2319  *	@adapter: the adapter
2320  *	@index: the entry in the PF RSS table to read
2321  *	@valp: where to store the returned value
2322  *
2323  *	Reads the PF RSS Configuration Table at the specified index and returns
2324  *	the value found there.
2325  */
2326 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
2327 			   u32 *valp)
2328 {
2329 	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2330 			 valp, 1, TP_RSS_PF0_CONFIG_A + index);
2331 }
2332 
2333 /**
2334  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2335  *	@adapter: the adapter
2336  *	@index: the entry in the VF RSS table to read
2337  *	@vfl: where to store the returned VFL
2338  *	@vfh: where to store the returned VFH
2339  *
2340  *	Reads the VF RSS Configuration Table at the specified index and returns
2341  *	the (VFL, VFH) values found there.
2342  */
2343 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2344 			   u32 *vfl, u32 *vfh)
2345 {
2346 	u32 vrt, mask, data;
2347 
2348 	mask = VFWRADDR_V(VFWRADDR_M);
2349 	data = VFWRADDR_V(index);
2350 
2351 	/* Request that the index'th VF Table values be read into VFL/VFH.
2352 	 */
2353 	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
2354 	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
2355 	vrt |= data | VFRDEN_F;
2356 	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
2357 
2358 	/* Grab the VFL/VFH values ...
2359 	 */
2360 	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2361 			 vfl, 1, TP_RSS_VFL_CONFIG_A);
2362 	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2363 			 vfh, 1, TP_RSS_VFH_CONFIG_A);
2364 }
2365 
2366 /**
2367  *	t4_read_rss_pf_map - read PF RSS Map
2368  *	@adapter: the adapter
2369  *
2370  *	Reads the PF RSS Map register and returns its value.
2371  */
2372 u32 t4_read_rss_pf_map(struct adapter *adapter)
2373 {
2374 	u32 pfmap;
2375 
2376 	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2377 			 &pfmap, 1, TP_RSS_PF_MAP_A);
2378 	return pfmap;
2379 }
2380 
2381 /**
2382  *	t4_read_rss_pf_mask - read PF RSS Mask
2383  *	@adapter: the adapter
2384  *
2385  *	Reads the PF RSS Mask register and returns its value.
2386  */
2387 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2388 {
2389 	u32 pfmask;
2390 
2391 	t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2392 			 &pfmask, 1, TP_RSS_PF_MSK_A);
2393 	return pfmask;
2394 }
2395 
2396 /**
2397  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2398  *	@adap: the adapter
2399  *	@v4: holds the TCP/IP counter values
2400  *	@v6: holds the TCP/IPv6 counter values
2401  *
2402  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2403  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2404  */
2405 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2406 			 struct tp_tcp_stats *v6)
2407 {
2408 	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
2409 
2410 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
2411 #define STAT(x)     val[STAT_IDX(x)]
2412 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2413 
2414 	if (v4) {
2415 		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2416 				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
2417 		v4->tcpOutRsts = STAT(OUT_RST);
2418 		v4->tcpInSegs  = STAT64(IN_SEG);
2419 		v4->tcpOutSegs = STAT64(OUT_SEG);
2420 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2421 	}
2422 	if (v6) {
2423 		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2424 				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
2425 		v6->tcpOutRsts = STAT(OUT_RST);
2426 		v6->tcpInSegs  = STAT64(IN_SEG);
2427 		v6->tcpOutSegs = STAT64(OUT_SEG);
2428 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2429 	}
2430 #undef STAT64
2431 #undef STAT
2432 #undef STAT_IDX
2433 }
2434 
2435 /**
2436  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2437  *	@adap: the adapter
2438  *	@mtus: where to store the MTU values
2439  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2440  *
2441  *	Reads the HW path MTU table.
2442  */
2443 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2444 {
2445 	u32 v;
2446 	int i;
2447 
2448 	for (i = 0; i < NMTUS; ++i) {
2449 		t4_write_reg(adap, TP_MTU_TABLE_A,
2450 			     MTUINDEX_V(0xff) | MTUVALUE_V(i));
2451 		v = t4_read_reg(adap, TP_MTU_TABLE_A);
2452 		mtus[i] = MTUVALUE_G(v);
2453 		if (mtu_log)
2454 			mtu_log[i] = MTUWIDTH_G(v);
2455 	}
2456 }
2457 
2458 /**
2459  *	t4_read_cong_tbl - reads the congestion control table
2460  *	@adap: the adapter
2461  *	@incr: where to store the additive increments
2462  *
2463  *	Reads the additive increments programmed into the HW congestion
2464  *	control table.
2465  */
2466 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2467 {
2468 	unsigned int mtu, w;
2469 
2470 	for (mtu = 0; mtu < NMTUS; ++mtu)
2471 		for (w = 0; w < NCCTRL_WIN; ++w) {
2472 			t4_write_reg(adap, TP_CCTRL_TABLE_A,
2473 				     ROWINDEX_V(0xffff) | (mtu << 5) | w);
2474 			incr[mtu][w] = (u16)t4_read_reg(adap,
2475 						TP_CCTRL_TABLE_A) & 0x1fff;
2476 		}
2477 }
2478 
2479 /**
2480  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2481  *	@adap: the adapter
2482  *	@addr: the indirect TP register address
2483  *	@mask: specifies the field within the register to modify
2484  *	@val: new value for the field
2485  *
2486  *	Sets a field of an indirect TP register to the given value.
2487  */
2488 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2489 			    unsigned int mask, unsigned int val)
2490 {
2491 	t4_write_reg(adap, TP_PIO_ADDR_A, addr);
2492 	val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
2493 	t4_write_reg(adap, TP_PIO_DATA_A, val);
2494 }
2495 
2496 /**
2497  *	init_cong_ctrl - initialize congestion control parameters
2498  *	@a: the alpha values for congestion control
2499  *	@b: the beta values for congestion control
2500  *
2501  *	Initialize the congestion control parameters.
2502  */
2503 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2504 {
2505 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2506 	a[9] = 2;
2507 	a[10] = 3;
2508 	a[11] = 4;
2509 	a[12] = 5;
2510 	a[13] = 6;
2511 	a[14] = 7;
2512 	a[15] = 8;
2513 	a[16] = 9;
2514 	a[17] = 10;
2515 	a[18] = 14;
2516 	a[19] = 17;
2517 	a[20] = 21;
2518 	a[21] = 25;
2519 	a[22] = 30;
2520 	a[23] = 35;
2521 	a[24] = 45;
2522 	a[25] = 60;
2523 	a[26] = 80;
2524 	a[27] = 100;
2525 	a[28] = 200;
2526 	a[29] = 300;
2527 	a[30] = 400;
2528 	a[31] = 500;
2529 
2530 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2531 	b[9] = b[10] = 1;
2532 	b[11] = b[12] = 2;
2533 	b[13] = b[14] = b[15] = b[16] = 3;
2534 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2535 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2536 	b[28] = b[29] = 6;
2537 	b[30] = b[31] = 7;
2538 }
2539 
2540 /* The minimum additive increment value for the congestion control table */
2541 #define CC_MIN_INCR 2U
2542 
2543 /**
2544  *	t4_load_mtus - write the MTU and congestion control HW tables
2545  *	@adap: the adapter
2546  *	@mtus: the values for the MTU table
2547  *	@alpha: the values for the congestion control alpha parameter
2548  *	@beta: the values for the congestion control beta parameter
2549  *
2550  *	Write the HW MTU table with the supplied MTUs and the high-speed
2551  *	congestion control table with the supplied alpha, beta, and MTUs.
2552  *	We write the two tables together because the additive increments
2553  *	depend on the MTUs.
2554  */
2555 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2556 		  const unsigned short *alpha, const unsigned short *beta)
2557 {
2558 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2559 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2560 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2561 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2562 	};
2563 
2564 	unsigned int i, w;
2565 
2566 	for (i = 0; i < NMTUS; ++i) {
2567 		unsigned int mtu = mtus[i];
2568 		unsigned int log2 = fls(mtu);
2569 
2570 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2571 			log2--;
2572 		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
2573 			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
2574 
2575 		for (w = 0; w < NCCTRL_WIN; ++w) {
2576 			unsigned int inc;
2577 
2578 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2579 				  CC_MIN_INCR);
2580 
2581 			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
2582 				     (w << 16) | (beta[w] << 13) | inc);
2583 		}
2584 	}
2585 }
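
/*
 * Worked example (values picked purely for illustration): for
 * mtus[i] = 1500, fls(1500) = 11 but bit 9 ((1 << 11) >> 2 = 512) is
 * clear in 1500, so the stored base-2 log rounds down to 10.  For
 * congestion window w = 9 (avg_pkts[9] = 112) with alpha[9] = 2 from
 * init_cong_ctrl(), the additive increment written to the table is
 *
 *	max((1500 - 40) * 2 / 112, CC_MIN_INCR) = max(26, 2) = 26
 */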
2586 
2587 /**
2588  *	t4_pmtx_get_stats - returns the HW stats from PMTX
2589  *	@adap: the adapter
2590  *	@cnt: where to store the count statistics
2591  *	@cycles: where to store the cycle statistics
2592  *
2593  *	Returns performance statistics from PMTX.
2594  */
2595 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2596 {
2597 	int i;
2598 	u32 data[2];
2599 
2600 	for (i = 0; i < PM_NSTATS; i++) {
2601 		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
2602 		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
2603 		if (is_t4(adap->params.chip)) {
2604 			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
2605 		} else {
2606 			t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
2607 					 PM_TX_DBG_DATA_A, data, 2,
2608 					 PM_TX_DBG_STAT_MSB_A);
2609 			cycles[i] = (((u64)data[0] << 32) | data[1]);
2610 		}
2611 	}
2612 }
2613 
2614 /**
2615  *	t4_pmrx_get_stats - returns the HW stats from PMRX
2616  *	@adap: the adapter
2617  *	@cnt: where to store the count statistics
2618  *	@cycles: where to store the cycle statistics
2619  *
2620  *	Returns performance statistics from PMRX.
2621  */
2622 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2623 {
2624 	int i;
2625 	u32 data[2];
2626 
2627 	for (i = 0; i < PM_NSTATS; i++) {
2628 		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
2629 		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
2630 		if (is_t4(adap->params.chip)) {
2631 			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
2632 		} else {
2633 			t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
2634 					 PM_RX_DBG_DATA_A, data, 2,
2635 					 PM_RX_DBG_STAT_MSB_A);
2636 			cycles[i] = (((u64)data[0] << 32) | data[1]);
2637 		}
2638 	}
2639 }
2640 
2641 /**
2642  *	get_mps_bg_map - return the buffer groups associated with a port
2643  *	@adap: the adapter
2644  *	@idx: the port index
2645  *
2646  *	Returns a bitmap indicating which MPS buffer groups are associated
2647  *	with the given port.  Bit i is set if buffer group i is used by the
2648  *	port.
2649  */
2650 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2651 {
2652 	u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
2653 
2654 	if (n == 0)
2655 		return idx == 0 ? 0xf : 0;
2656 	if (n == 1)
2657 		return idx < 2 ? (3 << (2 * idx)) : 0;
2658 	return 1 << idx;
2659 }
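
/*
 * Concretely (read straight from the code above): with one port (n == 0)
 * port 0 owns all four buffer groups (0xf); with two ports (n == 1)
 * port 0 maps to groups {0,1} (0x3) and port 1 to groups {2,3} (0xc);
 * with four ports each port owns only the group matching its index.
 */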
2660 
2661 /**
2662  *	t4_get_port_type_description - return Port Type string description
2663  *	@port_type: firmware Port Type enumeration
2664  */
2665 const char *t4_get_port_type_description(enum fw_port_type port_type)
2666 {
2667 	static const char *const port_type_description[] = {
2668 		"R XFI",
2669 		"R XAUI",
2670 		"T SGMII",
2671 		"T XFI",
2672 		"T XAUI",
2673 		"KX4",
2674 		"CX4",
2675 		"KX",
2676 		"KR",
2677 		"R SFP+",
2678 		"KR/KX",
2679 		"KR/KX/KX4",
2680 		"R QSFP_10G",
2681 		"R QSA",
2682 		"R QSFP",
2683 		"R BP40_BA",
2684 	};
2685 
2686 	if (port_type < ARRAY_SIZE(port_type_description))
2687 		return port_type_description[port_type];
2688 	return "UNKNOWN";
2689 }
2690 
2691 /**
2692  *	t4_get_port_stats - collect port statistics
2693  *	@adap: the adapter
2694  *	@idx: the port index
2695  *	@p: the stats structure to fill
2696  *
2697  *	Collect statistics related to the given port from HW.
2698  */
2699 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2700 {
2701 	u32 bgmap = get_mps_bg_map(adap, idx);
2702 
2703 #define GET_STAT(name) \
2704 	t4_read_reg64(adap, \
2705 	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2706 	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2707 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2708 
2709 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
2710 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
2711 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
2712 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
2713 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
2714 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
2715 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
2716 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
2717 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
2718 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
2719 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
2720 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2721 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
2722 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
2723 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
2724 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
2725 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
2726 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
2727 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
2728 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
2729 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
2730 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
2731 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
2732 
2733 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
2734 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
2735 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
2736 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
2737 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
2738 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
2739 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2740 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
2741 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
2742 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
2743 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
2744 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
2745 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
2746 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
2747 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
2748 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
2749 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2750 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
2751 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
2752 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
2753 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
2754 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
2755 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
2756 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
2757 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
2758 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
2759 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
2760 
2761 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2762 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2763 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2764 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2765 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2766 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2767 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2768 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2769 
2770 #undef GET_STAT
2771 #undef GET_STAT_COM
2772 }
2773 
2774 /**
2775  *	t4_wol_magic_enable - enable/disable magic packet WoL
2776  *	@adap: the adapter
2777  *	@port: the physical port index
2778  *	@addr: MAC address expected in magic packets, %NULL to disable
2779  *
2780  *	Enables/disables magic packet wake-on-LAN for the selected port.
2781  */
2782 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2783 			 const u8 *addr)
2784 {
2785 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2786 
2787 	if (is_t4(adap->params.chip)) {
2788 		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2789 		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2790 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2791 	} else {
2792 		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2793 		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2794 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2795 	}
2796 
2797 	if (addr) {
2798 		t4_write_reg(adap, mag_id_reg_l,
2799 			     (addr[2] << 24) | (addr[3] << 16) |
2800 			     (addr[4] << 8) | addr[5]);
2801 		t4_write_reg(adap, mag_id_reg_h,
2802 			     (addr[0] << 8) | addr[1]);
2803 	}
2804 	t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
2805 			 addr ? MAGICEN_F : 0);
2806 }
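
/*
 * Usage sketch (hypothetical caller; dev_addr stands in for the
 * interface's station address): arm magic-packet wake-up on port 0 and
 * later disarm it by passing NULL:
 *
 *	t4_wol_magic_enable(adap, 0, dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, 0, NULL);
 */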
2807 
2808 /**
2809  *	t4_wol_pat_enable - enable/disable pattern-based WoL
2810  *	@adap: the adapter
2811  *	@port: the physical port index
2812  *	@map: bitmap of which HW pattern filters to set
2813  *	@mask0: byte mask for bytes 0-63 of a packet
2814  *	@mask1: byte mask for bytes 64-127 of a packet
2815  *	@crc: Ethernet CRC for selected bytes
2816  *	@enable: enable/disable switch
2817  *
2818  *	Sets the pattern filters indicated in @map to mask out the bytes
2819  *	specified in @mask0/@mask1 in received packets and compare the CRC of
2820  *	the resulting packet against @crc.  If @enable is %true pattern-based
2821  *	WoL is enabled, otherwise disabled.
2822  */
2823 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2824 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
2825 {
2826 	int i;
2827 	u32 port_cfg_reg;
2828 
2829 	if (is_t4(adap->params.chip))
2830 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2831 	else
2832 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2833 
2834 	if (!enable) {
2835 		t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
2836 		return 0;
2837 	}
2838 	if (map > 0xff)
2839 		return -EINVAL;
2840 
2841 #define EPIO_REG(name) \
2842 	(is_t4(adap->params.chip) ? \
2843 	 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
2844 	 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
2845 
2846 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2847 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2848 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2849 
2850 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2851 		if (!(map & 1))
2852 			continue;
2853 
2854 		/* write byte masks */
2855 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2856 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
2857 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2858 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2859 			return -ETIMEDOUT;
2860 
2861 		/* write CRC */
2862 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2863 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
2864 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2865 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2866 			return -ETIMEDOUT;
2867 	}
2868 #undef EPIO_REG
2869 
2870 	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN_F);
2871 	return 0;
2872 }
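
/*
 * Hedged example (byte-mask semantics as described in the comment
 * above): program pattern filter 0 only (map = 1) with masks mask0 and
 * mask1 and a precomputed CRC, then enable pattern WoL:
 *
 *	err = t4_wol_pat_enable(adap, port, 1, mask0, mask1, crc, true);
 */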
2873 
2874 /**
2875  *	t4_mk_filtdelwr - create a delete filter WR
2876  *	@ftid: the filter ID
2877  *	@wr: the filter work request to populate
2878  *	@qid: ingress queue to receive the delete notification
2879  *
2880  *	Creates a filter work request to delete the supplied filter.  If @qid is negative the delete notification is suppressed.
2881  */
2882 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2883 {
2884 	memset(wr, 0, sizeof(*wr));
2885 	wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
2886 	wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
2887 	wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
2888 			FW_FILTER_WR_NOREPLY_V(qid < 0));
2889 	wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
2890 	if (qid >= 0)
2891 		wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
2892 }
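
/*
 * Usage sketch (hypothetical values): build a work request deleting
 * filter 5 and steer the completion to ingress queue iq; a negative qid
 * would suppress the reply:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(5, &wr, iq);
 */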
2893 
2894 #define INIT_CMD(var, cmd, rd_wr) do { \
2895 	(var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
2896 				  FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
2897 	(var).retval_len16 = htonl(FW_LEN16(var)); \
2898 } while (0)
2899 
2900 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2901 			  u32 addr, u32 val)
2902 {
2903 	struct fw_ldst_cmd c;
2904 
2905 	memset(&c, 0, sizeof(c));
2906 	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2907 			    FW_CMD_WRITE_F |
2908 			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
2909 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2910 	c.u.addrval.addr = htonl(addr);
2911 	c.u.addrval.val = htonl(val);
2912 
2913 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2914 }
2915 
2916 /**
2917  *	t4_mdio_rd - read a PHY register through MDIO
2918  *	@adap: the adapter
2919  *	@mbox: mailbox to use for the FW command
2920  *	@phy_addr: the PHY address
2921  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2922  *	@reg: the register to read
2923  *	@valp: where to store the value
2924  *
2925  *	Issues a FW command through the given mailbox to read a PHY register.
2926  */
2927 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2928 	       unsigned int mmd, unsigned int reg, u16 *valp)
2929 {
2930 	int ret;
2931 	struct fw_ldst_cmd c;
2932 
2933 	memset(&c, 0, sizeof(c));
2934 	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2935 		FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
2936 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2937 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2938 				   FW_LDST_CMD_MMD_V(mmd));
2939 	c.u.mdio.raddr = htons(reg);
2940 
2941 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2942 	if (ret == 0)
2943 		*valp = ntohs(c.u.mdio.rval);
2944 	return ret;
2945 }
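
/*
 * Illustrative only: read the PMA/PMD status register of a hypothetical
 * PHY at address 0, using the MDIO_MMD_PMAPMD and MDIO_STAT1 constants
 * from <linux/mdio.h>:
 *
 *	u16 stat;
 *	int err = t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD, MDIO_STAT1,
 *			     &stat);
 */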
2946 
2947 /**
2948  *	t4_mdio_wr - write a PHY register through MDIO
2949  *	@adap: the adapter
2950  *	@mbox: mailbox to use for the FW command
2951  *	@phy_addr: the PHY address
2952  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2953  *	@reg: the register to write
2954  *	@valp: value to write
2955  *
2956  *	Issues a FW command through the given mailbox to write a PHY register.
2957  */
2958 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2959 	       unsigned int mmd, unsigned int reg, u16 val)
2960 {
2961 	struct fw_ldst_cmd c;
2962 
2963 	memset(&c, 0, sizeof(c));
2964 	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2965 		FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
2966 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2967 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2968 				   FW_LDST_CMD_MMD_V(mmd));
2969 	c.u.mdio.raddr = htons(reg);
2970 	c.u.mdio.rval = htons(val);
2971 
2972 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2973 }
2974 
2975 /**
2976  *	t4_sge_decode_idma_state - decode the idma state
2977  *	@adapter: the adapter
2978  *	@state: the state idma is stuck in
2979  */
2980 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2981 {
2982 	static const char * const t4_decode[] = {
2983 		"IDMA_IDLE",
2984 		"IDMA_PUSH_MORE_CPL_FIFO",
2985 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2986 		"Not used",
2987 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2988 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2989 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2990 		"IDMA_SEND_FIFO_TO_IMSG",
2991 		"IDMA_FL_REQ_DATA_FL_PREP",
2992 		"IDMA_FL_REQ_DATA_FL",
2993 		"IDMA_FL_DROP",
2994 		"IDMA_FL_H_REQ_HEADER_FL",
2995 		"IDMA_FL_H_SEND_PCIEHDR",
2996 		"IDMA_FL_H_PUSH_CPL_FIFO",
2997 		"IDMA_FL_H_SEND_CPL",
2998 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2999 		"IDMA_FL_H_SEND_IP_HDR",
3000 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
3001 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
3002 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
3003 		"IDMA_FL_D_SEND_PCIEHDR",
3004 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3005 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
3006 		"IDMA_FL_SEND_PCIEHDR",
3007 		"IDMA_FL_PUSH_CPL_FIFO",
3008 		"IDMA_FL_SEND_CPL",
3009 		"IDMA_FL_SEND_PAYLOAD_FIRST",
3010 		"IDMA_FL_SEND_PAYLOAD",
3011 		"IDMA_FL_REQ_NEXT_DATA_FL",
3012 		"IDMA_FL_SEND_NEXT_PCIEHDR",
3013 		"IDMA_FL_SEND_PADDING",
3014 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
3015 		"IDMA_FL_SEND_FIFO_TO_IMSG",
3016 		"IDMA_FL_REQ_DATAFL_DONE",
3017 		"IDMA_FL_REQ_HEADERFL_DONE",
3018 	};
3019 	static const char * const t5_decode[] = {
3020 		"IDMA_IDLE",
3021 		"IDMA_ALMOST_IDLE",
3022 		"IDMA_PUSH_MORE_CPL_FIFO",
3023 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
3024 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
3025 		"IDMA_PHYSADDR_SEND_PCIEHDR",
3026 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
3027 		"IDMA_PHYSADDR_SEND_PAYLOAD",
3028 		"IDMA_SEND_FIFO_TO_IMSG",
3029 		"IDMA_FL_REQ_DATA_FL",
3030 		"IDMA_FL_DROP",
3031 		"IDMA_FL_DROP_SEND_INC",
3032 		"IDMA_FL_H_REQ_HEADER_FL",
3033 		"IDMA_FL_H_SEND_PCIEHDR",
3034 		"IDMA_FL_H_PUSH_CPL_FIFO",
3035 		"IDMA_FL_H_SEND_CPL",
3036 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
3037 		"IDMA_FL_H_SEND_IP_HDR",
3038 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
3039 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
3040 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
3041 		"IDMA_FL_D_SEND_PCIEHDR",
3042 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3043 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
3044 		"IDMA_FL_SEND_PCIEHDR",
3045 		"IDMA_FL_PUSH_CPL_FIFO",
3046 		"IDMA_FL_SEND_CPL",
3047 		"IDMA_FL_SEND_PAYLOAD_FIRST",
3048 		"IDMA_FL_SEND_PAYLOAD",
3049 		"IDMA_FL_REQ_NEXT_DATA_FL",
3050 		"IDMA_FL_SEND_NEXT_PCIEHDR",
3051 		"IDMA_FL_SEND_PADDING",
3052 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
3053 	};
3054 	static const u32 sge_regs[] = {
3055 		SGE_DEBUG_DATA_LOW_INDEX_2_A,
3056 		SGE_DEBUG_DATA_LOW_INDEX_3_A,
3057 		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
3058 	};
3059 	const char **sge_idma_decode;
3060 	int sge_idma_decode_nstates;
3061 	int i;
3062 
3063 	if (is_t4(adapter->params.chip)) {
3064 		sge_idma_decode = (const char **)t4_decode;
3065 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
3066 	} else {
3067 		sge_idma_decode = (const char **)t5_decode;
3068 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
3069 	}
3070 
3071 	if (state < sge_idma_decode_nstates)
3072 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
3073 	else
3074 		CH_WARN(adapter, "idma state %d unknown\n", state);
3075 
3076 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
3077 		CH_WARN(adapter, "SGE register %#x value %#x\n",
3078 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
3079 }
3080 
3081 /**
3082  *	t4_fw_hello - establish communication with FW
3083  *	@adap: the adapter
3084  *	@mbox: mailbox to use for the FW command
3085  *	@evt_mbox: mailbox to receive async FW events
3086  *	@master: specifies the caller's willingness to be the device master
3087  *	@state: returns the current device state (if non-NULL)
3088  *
3089  *	Issues a command to establish communication with FW.  Returns either
3090  *	an error (negative integer) or the mailbox of the Master PF.
3091  */
3092 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3093 		enum dev_master master, enum dev_state *state)
3094 {
3095 	int ret;
3096 	struct fw_hello_cmd c;
3097 	u32 v;
3098 	unsigned int master_mbox;
3099 	int retries = FW_CMD_HELLO_RETRIES;
3100 
3101 retry:
3102 	memset(&c, 0, sizeof(c));
3103 	INIT_CMD(c, HELLO, WRITE);
3104 	c.err_to_clearinit = htonl(
3105 		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
3106 		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
3107 		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
3108 				      FW_HELLO_CMD_MBMASTER_M) |
3109 		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
3110 		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
3111 		FW_HELLO_CMD_CLEARINIT_F);
3112 
3113 	/*
3114 	 * Issue the HELLO command to the firmware.  If it's not successful
3115 	 * but indicates that we got a "busy" or "timeout" condition, retry
3116 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
3117 	 * retry limit, check to see if the firmware left us any error
3118 	 * information and report that if so.
3119 	 */
3120 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3121 	if (ret < 0) {
3122 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3123 			goto retry;
3124 		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
3125 			t4_report_fw_error(adap);
3126 		return ret;
3127 	}
3128 
3129 	v = ntohl(c.err_to_clearinit);
3130 	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
3131 	if (state) {
3132 		if (v & FW_HELLO_CMD_ERR_F)
3133 			*state = DEV_STATE_ERR;
3134 		else if (v & FW_HELLO_CMD_INIT_F)
3135 			*state = DEV_STATE_INIT;
3136 		else
3137 			*state = DEV_STATE_UNINIT;
3138 	}
3139 
3140 	/*
3141 	 * If we're not the Master PF then we need to wait around for the
3142 	 * Master PF Driver to finish setting up the adapter.
3143 	 *
3144 	 * Note that we also do this wait if we're a non-Master-capable PF and
3145 	 * there is no current Master PF; a Master PF may show up momentarily
3146 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
3147 	 * OS loads lots of different drivers rapidly at the same time).  In
3148 	 * this case, the Master PF returned by the firmware will be
3149 	 * PCIE_FW_MASTER_M so the test below will work ...
3150 	 */
3151 	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
3152 	    master_mbox != mbox) {
3153 		int waiting = FW_CMD_HELLO_TIMEOUT;
3154 
3155 		/*
3156 		 * Wait for the firmware to either indicate an error or
3157 		 * initialized state.  If we see either of these we bail out
3158 		 * and report the issue to the caller.  If we exhaust the
3159 		 * "hello timeout" and we haven't exhausted our retries, try
3160 		 * again.  Otherwise bail with a timeout error.
3161 		 */
3162 		for (;;) {
3163 			u32 pcie_fw;
3164 
3165 			msleep(50);
3166 			waiting -= 50;
3167 
3168 			/*
3169 			 * If neither Error nor Initialized are indicated
3170 			 * by the firmware keep waiting till we exhaust our
3171 			 * timeout ... and then retry if we haven't exhausted
3172 			 * our retries ...
3173 			 */
3174 			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
3175 			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
3176 				if (waiting <= 0) {
3177 					if (retries-- > 0)
3178 						goto retry;
3179 
3180 					return -ETIMEDOUT;
3181 				}
3182 				continue;
3183 			}
3184 
3185 			/*
3186 			 * We either have an Error or Initialized condition;
3187 			 * report errors preferentially.
3188 			 */
3189 			if (state) {
3190 				if (pcie_fw & PCIE_FW_ERR_F)
3191 					*state = DEV_STATE_ERR;
3192 				else if (pcie_fw & PCIE_FW_INIT_F)
3193 					*state = DEV_STATE_INIT;
3194 			}
3195 
3196 			/*
3197 			 * If we arrived before a Master PF was selected and
3198 			 * there's now a valid Master PF, grab its identity
3199 			 * for our caller.
3200 			 */
3201 			if (master_mbox == PCIE_FW_MASTER_M &&
3202 			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
3203 				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
3204 			break;
3205 		}
3206 	}
3207 
3208 	return master_mbox;
3209 }
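
/*
 * Illustrative sketch (not built): a typical probe-time handshake with
 * the firmware.  The error handling shown here is hypothetical; only the
 * t4_fw_hello() call itself follows the API above.
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *			     MASTER_MAY, &state);
 *	if (master < 0)
 *		return master;
 *	if (state == DEV_STATE_ERR)
 *		return -ENXIO;
 *
 * A return equal to adap->mbox means this PF won mastership and is
 * responsible for initializing the device; any other mailbox identifies
 * the PF that is.
 */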
3210 
3211 /**
3212  *	t4_fw_bye - end communication with FW
3213  *	@adap: the adapter
3214  *	@mbox: mailbox to use for the FW command
3215  *
3216  *	Issues a command to terminate communication with FW.
3217  */
3218 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3219 {
3220 	struct fw_bye_cmd c;
3221 
3222 	memset(&c, 0, sizeof(c));
3223 	INIT_CMD(c, BYE, WRITE);
3224 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3225 }
3226 
3227 /**
3228  *	t4_early_init - ask FW to initialize the device
3229  *	@adap: the adapter
3230  *	@mbox: mailbox to use for the FW command
3231  *
3232  *	Issues a command to FW to partially initialize the device.  This
3233  *	performs initialization that generally doesn't depend on user input.
3234  */
3235 int t4_early_init(struct adapter *adap, unsigned int mbox)
3236 {
3237 	struct fw_initialize_cmd c;
3238 
3239 	memset(&c, 0, sizeof(c));
3240 	INIT_CMD(c, INITIALIZE, WRITE);
3241 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3242 }
3243 
3244 /**
3245  *	t4_fw_reset - issue a reset to FW
3246  *	@adap: the adapter
3247  *	@mbox: mailbox to use for the FW command
3248  *	@reset: specifies the type of reset to perform
3249  *
3250  *	Issues a reset command of the specified type to FW.
3251  */
3252 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3253 {
3254 	struct fw_reset_cmd c;
3255 
3256 	memset(&c, 0, sizeof(c));
3257 	INIT_CMD(c, RESET, WRITE);
3258 	c.val = htonl(reset);
3259 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3260 }
3261 
3262 /**
3263  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3264  *	@adap: the adapter
3265  *	@mbox: mailbox to use for the FW RESET command (if desired)
3266  *	@force: force uP into RESET even if FW RESET command fails
3267  *
3268  *	Issues a RESET command to firmware (if desired) with a HALT indication
3269  *	and then puts the microprocessor into RESET state.  The RESET command
3270  *	will only be issued if a legitimate mailbox is provided (mbox <=
3271  *	PCIE_FW_MASTER_M).
3272  *
3273  *	This is generally used in order for the host to safely manipulate the
3274  *	adapter without fear of conflicting with whatever the firmware might
3275  *	be doing.  The only way out of this state is to RESTART the firmware
3276  *	...
3277  */
3278 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3279 {
3280 	int ret = 0;
3281 
3282 	/*
3283 	 * If a legitimate mailbox is provided, issue a RESET command
3284 	 * with a HALT indication.
3285 	 */
3286 	if (mbox <= PCIE_FW_MASTER_M) {
3287 		struct fw_reset_cmd c;
3288 
3289 		memset(&c, 0, sizeof(c));
3290 		INIT_CMD(c, RESET, WRITE);
3291 		c.val = htonl(PIORST_F | PIORSTMODE_F);
3292 		c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
3293 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3294 	}
3295 
3296 	/*
3297 	 * Normally we won't complete the operation if the firmware RESET
3298 	 * command fails but if our caller insists we'll go ahead and put the
3299 	 * uP into RESET.  This can be useful if the firmware is hung or even
3300 	 * missing ...  We'll have to take the risk of putting the uP into
3301 	 * RESET without the cooperation of firmware in that case.
3302 	 *
3303 	 * We also force the firmware's HALT flag to be on in case we bypassed
3304 	 * the firmware RESET command above or we're dealing with old firmware
3305 	 * which doesn't have the HALT capability.  This will serve as a flag
3306 	 * for the incoming firmware to know that it's coming out of a HALT
3307 	 * rather than a RESET ... if it's new enough to understand that ...
3308 	 */
3309 	if (ret == 0 || force) {
3310 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
3311 		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
3312 				 PCIE_FW_HALT_F);
3313 	}
3314 
3315 	/*
3316 	 * And we always return the result of the firmware RESET command
3317 	 * even when we force the uP into RESET ...
3318 	 */
3319 	return ret;
3320 }
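
/*
 * Illustrative sketch (not built): t4_fw_halt() and t4_fw_restart() are
 * meant to bracket host-driven manipulation of the adapter, roughly:
 *
 *	ret = t4_fw_halt(adap, mbox, force);
 *	if (ret < 0 && !force)
 *		return ret;
 *	... manipulate the adapter while the uP is quiesced ...
 *	ret = t4_fw_restart(adap, mbox, reset);
 *
 * with "force" and "reset" carrying the semantics described in the
 * respective function comments.
 */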
3321 
3322 /**
3323  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
3324  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
3325  *	@reset: if we want to do a RESET to restart things
3326  *
3327  *	Restart firmware previously halted by t4_fw_halt().  On successful
3328  *	return the previous PF Master remains as the new PF Master and there
3329  *	is no need to issue a new HELLO command, etc.
3330  *
3331  *	We do this in two ways:
3332  *
3333  *	 1. If we're dealing with newer firmware we'll simply want to take
3334  *	    the chip's microprocessor out of RESET.  This will cause the
3335  *	    firmware to start up from its start vector.  And then we'll loop
3336  *	    until the firmware indicates it's started again (PCIE_FW.HALT
3337  *	    reset to 0) or we timeout.
3338  *
3339  *	 2. If we're dealing with older firmware then we'll need to RESET
3340  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
3341  *	    flag and automatically RESET itself on startup.
3342  */
3343 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3344 {
3345 	if (reset) {
3346 		/*
3347 		 * Since we're directing the RESET instead of the firmware
3348 		 * doing it automatically, we need to clear the PCIE_FW.HALT
3349 		 * bit.
3350 		 */
3351 		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
3352 
3353 		/*
3354 		 * If we've been given a valid mailbox, first try to get the
3355 		 * firmware to do the RESET.  If that works, great and we can
3356 		 * return success.  Otherwise, if we haven't been given a
3357 		 * valid mailbox or the RESET command failed, fall back to
3358 		 * hitting the chip with a hammer.
3359 		 */
3360 		if (mbox <= PCIE_FW_MASTER_M) {
3361 			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3362 			msleep(100);
3363 			if (t4_fw_reset(adap, mbox,
3364 					PIORST_F | PIORSTMODE_F) == 0)
3365 				return 0;
3366 		}
3367 
3368 		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
3369 		msleep(2000);
3370 	} else {
3371 		int ms;
3372 
3373 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3374 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3375 			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
3376 				return 0;
3377 			msleep(100);
3378 			ms += 100;
3379 		}
3380 		return -ETIMEDOUT;
3381 	}
3382 	return 0;
3383 }
3384 
3385 /**
3386  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3387  *	@adap: the adapter
3388  *	@mbox: mailbox to use for the FW RESET command (if desired)
3389  *	@fw_data: the firmware image to write
3390  *	@size: image size
3391  *	@force: force upgrade even if firmware doesn't cooperate
3392  *
3393  *	Perform all of the steps necessary for upgrading an adapter's
3394  *	firmware image.  Normally this requires the cooperation of the
3395  *	existing firmware in order to halt all existing activities
3396  *	but if an invalid mailbox token is passed in we skip that step
3397  *	(though we'll still put the adapter microprocessor into RESET in
3398  *	that case).
3399  *
3400  *	On successful return the new firmware will have been loaded and
3401  *	the adapter will have been fully RESET losing all previous setup
3402  *	state.  On unsuccessful return the adapter may be completely hosed ...
3403  *	positive errno indicates that the adapter is ~probably~ intact, a
3404  *	negative errno indicates that things are looking bad ...
3405  */
3406 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3407 		  const u8 *fw_data, unsigned int size, int force)
3408 {
3409 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3410 	int reset, ret;
3411 
3412 	if (!t4_fw_matches_chip(adap, fw_hdr))
3413 		return -EINVAL;
3414 
3415 	ret = t4_fw_halt(adap, mbox, force);
3416 	if (ret < 0 && !force)
3417 		return ret;
3418 
3419 	ret = t4_load_fw(adap, fw_data, size);
3420 	if (ret < 0)
3421 		return ret;
3422 
3423 	/*
3424 	 * Older versions of the firmware don't understand the new
3425 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3426 	 * restart.  So for newly loaded older firmware we'll have to do the
3427 	 * RESET for it so it starts up on a clean slate.  We can tell if
3428 	 * the newly loaded firmware will handle this right by checking
3429 	 * its header flags to see if it advertises the capability.
3430 	 */
3431 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3432 	return t4_fw_restart(adap, mbox, reset);
3433 }
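
/*
 * Illustrative sketch (not built): feeding t4_fw_upgrade() an image
 * obtained via request_firmware().  FW4_FNAME is the firmware file name
 * the driver uses elsewhere; the trailing 0 means "don't force".
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, FW4_FNAME, adap->pdev_dev) == 0) {
 *		ret = t4_fw_upgrade(adap, adap->mbox,
 *				    fw->data, fw->size, 0);
 *		release_firmware(fw);
 *	}
 */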
3434 
3435 /**
3436  *	t4_fixup_host_params - fix up host-dependent parameters
3437  *	@adap: the adapter
3438  *	@page_size: the host's Base Page Size
3439  *	@cache_line_size: the host's Cache Line Size
3440  *
3441  *	Various registers in T4 contain values which are dependent on the
3442  *	host's Base Page and Cache Line Sizes.  This function will fix all of
3443  *	those registers with the appropriate values as passed in ...
3444  */
3445 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3446 			 unsigned int cache_line_size)
3447 {
3448 	unsigned int page_shift = fls(page_size) - 1;
3449 	unsigned int sge_hps = page_shift - 10;
3450 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3451 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3452 	unsigned int fl_align_log = fls(fl_align) - 1;
3453 
3454 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
3455 		     HOSTPAGESIZEPF0_V(sge_hps) |
3456 		     HOSTPAGESIZEPF1_V(sge_hps) |
3457 		     HOSTPAGESIZEPF2_V(sge_hps) |
3458 		     HOSTPAGESIZEPF3_V(sge_hps) |
3459 		     HOSTPAGESIZEPF4_V(sge_hps) |
3460 		     HOSTPAGESIZEPF5_V(sge_hps) |
3461 		     HOSTPAGESIZEPF6_V(sge_hps) |
3462 		     HOSTPAGESIZEPF7_V(sge_hps));
3463 
3464 	if (is_t4(adap->params.chip)) {
3465 		t4_set_reg_field(adap, SGE_CONTROL_A,
3466 				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3467 				 EGRSTATUSPAGESIZE_F,
3468 				 INGPADBOUNDARY_V(fl_align_log -
3469 						  INGPADBOUNDARY_SHIFT_X) |
3470 				 EGRSTATUSPAGESIZE_V(stat_len != 64));
3471 	} else {
3472 		/* T5 introduced the separation of the Free List Padding and
3473 		 * Packing Boundaries.  Thus, we can select a smaller Padding
3474 		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3475 		 * Bandwidth, and use a Packing Boundary which is large enough
3476 		 * to avoid false sharing between CPUs, etc.
3477 		 *
3478 		 * For the PCI Link, the smaller the Padding Boundary the
3479 		 * better.  For the Memory Controller, a smaller Padding
3480 		 * Boundary is better until we cross under the Memory Line
3481 		 * Size (the minimum unit of transfer to/from Memory).  If we
3482 		 * have a Padding Boundary which is smaller than the Memory
3483 		 * Line Size, that'll involve a Read-Modify-Write cycle on the
3484 		 * Memory Controller which is never good.  For T5 the smallest
3485 		 * Padding Boundary which we can select is 32 bytes which is
3486 		 * larger than any known Memory Controller Line Size so we'll
3487 		 * use that.
3488 		 *
3489 		 * T5 has a different interpretation of the "0" value for the
3490 		 * Packing Boundary.  This corresponds to 16 bytes instead of
3491 		 * the expected 32 bytes.  We never have a Packing Boundary
3492 		 * less than 32 bytes so we can't use that special value but
3493 		 * on the other hand, if we wanted 32 bytes, the best we can
3494 		 * really do is 64 bytes.
3495 		 */
3496 		if (fl_align <= 32) {
3497 			fl_align = 64;
3498 			fl_align_log = 6;
3499 		}
3500 		t4_set_reg_field(adap, SGE_CONTROL_A,
3501 				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3502 				 EGRSTATUSPAGESIZE_F,
3503 				 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
3504 				 EGRSTATUSPAGESIZE_V(stat_len != 64));
3505 		t4_set_reg_field(adap, SGE_CONTROL2_A,
3506 				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3507 				 INGPACKBOUNDARY_V(fl_align_log -
3508 						   INGPACKBOUNDARY_SHIFT_X));
3509 	}
3510 	/*
3511 	 * Adjust various SGE Free List Host Buffer Sizes.
3512 	 *
3513 	 * This is something of a crock since we're using fixed indices into
3514 	 * the array which are also known by the sge.c code and the T4
3515 	 * Firmware Configuration File.  We need to come up with a much better
3516 	 * approach to managing this array.  For now, the first four entries
3517 	 * are:
3518 	 *
3519 	 *   0: Host Page Size
3520 	 *   1: 64KB
3521 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3522 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3523 	 *
3524 	 * For the single-MTU buffers in unpacked mode we need to include
3525 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3526 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3527 	 * Padding boundary.  All of these are accommodated in the Factory
3528 	 * Default Firmware Configuration File but we need to adjust it for
3529 	 * this host's cache line size.
3530 	 */
3531 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
3532 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
3533 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
3534 		     & ~(fl_align-1));
3535 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
3536 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
3537 		     & ~(fl_align-1));
3538 
3539 	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
3540 
3541 	return 0;
3542 }
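
/*
 * Illustrative sketch (not built): callers normally pass the host's real
 * parameters straight through, e.g. when acting as Master PF:
 *
 *	ret = t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 *
 * For a 4KB PAGE_SIZE this gives sge_hps = fls(4096) - 1 - 10 = 2, and a
 * 64-byte cache line gives stat_len = 64 and fl_align = 64.
 */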
3543 
3544 /**
3545  *	t4_fw_initialize - ask FW to initialize the device
3546  *	@adap: the adapter
3547  *	@mbox: mailbox to use for the FW command
3548  *
3549  *	Issues a command to FW to partially initialize the device.  This
3550  *	performs initialization that generally doesn't depend on user input.
3551  */
3552 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3553 {
3554 	struct fw_initialize_cmd c;
3555 
3556 	memset(&c, 0, sizeof(c));
3557 	INIT_CMD(c, INITIALIZE, WRITE);
3558 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3559 }
3560 
3561 /**
3562  *	t4_query_params - query FW or device parameters
3563  *	@adap: the adapter
3564  *	@mbox: mailbox to use for the FW command
3565  *	@pf: the PF
3566  *	@vf: the VF
3567  *	@nparams: the number of parameters
3568  *	@params: the parameter names
3569  *	@val: the parameter values
3570  *
3571  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3572  *	queried at once.
3573  */
3574 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3575 		    unsigned int vf, unsigned int nparams, const u32 *params,
3576 		    u32 *val)
3577 {
3578 	int i, ret;
3579 	struct fw_params_cmd c;
3580 	__be32 *p = &c.param[0].mnem;
3581 
3582 	if (nparams > 7)
3583 		return -EINVAL;
3584 
3585 	memset(&c, 0, sizeof(c));
3586 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
3587 			    FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
3588 			    FW_PARAMS_CMD_VFN_V(vf));
3589 	c.retval_len16 = htonl(FW_LEN16(c));
3590 	for (i = 0; i < nparams; i++, p += 2)
3591 		*p = htonl(*params++);
3592 
3593 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3594 	if (ret == 0)
3595 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3596 			*val++ = ntohl(*p);
3597 	return ret;
3598 }
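
/*
 * Illustrative sketch (not built): querying a single device parameter.
 * Parameter mnemonics are composed from the FW_PARAMS_* macros; the
 * port-vector query below mirrors what the driver does at init time.
 *
 *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
 *			      &param, &portvec);
 */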
3599 
3600 /**
3601  *      t4_set_params_nosleep - sets FW or device parameters
3602  *      @adap: the adapter
3603  *      @mbox: mailbox to use for the FW command
3604  *      @pf: the PF
3605  *      @vf: the VF
3606  *      @nparams: the number of parameters
3607  *      @params: the parameter names
3608  *      @val: the parameter values
3609  *
3610  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
3611  *      specified at once.  Never sleeps, so it is safe to call from
3612  *      contexts that cannot sleep.
3613  */
3614 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3615 			  unsigned int pf, unsigned int vf,
3616 			  unsigned int nparams, const u32 *params,
3617 			  const u32 *val)
3618 {
3619 	struct fw_params_cmd c;
3620 	__be32 *p = &c.param[0].mnem;
3621 
3622 	if (nparams > 7)
3623 		return -EINVAL;
3624 
3625 	memset(&c, 0, sizeof(c));
3626 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3627 				FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3628 				FW_PARAMS_CMD_PFN_V(pf) |
3629 				FW_PARAMS_CMD_VFN_V(vf));
3630 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3631 
3632 	while (nparams--) {
3633 		*p++ = cpu_to_be32(*params++);
3634 		*p++ = cpu_to_be32(*val++);
3635 	}
3636 
3637 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3638 }
3639 
3640 /**
3641  *	t4_set_params - sets FW or device parameters
3642  *	@adap: the adapter
3643  *	@mbox: mailbox to use for the FW command
3644  *	@pf: the PF
3645  *	@vf: the VF
3646  *	@nparams: the number of parameters
3647  *	@params: the parameter names
3648  *	@val: the parameter values
3649  *
3650  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3651  *	specified at once.
3652  */
3653 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3654 		  unsigned int vf, unsigned int nparams, const u32 *params,
3655 		  const u32 *val)
3656 {
3657 	struct fw_params_cmd c;
3658 	__be32 *p = &c.param[0].mnem;
3659 
3660 	if (nparams > 7)
3661 		return -EINVAL;
3662 
3663 	memset(&c, 0, sizeof(c));
3664 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
3665 			    FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
3666 			    FW_PARAMS_CMD_VFN_V(vf));
3667 	c.retval_len16 = htonl(FW_LEN16(c));
3668 	while (nparams--) {
3669 		*p++ = htonl(*params++);
3670 		*p++ = htonl(*val++);
3671 	}
3672 
3673 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3674 }
3675 
3676 /**
3677  *	t4_cfg_pfvf - configure PF/VF resource limits
3678  *	@adap: the adapter
3679  *	@mbox: mailbox to use for the FW command
3680  *	@pf: the PF being configured
3681  *	@vf: the VF being configured
3682  *	@txq: the max number of egress queues
3683  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
3684  *	@rxqi: the max number of interrupt-capable ingress queues
3685  *	@rxq: the max number of interruptless ingress queues
3686  *	@tc: the PCI traffic class
3687  *	@vi: the max number of virtual interfaces
3688  *	@cmask: the channel access rights mask for the PF/VF
3689  *	@pmask: the port access rights mask for the PF/VF
3690  *	@nexact: the maximum number of exact MPS filters
3691  *	@rcaps: read capabilities
3692  *	@wxcaps: write/execute capabilities
3693  *
3694  *	Configures resource limits and capabilities for a physical or virtual
3695  *	function.
3696  */
3697 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3698 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3699 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
3700 		unsigned int vi, unsigned int cmask, unsigned int pmask,
3701 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3702 {
3703 	struct fw_pfvf_cmd c;
3704 
3705 	memset(&c, 0, sizeof(c));
3706 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
3707 			    FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
3708 			    FW_PFVF_CMD_VFN_V(vf));
3709 	c.retval_len16 = htonl(FW_LEN16(c));
3710 	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
3711 			       FW_PFVF_CMD_NIQ_V(rxq));
3712 	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
3713 			       FW_PFVF_CMD_PMASK_V(pmask) |
3714 			       FW_PFVF_CMD_NEQ_V(txq));
3715 	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
3716 				FW_PFVF_CMD_NEXACTF_V(nexact));
3717 	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
3718 				     FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
3719 				     FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
3720 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3721 }
3722 
3723 /**
3724  *	t4_alloc_vi - allocate a virtual interface
3725  *	@adap: the adapter
3726  *	@mbox: mailbox to use for the FW command
3727  *	@port: physical port associated with the VI
3728  *	@pf: the PF owning the VI
3729  *	@vf: the VF owning the VI
3730  *	@nmac: number of MAC addresses needed (1 to 5)
3731  *	@mac: the MAC addresses of the VI
3732  *	@rss_size: size of RSS table slice associated with this VI
3733  *
3734  *	Allocates a virtual interface for the given physical port.  If @mac is
3735  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
3736  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
3737  *	@mac should be large enough to hold @nmac Ethernet addresses; they are
3738  *	Returns a negative error number or the non-negative VI id.
3739  */
3740 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3741 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3742 		unsigned int *rss_size)
3743 {
3744 	int ret;
3745 	struct fw_vi_cmd c;
3746 
3747 	memset(&c, 0, sizeof(c));
3748 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
3749 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3750 			    FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
3751 	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
3752 	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
3753 	c.nmac = nmac - 1;
3754 
3755 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3756 	if (ret)
3757 		return ret;
3758 
3759 	if (mac) {
3760 		memcpy(mac, c.mac, sizeof(c.mac));
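		/* Deliberate fall-through below: copy however many
		 * additional MAC addresses the caller asked for,
		 * highest-numbered first.
		 */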
3761 		switch (nmac) {
3762 		case 5:
3763 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3764 		case 4:
3765 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3766 		case 3:
3767 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3768 		case 2:
3769 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
3770 		}
3771 	}
3772 	if (rss_size)
3773 		*rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
3774 	return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
3775 }
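
/*
 * Illustrative sketch (not built): allocating a VI with a single MAC
 * address on physical port "port" (cf. t4_port_init() below).  @mac must
 * hold nmac * ETH_ALEN bytes.
 *
 *	u8 addr[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, adap->mbox, port, adap->fn, 0,
 *			   1, addr, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */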
3776 
3777 /**
3778  *	t4_set_rxmode - set Rx properties of a virtual interface
3779  *	@adap: the adapter
3780  *	@mbox: mailbox to use for the FW command
3781  *	@viid: the VI id
3782  *	@mtu: the new MTU or -1
3783  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3784  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3785  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3786  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3787  *	@sleep_ok: if true we may sleep while awaiting command completion
3788  *
3789  *	Sets Rx properties of a virtual interface.
3790  */
3791 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3792 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3793 		  bool sleep_ok)
3794 {
3795 	struct fw_vi_rxmode_cmd c;
3796 
3797 	/* convert to FW values */
3798 	if (mtu < 0)
3799 		mtu = FW_RXMODE_MTU_NO_CHG;
3800 	if (promisc < 0)
3801 		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
3802 	if (all_multi < 0)
3803 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
3804 	if (bcast < 0)
3805 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
3806 	if (vlanex < 0)
3807 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
3808 
3809 	memset(&c, 0, sizeof(c));
3810 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
3811 			     FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
3812 	c.retval_len16 = htonl(FW_LEN16(c));
3813 	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
3814 				  FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
3815 				  FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
3816 				  FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
3817 				  FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
3818 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3819 }
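
/*
 * Illustrative sketch (not built): enabling promiscuous mode while
 * leaving the MTU and the remaining Rx properties untouched (-1 means
 * "no change"):
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, -1,
 *			    1, -1, -1, -1, true);
 */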
3820 
3821 /**
3822  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3823  *	@adap: the adapter
3824  *	@mbox: mailbox to use for the FW command
3825  *	@viid: the VI id
3826  *	@free: if true any existing filters for this VI id are first removed
3827  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
3828  *	@addr: the MAC address(es)
3829  *	@idx: where to store the index of each allocated filter
3830  *	@hash: pointer to hash address filter bitmap
3831  *	@sleep_ok: call is allowed to sleep
3832  *
3833  *	Allocates an exact-match filter for each of the supplied addresses and
3834  *	sets it to the corresponding address.  If @idx is not %NULL it should
3835  *	have at least @naddr entries, each of which will be set to the index of
3836  *	the filter allocated for the corresponding MAC address.  If a filter
3837  *	could not be allocated for an address its index is set to 0xffff.
3838  *	If @hash is not %NULL addresses that fail to allocate an exact filter
3839  *	are hashed and used to update the hash filter bitmap pointed at by @hash.
3840  *
3841  *	Returns a negative error number or the number of filters allocated.
3842  */
3843 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3844 		      unsigned int viid, bool free, unsigned int naddr,
3845 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3846 {
3847 	int i, ret;
3848 	struct fw_vi_mac_cmd c;
3849 	struct fw_vi_mac_exact *p;
3850 	unsigned int max_naddr = is_t4(adap->params.chip) ?
3851 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
3852 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3853 
3854 	if (naddr > 7)
3855 		return -EINVAL;
3856 
3857 	memset(&c, 0, sizeof(c));
3858 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3859 			     FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
3860 			     FW_VI_MAC_CMD_VIID_V(viid));
3861 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
3862 				    FW_CMD_LEN16_V((naddr + 2) / 2));
3863 
3864 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3865 		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
3866 				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
3867 		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3868 	}
3869 
3870 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3871 	if (ret)
3872 		return ret;
3873 
3874 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3875 		u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
3876 
3877 		if (idx)
3878 			idx[i] = index >= max_naddr ? 0xffff : index;
3879 		if (index < max_naddr)
3880 			ret++;
3881 		else if (hash)
3882 			*hash |= (1ULL << hash_mac_addr(addr[i]));
3883 	}
3884 	return ret;
3885 }
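
/*
 * Illustrative sketch (not built): programming a set of unicast
 * addresses and falling back to the inexact hash for any that don't get
 * an exact filter; "addrs" and "naddr" are hypothetical.  The resulting
 * hash would then be handed to t4_set_addr_hash() below.
 *
 *	u64 hash = 0;
 *	u16 idx[7];
 *
 *	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, false,
 *				naddr, addrs, idx, &hash, true);
 *	if (ret >= 0 && hash)
 *		ret = t4_set_addr_hash(adap, adap->mbox, pi->viid,
 *				       true, hash, true);
 */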
3886 
3887 /**
3888  *	t4_change_mac - modifies the exact-match filter for a MAC address
3889  *	@adap: the adapter
3890  *	@mbox: mailbox to use for the FW command
3891  *	@viid: the VI id
3892  *	@idx: index of existing filter for old value of MAC address, or -1
3893  *	@addr: the new MAC address value
3894  *	@persist: whether a new MAC allocation should be persistent
3895  *	@add_smt: if true also add the address to the HW SMT
3896  *
3897  *	Modifies an exact-match filter and sets it to the new MAC address.
3898  *	Note that in general it is not possible to modify the value of a given
3899  *	filter so the generic way to modify an address filter is to free the one
3900  *	being used by the old address value and allocate a new filter for the
3901  *	new address value.  @idx can be -1 if the address is a new addition.
3902  *
3903  *	Returns a negative error number or the index of the filter with the new
3904  *	MAC value.
3905  */
3906 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3907 		  int idx, const u8 *addr, bool persist, bool add_smt)
3908 {
3909 	int ret, mode;
3910 	struct fw_vi_mac_cmd c;
3911 	struct fw_vi_mac_exact *p = c.u.exact;
3912 	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3913 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
3914 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3915 
3916 	if (idx < 0)                             /* new allocation */
3917 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3918 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3919 
3920 	memset(&c, 0, sizeof(c));
3921 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3922 			     FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
3923 	c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
3924 	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
3925 				FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
3926 				FW_VI_MAC_CMD_IDX_V(idx));
3927 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
3928 
3929 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3930 	if (ret == 0) {
3931 		ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
3932 		if (ret >= max_mac_addr)
3933 			ret = -ENOMEM;
3934 	}
3935 	return ret;
3936 }
3937 
3938 /**
3939  *	t4_set_addr_hash - program the MAC inexact-match hash filter
3940  *	@adap: the adapter
3941  *	@mbox: mailbox to use for the FW command
3942  *	@viid: the VI id
3943  *	@ucast: whether the hash filter should also match unicast addresses
3944  *	@vec: the value to be written to the hash filter
3945  *	@sleep_ok: call is allowed to sleep
3946  *
3947  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
3948  */
3949 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3950 		     bool ucast, u64 vec, bool sleep_ok)
3951 {
3952 	struct fw_vi_mac_cmd c;
3953 
3954 	memset(&c, 0, sizeof(c));
3955 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3956 			     FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
3957 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
3958 				    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
3959 				    FW_CMD_LEN16_V(1));
3960 	c.u.hash.hashvec = cpu_to_be64(vec);
3961 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3962 }
3963 
3964 /**
3965  *      t4_enable_vi_params - enable/disable a virtual interface
3966  *      @adap: the adapter
3967  *      @mbox: mailbox to use for the FW command
3968  *      @viid: the VI id
3969  *      @rx_en: 1=enable Rx, 0=disable Rx
3970  *      @tx_en: 1=enable Tx, 0=disable Tx
3971  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
3972  *
3973  *      Enables/disables a virtual interface.  Note that setting DCB Enable
3974  *      only makes sense when enabling a Virtual Interface ...
3975  */
3976 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3977 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3978 {
3979 	struct fw_vi_enable_cmd c;
3980 
3981 	memset(&c, 0, sizeof(c));
3982 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
3983 			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
3984 
3985 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
3986 			       FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
3987 			       FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
3988 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3989 }
3990 
3991 /**
3992  *	t4_enable_vi - enable/disable a virtual interface
3993  *	@adap: the adapter
3994  *	@mbox: mailbox to use for the FW command
3995  *	@viid: the VI id
3996  *	@rx_en: 1=enable Rx, 0=disable Rx
3997  *	@tx_en: 1=enable Tx, 0=disable Tx
3998  *
3999  *	Enables/disables a virtual interface.
4000  */
4001 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4002 		 bool rx_en, bool tx_en)
4003 {
4004 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4005 }
4006 
4007 /**
4008  *	t4_identify_port - identify a VI's port by blinking its LED
4009  *	@adap: the adapter
4010  *	@mbox: mailbox to use for the FW command
4011  *	@viid: the VI id
4012  *	@nblinks: how many times to blink LED at 2.5 Hz
4013  *
4014  *	Identifies a VI's port by blinking its LED.
4015  */
4016 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4017 		     unsigned int nblinks)
4018 {
4019 	struct fw_vi_enable_cmd c;
4020 
4021 	memset(&c, 0, sizeof(c));
4022 	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
4023 			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
4024 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
4025 	c.blinkdur = htons(nblinks);
4026 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4027 }
4028 
4029 /**
4030  *	t4_iq_free - free an ingress queue and its FLs
4031  *	@adap: the adapter
4032  *	@mbox: mailbox to use for the FW command
4033  *	@pf: the PF owning the queues
4034  *	@vf: the VF owning the queues
4035  *	@iqtype: the ingress queue type
4036  *	@iqid: ingress queue id
4037  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4038  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4039  *
4040  *	Frees an ingress queue and its associated FLs, if any.
4041  */
4042 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4043 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4044 	       unsigned int fl0id, unsigned int fl1id)
4045 {
4046 	struct fw_iq_cmd c;
4047 
4048 	memset(&c, 0, sizeof(c));
4049 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4050 			    FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
4051 			    FW_IQ_CMD_VFN_V(vf));
4052 	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
4053 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
4054 	c.iqid = htons(iqid);
4055 	c.fl0id = htons(fl0id);
4056 	c.fl1id = htons(fl1id);
4057 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4058 }
4059 
4060 /**
4061  *	t4_eth_eq_free - free an Ethernet egress queue
4062  *	@adap: the adapter
4063  *	@mbox: mailbox to use for the FW command
4064  *	@pf: the PF owning the queue
4065  *	@vf: the VF owning the queue
4066  *	@eqid: egress queue id
4067  *
4068  *	Frees an Ethernet egress queue.
4069  */
4070 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4071 		   unsigned int vf, unsigned int eqid)
4072 {
4073 	struct fw_eq_eth_cmd c;
4074 
4075 	memset(&c, 0, sizeof(c));
4076 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
4077 			    FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
4078 			    FW_EQ_ETH_CMD_VFN_V(vf));
4079 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
4080 	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
4081 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4082 }
4083 
4084 /**
4085  *	t4_ctrl_eq_free - free a control egress queue
4086  *	@adap: the adapter
4087  *	@mbox: mailbox to use for the FW command
4088  *	@pf: the PF owning the queue
4089  *	@vf: the VF owning the queue
4090  *	@eqid: egress queue id
4091  *
4092  *	Frees a control egress queue.
4093  */
4094 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4095 		    unsigned int vf, unsigned int eqid)
4096 {
4097 	struct fw_eq_ctrl_cmd c;
4098 
4099 	memset(&c, 0, sizeof(c));
4100 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
4101 			    FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
4102 			    FW_EQ_CTRL_CMD_VFN_V(vf));
4103 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
4104 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
4105 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4106 }
4107 
4108 /**
4109  *	t4_ofld_eq_free - free an offload egress queue
4110  *	@adap: the adapter
4111  *	@mbox: mailbox to use for the FW command
4112  *	@pf: the PF owning the queue
4113  *	@vf: the VF owning the queue
4114  *	@eqid: egress queue id
4115  *
4116  *	Frees an offload egress queue.
4117  */
4118 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4119 		    unsigned int vf, unsigned int eqid)
4120 {
4121 	struct fw_eq_ofld_cmd c;
4122 
4123 	memset(&c, 0, sizeof(c));
4124 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
4125 			    FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
4126 			    FW_EQ_OFLD_CMD_VFN_V(vf));
4127 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
4128 	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
4129 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4130 }
4131 
4132 /**
4133  *	t4_handle_fw_rpl - process a FW reply message
4134  *	@adap: the adapter
4135  *	@rpl: start of the FW message
4136  *
4137  *	Processes a FW message, such as link state change messages.
4138  */
4139 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4140 {
4141 	u8 opcode = *(const u8 *)rpl;
4142 
4143 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
4144 		int speed = 0, fc = 0;
4145 		const struct fw_port_cmd *p = (void *)rpl;
4146 		int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
4147 		int port = adap->chan_map[chan];
4148 		struct port_info *pi = adap2pinfo(adap, port);
4149 		struct link_config *lc = &pi->link_cfg;
4150 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
4151 		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
4152 		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
4153 
4154 		if (stat & FW_PORT_CMD_RXPAUSE_F)
4155 			fc |= PAUSE_RX;
4156 		if (stat & FW_PORT_CMD_TXPAUSE_F)
4157 			fc |= PAUSE_TX;
4158 		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
4159 			speed = 100;
4160 		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
4161 			speed = 1000;
4162 		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
4163 			speed = 10000;
4164 		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
4165 			speed = 40000;
4166 
4167 		if (link_ok != lc->link_ok || speed != lc->speed ||
4168 		    fc != lc->fc) {                    /* something changed */
4169 			lc->link_ok = link_ok;
4170 			lc->speed = speed;
4171 			lc->fc = fc;
4172 			lc->supported = be16_to_cpu(p->u.info.pcap);
4173 			t4_os_link_changed(adap, port, link_ok);
4174 		}
4175 		if (mod != pi->mod_type) {
4176 			pi->mod_type = mod;
4177 			t4_os_portmod_changed(adap, port);
4178 		}
4179 	}
4180 	return 0;
4181 }
4182 
4183 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
4184 {
4185 	u16 val;
4186 
4187 	if (pci_is_pcie(adapter->pdev)) {
4188 		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
4189 		p->speed = val & PCI_EXP_LNKSTA_CLS;
4190 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
4191 	}
4192 }
4193 
4194 /**
4195  *	init_link_config - initialize a link's SW state
4196  *	@lc: structure holding the link state
4197  *	@caps: link capabilities
4198  *
4199  *	Initializes the SW state maintained for each link, including the link's
4200  *	capabilities and default speed/flow-control/autonegotiation settings.
4201  */
4202 static void init_link_config(struct link_config *lc, unsigned int caps)
4203 {
4204 	lc->supported = caps;
4205 	lc->requested_speed = 0;
4206 	lc->speed = 0;
4207 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4208 	if (lc->supported & FW_PORT_CAP_ANEG) {
4209 		lc->advertising = lc->supported & ADVERT_MASK;
4210 		lc->autoneg = AUTONEG_ENABLE;
4211 		lc->requested_fc |= PAUSE_AUTONEG;
4212 	} else {
4213 		lc->advertising = 0;
4214 		lc->autoneg = AUTONEG_DISABLE;
4215 	}
4216 }
4217 
4218 #define CIM_PF_NOACCESS 0xeeeeeeee
4219 
4220 int t4_wait_dev_ready(void __iomem *regs)
4221 {
4222 	u32 whoami;
4223 
4224 	whoami = readl(regs + PL_WHOAMI_A);
4225 	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
4226 		return 0;
4227 
4228 	msleep(500);
4229 	whoami = readl(regs + PL_WHOAMI_A);
4230 	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
4231 }
4232 
4233 struct flash_desc {
4234 	u32 vendor_and_model_id;
4235 	u32 size_mb;
4236 };
4237 
4238 static int get_flash_params(struct adapter *adap)
4239 {
4240 	/* Table of supported non-Numonix flash parts.  Numonix parts are left
4241 	 * to the preexisting code.  All flash parts have 64KB sectors.
4242 	 */
4243 	static struct flash_desc supported_flash[] = {
4244 		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
4245 	};
4246 
4247 	int ret;
4248 	u32 info;
4249 
4250 	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
4251 	if (!ret)
4252 		ret = sf1_read(adap, 3, 0, 1, &info);
4253 	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
4254 	if (ret)
4255 		return ret;
4256 
4257 	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
4258 		if (supported_flash[ret].vendor_and_model_id == info) {
4259 			adap->params.sf_size = supported_flash[ret].size_mb;
4260 			adap->params.sf_nsec =
4261 				adap->params.sf_size / SF_SEC_SIZE;
4262 			return 0;
4263 		}
4264 
4265 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
4266 		return -EINVAL;
4267 	info >>= 16;                           /* log2 of size */
4268 	if (info >= 0x14 && info < 0x18)
4269 		adap->params.sf_nsec = 1 << (info - 16);
4270 	else if (info == 0x18)
4271 		adap->params.sf_nsec = 64;
4272 	else
4273 		return -EINVAL;
4274 	adap->params.sf_size = 1 << info;
4275 	adap->params.sf_fw_start =
4276 		t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
4277 
4278 	if (adap->params.sf_size < FLASH_MIN_SIZE)
4279 		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
4280 			 adap->params.sf_size, FLASH_MIN_SIZE);
4281 	return 0;
4282 }
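
/*
 * Worked example of the Numonix decode above: an ID byte of 0x20 with a
 * size field of 0x17 describes a 2^0x17 = 8MB part, giving sf_nsec =
 * 1 << (0x17 - 16) = 128 sectors of 64KB each.
 */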
4283 
4284 /**
4285  *	t4_prep_adapter - prepare SW and HW for operation
4286  *	@adapter: the adapter
4288  *
4289  *	Initialize adapter SW state for the various HW modules, set initial
4290  *	values for some adapter tunables, take PHYs out of reset, and
4291  *	initialize the MDIO interface.
4292  */
4293 int t4_prep_adapter(struct adapter *adapter)
4294 {
4295 	int ret, ver;
4296 	uint16_t device_id;
4297 	u32 pl_rev;
4298 
4299 	get_pci_mode(adapter, &adapter->params.pci);
4300 	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
4301 
4302 	ret = get_flash_params(adapter);
4303 	if (ret < 0) {
4304 		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
4305 		return ret;
4306 	}
4307 
4308 	/* Retrieve adapter's device ID
4309 	 */
4310 	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
4311 	ver = device_id >> 12;
4312 	adapter->params.chip = 0;
4313 	switch (ver) {
4314 	case CHELSIO_T4:
4315 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
4316 		break;
4317 	case CHELSIO_T5:
4318 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4319 		break;
4320 	default:
4321 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4322 			device_id);
4323 		return -EINVAL;
4324 	}
4325 
4326 	adapter->params.cim_la_size = CIMLA_SIZE;
4327 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4328 
4329 	/*
4330 	 * Default port for debugging in case we can't reach FW.
4331 	 */
4332 	adapter->params.nports = 1;
4333 	adapter->params.portvec = 1;
4334 	adapter->params.vpd.cclk = 50000;
4335 	return 0;
4336 }
4337 
4338 /**
4339  *	cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
4340  *	@adapter: the adapter
4341  *	@qid: the Queue ID
4342  *	@qtype: the Ingress or Egress type for @qid
4343  *	@pbar2_qoffset: BAR2 Queue Offset
4344  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4345  *
4346  *	Returns the BAR2 SGE Queue Registers information associated with the
4347  *	indicated Absolute Queue ID.  These are passed back in return value
4348  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
4349  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
4350  *
4351  *	This may return an error which indicates that BAR2 SGE Queue
4352  *	registers aren't available.  If an error is not returned, then the
4353  *	following values are returned:
4354  *
4355  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
4356  *	  *@pbar2_qid: the BAR2 SGE Queue ID of @qid, or 0
4357  *
4358  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
4359  *	require the "Inferred Queue ID" ability may be used.  E.g. the
4360  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
4361  *	then these "Inferred Queue ID" registers may not be used.
4362  */
4363 int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
4364 		      unsigned int qid,
4365 		      enum t4_bar2_qtype qtype,
4366 		      u64 *pbar2_qoffset,
4367 		      unsigned int *pbar2_qid)
4368 {
4369 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
4370 	u64 bar2_page_offset, bar2_qoffset;
4371 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
4372 
4373 	/* T4 doesn't support BAR2 SGE Queue registers.
4374 	 */
4375 	if (is_t4(adapter->params.chip))
4376 		return -EINVAL;
4377 
4378 	/* Get our SGE Page Size parameters.
4379 	 */
4380 	page_shift = adapter->params.sge.hps + 10;
4381 	page_size = 1 << page_shift;
4382 
4383 	/* Get the right Queues per Page parameters for our Queue.
4384 	 */
4385 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
4386 		     ? adapter->params.sge.eq_qpp
4387 		     : adapter->params.sge.iq_qpp);
4388 	qpp_mask = (1 << qpp_shift) - 1;
4389 
4390 	/*  Calculate the basics of the BAR2 SGE Queue register area:
4391 	 *  o The BAR2 page the Queue registers will be in.
4392 	 *  o The BAR2 Queue ID.
4393 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
4394 	 */
4395 	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
4396 	bar2_qid = qid & qpp_mask;
4397 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
4398 
4399 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
4400 	 * hardware will infer the Absolute Queue ID simply from the writes to
4401 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
4402 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
4403 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
4404 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
4405 	 * from the BAR2 Page and BAR2 Queue ID.
4406 	 *
4407 	 * One important consequence of this is that some BAR2 SGE registers
4408 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
4409 	 * there.  But other registers synthesize the SGE Queue ID purely
4410 	 * from the writes to the registers -- the Write Combined Doorbell
4411 	 * Buffer is a good example.  These BAR2 SGE Registers are only
4412 	 * available for those BAR2 SGE Register areas where the SGE Absolute
4413 	 * Queue ID can be inferred from simple writes.
4414 	 */
4415 	bar2_qoffset = bar2_page_offset;
4416 	bar2_qinferred = (bar2_qid_offset < page_size);
4417 	if (bar2_qinferred) {
4418 		bar2_qoffset += bar2_qid_offset;
4419 		bar2_qid = 0;
4420 	}
4421 
4422 	*pbar2_qoffset = bar2_qoffset;
4423 	*pbar2_qid = bar2_qid;
4424 	return 0;
4425 }
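
/*
 * Illustrative sketch (not built): locating an Egress Queue's doorbell
 * registers.  "q" is a hypothetical SGE queue and adap->bar2 the
 * ioremap'd BAR2 region.
 *
 *	u64 qoffset;
 *	unsigned int qid;
 *
 *	if (cxgb4_t4_bar2_sge_qregs(adap, q->cntxt_id,
 *				    T4_BAR2_QTYPE_EGRESS,
 *				    &qoffset, &qid) == 0) {
 *		q->bar2_addr = adap->bar2 + qoffset;
 *		q->bar2_qid = qid;
 *	}
 *
 * Doorbell writes then target q->bar2_addr plus the register offset
 * (e.g. SGE_UDB_KDOORBELL), OR'ing in q->bar2_qid when it is non-zero.
 */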
4426 
4427 /**
4428  *	t4_init_sge_params - initialize adap->params.sge
4429  *	@adapter: the adapter
4430  *
4431  *	Initialize various fields of the adapter's SGE Parameters structure.
4432  */
4433 int t4_init_sge_params(struct adapter *adapter)
4434 {
4435 	struct sge_params *sge_params = &adapter->params.sge;
4436 	u32 hps, qpp;
4437 	unsigned int s_hps, s_qpp;
4438 
4439 	/* Extract the SGE Page Size for our PF.
4440 	 */
4441 	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
4442 	s_hps = (HOSTPAGESIZEPF0_S +
4443 		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
4444 	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
4445 
4446 	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
4447 	 */
4448 	s_qpp = (QUEUESPERPAGEPF0_S +
4449 		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
4450 	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
4451 	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4452 	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
4453 	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4454 
4455 	return 0;
4456 }
4457 
4458 /**
4459  *      t4_init_tp_params - initialize adap->params.tp
4460  *      @adap: the adapter
4461  *
4462  *      Initialize various fields of the adapter's TP Parameters structure.
4463  */
4464 int t4_init_tp_params(struct adapter *adap)
4465 {
4466 	int chan;
4467 	u32 v;
4468 
4469 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
4470 	adap->params.tp.tre = TIMERRESOLUTION_G(v);
4471 	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
4472 
4473 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4474 	for (chan = 0; chan < NCHAN; chan++)
4475 		adap->params.tp.tx_modq[chan] = chan;
4476 
4477 	/* Cache the adapter's Compressed Filter Mode and global Ingress
4478 	 * Configuration.
4479 	 */
4480 	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4481 			 &adap->params.tp.vlan_pri_map, 1,
4482 			 TP_VLAN_PRI_MAP_A);
4483 	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4484 			 &adap->params.tp.ingress_config, 1,
4485 			 TP_INGRESS_CONFIG_A);
4486 
4487 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
4488 	 * shift positions of several elements of the Compressed Filter Tuple
4489 	 * for this adapter which we need frequently ...
4490 	 */
4491 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
4492 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
4493 	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
4494 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4495 							       PROTOCOL_F);
4496 
4497 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4498 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
4499 	 */
4500 	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
4501 		adap->params.tp.vnic_shift = -1;
4502 
4503 	return 0;
4504 }
4505 
4506 /**
4507  *      t4_filter_field_shift - calculate filter field shift
4508  *      @adap: the adapter
4509  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
4510  *
4511  *      Return the shift position of a filter field within the Compressed
4512  *      Filter Tuple.  The filter field is specified via its selection bit
4513  *      within TP_VLAN_PRI_MAP (filter mode).  E.g. VLAN_F.
4514  */
4515 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4516 {
4517 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
4518 	unsigned int sel;
4519 	int field_shift;
4520 
4521 	if ((filter_mode & filter_sel) == 0)
4522 		return -1;
4523 
4524 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4525 		switch (filter_mode & sel) {
4526 		case FCOE_F:
4527 			field_shift += FT_FCOE_W;
4528 			break;
4529 		case PORT_F:
4530 			field_shift += FT_PORT_W;
4531 			break;
4532 		case VNIC_ID_F:
4533 			field_shift += FT_VNIC_ID_W;
4534 			break;
4535 		case VLAN_F:
4536 			field_shift += FT_VLAN_W;
4537 			break;
4538 		case TOS_F:
4539 			field_shift += FT_TOS_W;
4540 			break;
4541 		case PROTOCOL_F:
4542 			field_shift += FT_PROTOCOL_W;
4543 			break;
4544 		case ETHERTYPE_F:
4545 			field_shift += FT_ETHERTYPE_W;
4546 			break;
4547 		case MACMATCH_F:
4548 			field_shift += FT_MACMATCH_W;
4549 			break;
4550 		case MPSHITTYPE_F:
4551 			field_shift += FT_MPSHITTYPE_W;
4552 			break;
4553 		case FRAGMENTATION_F:
4554 			field_shift += FT_FRAGMENTATION_W;
4555 			break;
4556 		}
4557 	}
4558 	return field_shift;
4559 }
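
/*
 * Worked example: with a filter mode of PORT_F | VLAN_F | PROTOCOL_F,
 * t4_filter_field_shift(adap, PROTOCOL_F) walks the selection bits below
 * PROTOCOL_F and accumulates FT_PORT_W + FT_VLAN_W, i.e. the Protocol
 * field sits just above the Port and VLAN fields in the Compressed
 * Filter Tuple; fields absent from the mode contribute nothing.
 */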
4560 
4561 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
4562 {
4563 	u8 addr[6];
4564 	int ret, i, j = 0;
4565 	struct fw_port_cmd c;
4566 	struct fw_rss_vi_config_cmd rvc;
4567 
4568 	memset(&c, 0, sizeof(c));
4569 	memset(&rvc, 0, sizeof(rvc));
4570 
4571 	for_each_port(adap, i) {
4572 		unsigned int rss_size;
4573 		struct port_info *p = adap2pinfo(adap, i);
4574 
4575 		while ((adap->params.portvec & (1 << j)) == 0)
4576 			j++;
4577 
4578 		c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
4579 				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
4580 				       FW_PORT_CMD_PORTID_V(j));
4581 		c.action_to_len16 = htonl(
4582 			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
4583 			FW_LEN16(c));
4584 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4585 		if (ret)
4586 			return ret;
4587 
4588 		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4589 		if (ret < 0)
4590 			return ret;
4591 
4592 		p->viid = ret;
4593 		p->tx_chan = j;
4594 		p->lport = j;
4595 		p->rss_size = rss_size;
4596 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
4597 		adap->port[i]->dev_port = j;
4598 
4599 		ret = ntohl(c.u.info.lstatus_to_modtype);
4600 		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
4601 			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
4602 		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
4603 		p->mod_type = FW_PORT_MOD_TYPE_NA;
4604 
4605 		rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
4606 				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
4607 				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4608 		rvc.retval_len16 = htonl(FW_LEN16(rvc));
4609 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4610 		if (ret)
4611 			return ret;
4612 		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4613 
4614 		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4615 		j++;
4616 	}
4617 	return 0;
4618 }
4619 
4620 /**
4621  *	t4_read_cimq_cfg - read CIM queue configuration
4622  *	@adap: the adapter
4623  *	@base: holds the queue base addresses in bytes
4624  *	@size: holds the queue sizes in bytes
4625  *	@thres: holds the queue full thresholds in bytes
4626  *
4627  *	Returns the current configuration of the CIM queues, starting with
4628  *	the IBQs, then the OBQs.
4629  */
4630 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
4631 {
4632 	unsigned int i, v;
4633 	int cim_num_obq = is_t4(adap->params.chip) ?
4634 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4635 
4636 	for (i = 0; i < CIM_NUM_IBQ; i++) {
4637 		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
4638 			     QUENUMSELECT_V(i));
4639 		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4640 		/* value is in 256-byte units */
4641 		*base++ = CIMQBASE_G(v) * 256;
4642 		*size++ = CIMQSIZE_G(v) * 256;
4643 		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
4644 	}
4645 	for (i = 0; i < cim_num_obq; i++) {
4646 		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4647 			     QUENUMSELECT_V(i));
4648 		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4649 		/* value is in 256-byte units */
4650 		*base++ = CIMQBASE_G(v) * 256;
4651 		*size++ = CIMQSIZE_G(v) * 256;
4652 	}
4653 }
4654 
4655 /**
4656  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
4657  *	@adap: the adapter
4658  *	@qid: the queue index
4659  *	@data: where to store the queue contents
4660  *	@n: capacity of @data in 32-bit words
4661  *
4662  *	Reads the contents of the selected CIM queue starting at address 0 up
4663  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
4664  *	error and the number of 32-bit words actually read on success.
4665  */
4666 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4667 {
4668 	int i, err, attempts;
4669 	unsigned int addr;
4670 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
4671 
4672 	if (qid > 5 || (n & 3))
4673 		return -EINVAL;
4674 
4675 	addr = qid * nwords;
4676 	if (n > nwords)
4677 		n = nwords;
4678 
4679 	/* It might take 3-10ms before the IBQ debug read access is allowed.
4680 	 * Wait up to 1 second with a 1 usec delay between attempts.
4681 	 */
4682 	attempts = 1000000;
4683 
4684 	for (i = 0; i < n; i++, addr++) {
4685 		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
4686 			     IBQDBGEN_F);
4687 		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
4688 				      attempts, 1);
4689 		if (err)
4690 			return err;
4691 		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
4692 	}
4693 	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
4694 	return i;
4695 }
4696 
4697 /**
4698  *	t4_read_cim_obq - read the contents of a CIM outbound queue
4699  *	@adap: the adapter
4700  *	@qid: the queue index
4701  *	@data: where to store the queue contents
4702  *	@n: capacity of @data in 32-bit words
4703  *
4704  *	Reads the contents of the selected CIM queue starting at address 0 up
4705  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
4706  *	error and the number of 32-bit words actually read on success.
4707  */
4708 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4709 {
4710 	int i, err;
4711 	unsigned int addr, v, nwords;
4712 	int cim_num_obq = is_t4(adap->params.chip) ?
4713 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4714 
4715 	if ((qid > (cim_num_obq - 1)) || (n & 3))
4716 		return -EINVAL;
4717 
4718 	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4719 		     QUENUMSELECT_V(qid));
4720 	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4721 
4722 	addr = CIMQBASE_G(v) * 64;    /* 256-byte units -> 4-byte words */
4723 	nwords = CIMQSIZE_G(v) * 64;  /* same conversion */
4724 	if (n > nwords)
4725 		n = nwords;
4726 
4727 	for (i = 0; i < n; i++, addr++) {
4728 		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
4729 			     OBQDBGEN_F);
4730 		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
4731 				      2, 1);
4732 		if (err)
4733 			return err;
4734 		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
4735 	}
4736 	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
4737 	return i;
4738 }
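
/* Usage sketch (illustrative only, not part of the driver): read an OBQ.
 * t4_read_cim_obq() clips @n to the queue size it reads back from
 * CIM_QUEUE_CONFIG_CTRL_A, so a caller may pass a generous buffer and
 * rely on the returned word count; @nwords must still be a multiple of 4.
 * The helper name is hypothetical.
 */
static int __maybe_unused example_dump_obq(struct adapter *adap,
					   unsigned int qid, u32 *buf,
					   size_t nwords)
{
	int ret = t4_read_cim_obq(adap, qid, buf, nwords);

	if (ret < 0)
		dev_err(adap->pdev_dev, "OBQ%u read failed: %d\n", qid, ret);
	return ret;
}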
4739 
4740 /**
4741  *	t4_cim_read - read a block from CIM internal address space
4742  *	@adap: the adapter
4743  *	@addr: the start address within the CIM address space
4744  *	@n: number of words to read
4745  *	@valp: where to store the result
4746  *
4747  *	Reads a block of 4-byte words from the CIM internal address space.
4748  */
4749 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
4750 		unsigned int *valp)
4751 {
4752 	int ret = 0;
4753 
4754 	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4755 		return -EBUSY;
4756 
4757 	for ( ; !ret && n--; addr += 4) {
4758 		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
4759 		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4760 				      0, 5, 2);
4761 		if (!ret)
4762 			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
4763 	}
4764 	return ret;
4765 }
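
/* Usage sketch (illustrative only, not part of the driver): peek at a
 * small block of the CIM internal address space.  The helper name is
 * hypothetical; @addr would be a well-known CIM address such as
 * UP_UP_DBG_LA_CFG_A.  t4_cim_read() returns -EBUSY if the host access
 * interface is already in use.
 */
static int __maybe_unused example_cim_peek(struct adapter *adap,
					   unsigned int addr)
{
	unsigned int vals[4];
	int ret;

	ret = t4_cim_read(adap, addr, ARRAY_SIZE(vals), vals);
	if (ret)
		return ret;
	dev_info(adap->pdev_dev, "CIM[%#x] = %#x %#x %#x %#x\n", addr,
		 vals[0], vals[1], vals[2], vals[3]);
	return 0;
}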
4766 
4767 /**
4768  *	t4_cim_write - write a block into CIM internal address space
4769  *	@adap: the adapter
4770  *	@addr: the start address within the CIM address space
4771  *	@n: number of words to write
4772  *	@valp: set of values to write
4773  *
4774  *	Writes a block of 4-byte words into the CIM internal address space.
4775  */
4776 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
4777 		 const unsigned int *valp)
4778 {
4779 	int ret = 0;
4780 
4781 	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4782 		return -EBUSY;
4783 
4784 	for ( ; !ret && n--; addr += 4) {
4785 		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
4786 		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
4787 		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4788 				      0, 5, 2);
4789 	}
4790 	return ret;
4791 }
4792 
4793 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
4794 			 unsigned int val)
4795 {
4796 	return t4_cim_write(adap, addr, 1, &val);
4797 }
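
/* Usage sketch (illustrative only, not part of the driver): a
 * read-modify-write of one CIM word built from t4_cim_read() and
 * t4_cim_write1(), the same pattern t4_cim_read_la() below uses to
 * toggle bits in UP_UP_DBG_LA_CFG_A.  The helper name is hypothetical.
 */
static int __maybe_unused example_cim_set_bits(struct adapter *adap,
					       unsigned int addr,
					       unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = t4_cim_read(adap, addr, 1, &val);
	if (ret)
		return ret;
	return t4_cim_write1(adap, addr, val | bits);
}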
4798 
4799 /**
4800  *	t4_cim_read_la - read CIM LA capture buffer
4801  *	@adap: the adapter
4802  *	@la_buf: where to store the LA data
4803  *	@wrptr: the HW write pointer within the capture buffer
4804  *
4805  *	Reads the contents of the CIM LA buffer with the most recent entry at
4806  *	the end of the returned data and with the entry at @wrptr first.
4807  *	We try to leave the LA in the running state we find it in.
4808  */
4809 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
4810 {
4811 	int i, ret;
4812 	unsigned int cfg, val, idx;
4813 
4814 	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
4815 	if (ret)
4816 		return ret;
4817 
4818 	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
4819 		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
4820 		if (ret)
4821 			return ret;
4822 	}
4823 
4824 	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4825 	if (ret)
4826 		goto restart;
4827 
4828 	idx = UPDBGLAWRPTR_G(val);
4829 	if (wrptr)
4830 		*wrptr = idx;
4831 
4832 	for (i = 0; i < adap->params.cim_la_size; i++) {
4833 		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4834 				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
4835 		if (ret)
4836 			break;
4837 		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4838 		if (ret)
4839 			break;
4840 		if (val & UPDBGLARDEN_F) {
4841 			ret = -ETIMEDOUT;
4842 			break;
4843 		}
4844 		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
4845 		if (ret)
4846 			break;
4847 		idx = (idx + 1) & UPDBGLARDPTR_M;
4848 	}
4849 restart:
4850 	if (cfg & UPDBGLAEN_F) {
4851 		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4852 				      cfg & ~UPDBGLARDEN_F);
4853 		if (!ret)
4854 			ret = r;
4855 	}
4856 	return ret;
4857 }
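
/* Usage sketch (illustrative only, not part of the driver): capture the
 * CIM LA into a freshly allocated buffer.  The buffer must hold
 * adap->params.cim_la_size 32-bit words, the number of entries the loop
 * above reads.  The helper name and logging are assumptions.
 */
static int __maybe_unused example_capture_cim_la(struct adapter *adap)
{
	unsigned int wrptr;
	u32 *la_buf;
	int ret;

	la_buf = kcalloc(adap->params.cim_la_size, sizeof(u32), GFP_KERNEL);
	if (!la_buf)
		return -ENOMEM;

	ret = t4_cim_read_la(adap, la_buf, &wrptr);
	if (!ret)
		dev_info(adap->pdev_dev, "CIM LA captured, wrptr %u\n", wrptr);

	kfree(la_buf);
	return ret;
}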
4858 
4859 /**
4860  *	t4_tp_read_la - read TP LA capture buffer
4861  *	@adap: the adapter
4862  *	@la_buf: where to store the LA data
4863  *	@wrptr: the HW write pointer within the capture buffer
4864  *
4865  *	Reads the contents of the TP LA buffer with the most recent entry at
4866  *	the end of the returned data and with the entry at @wrptr first.
4867  *	We leave the LA in the running state we find it in.
4868  */
4869 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
4870 {
4871 	bool last_incomplete;
4872 	unsigned int i, cfg, val, idx;
4873 
4874 	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
4875 	if (cfg & DBGLAENABLE_F)			/* freeze LA */
4876 		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4877 			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
4878 
4879 	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
4880 	idx = DBGLAWPTR_G(val);
4881 	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
4882 	if (last_incomplete)
4883 		idx = (idx + 1) & DBGLARPTR_M;
4884 	if (wrptr)
4885 		*wrptr = idx;
4886 
4887 	val &= 0xffff;
4888 	val &= ~DBGLARPTR_V(DBGLARPTR_M);
4889 	val |= adap->params.tp.la_mask;
4890 
4891 	for (i = 0; i < TPLA_SIZE; i++) {
4892 		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
4893 		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
4894 		idx = (idx + 1) & DBGLARPTR_M;
4895 	}
4896 
4897 	/* Wipe out last entry if it isn't valid */
4898 	if (last_incomplete)
4899 		la_buf[TPLA_SIZE - 1] = ~0ULL;
4900 
4901 	if (cfg & DBGLAENABLE_F)                    /* restore running state */
4902 		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4903 			     cfg | adap->params.tp.la_mask);
4904 }
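
/* Usage sketch (illustrative only, not part of the driver): capture the
 * TP LA.  t4_tp_read_la() always fills exactly TPLA_SIZE 64-bit entries
 * and overwrites the last one with ~0ULL when it is known to be
 * incomplete, so an all-ones final entry is a (heuristic) sentinel.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_capture_tp_la(struct adapter *adap)
{
	unsigned int wrptr;
	u64 *la_buf;

	la_buf = kcalloc(TPLA_SIZE, sizeof(u64), GFP_KERNEL);
	if (!la_buf)
		return -ENOMEM;

	t4_tp_read_la(adap, la_buf, &wrptr);
	if (la_buf[TPLA_SIZE - 1] == ~0ULL)
		dev_info(adap->pdev_dev,
			 "last TP LA entry was incomplete\n");

	kfree(la_buf);
	return 0;
}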
4905