1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4fw_api.h"
39 
40 /**
41  *	t4_wait_op_done_val - wait until an operation is completed
42  *	@adapter: the adapter performing the operation
43  *	@reg: the register to check for completion
44  *	@mask: a single-bit field within @reg that indicates completion
45  *	@polarity: the value of the field when the operation is completed
46  *	@attempts: number of check iterations
47  *	@delay: delay in usecs between iterations
48  *	@valp: where to store the value of the register at completion time
49  *
50  *	Wait until an operation is completed by checking a bit in a register
51  *	up to @attempts times.  If @valp is not NULL the value of the register
52  *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
54  */
55 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 			       int polarity, int attempts, int delay, u32 *valp)
57 {
58 	while (1) {
59 		u32 val = t4_read_reg(adapter, reg);
60 
61 		if (!!(val & mask) == polarity) {
62 			if (valp)
63 				*valp = val;
64 			return 0;
65 		}
66 		if (--attempts == 0)
67 			return -EAGAIN;
68 		if (delay)
69 			udelay(delay);
70 	}
71 }
72 
73 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
74 				  int polarity, int attempts, int delay)
75 {
76 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
77 				   delay, NULL);
78 }
79 
80 /**
81  *	t4_set_reg_field - set a register field to a value
82  *	@adapter: the adapter to program
83  *	@addr: the register address
84  *	@mask: specifies the portion of the register to modify
85  *	@val: the new value for the register field
86  *
87  *	Sets a register field specified by the supplied mask to the
88  *	given value.
89  */
90 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
91 		      u32 val)
92 {
93 	u32 v = t4_read_reg(adapter, addr) & ~mask;
94 
95 	t4_write_reg(adapter, addr, v | val);
96 	(void) t4_read_reg(adapter, addr);      /* flush */
97 }
98 
99 /**
100  *	t4_read_indirect - read indirectly addressed registers
101  *	@adap: the adapter
102  *	@addr_reg: register holding the indirect address
103  *	@data_reg: register holding the value of the indirect register
104  *	@vals: where the read register values are stored
105  *	@nregs: how many indirect registers to read
106  *	@start_idx: index of first indirect register to read
107  *
108  *	Reads registers that are accessed indirectly through an address/data
109  *	register pair.
110  */
111 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
112 			     unsigned int data_reg, u32 *vals,
113 			     unsigned int nregs, unsigned int start_idx)
114 {
115 	while (nregs--) {
116 		t4_write_reg(adap, addr_reg, start_idx);
117 		*vals++ = t4_read_reg(adap, data_reg);
118 		start_idx++;
119 	}
120 }
121 
122 /**
123  *	t4_write_indirect - write indirectly addressed registers
124  *	@adap: the adapter
125  *	@addr_reg: register holding the indirect addresses
126  *	@data_reg: register holding the value for the indirect registers
127  *	@vals: values to write
128  *	@nregs: how many indirect registers to write
 *	@start_idx: index of first indirect register to write
130  *
131  *	Writes a sequential block of registers that are accessed indirectly
132  *	through an address/data register pair.
133  */
134 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
135 		       unsigned int data_reg, const u32 *vals,
136 		       unsigned int nregs, unsigned int start_idx)
137 {
138 	while (nregs--) {
139 		t4_write_reg(adap, addr_reg, start_idx++);
140 		t4_write_reg(adap, data_reg, *vals++);
141 	}
142 }
143 
144 /*
145  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
146  * mechanism.  This guarantees that we get the real value even if we're
147  * operating within a Virtual Machine and the Hypervisor is trapping our
148  * Configuration Space accesses.
149  */
150 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
151 {
152 	u32 req = ENABLE | FUNCTION(adap->fn) | reg;
153 
154 	if (is_t4(adap->params.chip))
155 		req |= F_LOCALCFG;
156 
157 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
158 	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
159 
160 	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
161 	 * Configuration Space read.  (None of the other fields matter when
162 	 * ENABLE is 0 so a simple register write is easier than a
163 	 * read-modify-write via t4_set_reg_field().)
164 	 */
165 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
166 }
167 
168 /*
169  * t4_report_fw_error - report firmware error
170  * @adap: the adapter
171  *
172  * The adapter firmware can indicate error conditions to the host.
173  * If the firmware has indicated an error, print out the reason for
174  * the firmware error.
175  */
176 static void t4_report_fw_error(struct adapter *adap)
177 {
178 	static const char *const reason[] = {
179 		"Crash",                        /* PCIE_FW_EVAL_CRASH */
180 		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
181 		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
182 		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
183 		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
184 		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
185 		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
186 		"Reserved",                     /* reserved */
187 	};
188 	u32 pcie_fw;
189 
190 	pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
191 	if (pcie_fw & FW_PCIE_FW_ERR)
192 		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
193 			reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
194 }
195 
196 /*
197  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
198  */
199 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
200 			 u32 mbox_addr)
201 {
202 	for ( ; nflit; nflit--, mbox_addr += 8)
203 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
204 }
205 
206 /*
207  * Handle a FW assertion reported in a mailbox.
208  */
209 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
210 {
211 	struct fw_debug_cmd asrt;
212 
213 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
214 	dev_alert(adap->pdev_dev,
215 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
216 		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
217 		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
218 }
219 
220 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
221 {
222 	dev_err(adap->pdev_dev,
223 		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
224 		(unsigned long long)t4_read_reg64(adap, data_reg),
225 		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
226 		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
227 		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
228 		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
229 		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
230 		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
231 		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
232 }
233 
234 /**
235  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
236  *	@adap: the adapter
237  *	@mbox: index of the mailbox to use
238  *	@cmd: the command to write
239  *	@size: command length in bytes
240  *	@rpl: where to optionally store the reply
241  *	@sleep_ok: if true we may sleep while awaiting command completion
242  *
243  *	Sends the given command to FW through the selected mailbox and waits
244  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
245  *	store the FW's reply to the command.  The command and its optional
246  *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
247  *	to respond.  @sleep_ok determines whether we may sleep while awaiting
248  *	the response.  If sleeping is allowed we use progressive backoff
249  *	otherwise we spin.
250  *
251  *	The return value is 0 on success or a negative errno on failure.  A
252  *	failure can happen either because we are not able to execute the
253  *	command or FW executes it but signals an error.  In the latter case
254  *	the return value is the error code indicated by FW (negated).
255  */
256 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
257 		    void *rpl, bool sleep_ok)
258 {
259 	static const int delay[] = {
260 		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
261 	};
262 
263 	u32 v;
264 	u64 res;
265 	int i, ms, delay_idx;
266 	const __be64 *p = cmd;
267 	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
268 	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
269 
270 	if ((size & 15) || size > MBOX_LEN)
271 		return -EINVAL;
272 
273 	/*
274 	 * If the device is off-line, as in EEH, commands will time out.
275 	 * Fail them early so we don't waste time waiting.
276 	 */
277 	if (adap->pdev->error_state != pci_channel_io_normal)
278 		return -EIO;
279 
280 	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
281 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
282 		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
283 
284 	if (v != MBOX_OWNER_DRV)
285 		return v ? -EBUSY : -ETIMEDOUT;
286 
287 	for (i = 0; i < size; i += 8)
288 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
289 
290 	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
291 	t4_read_reg(adap, ctl_reg);          /* flush write */
292 
293 	delay_idx = 0;
294 	ms = delay[0];
295 
296 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
297 		if (sleep_ok) {
298 			ms = delay[delay_idx];  /* last element may repeat */
299 			if (delay_idx < ARRAY_SIZE(delay) - 1)
300 				delay_idx++;
301 			msleep(ms);
		} else {
			mdelay(ms);
		}
304 
305 		v = t4_read_reg(adap, ctl_reg);
306 		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
307 			if (!(v & MBMSGVALID)) {
308 				t4_write_reg(adap, ctl_reg, 0);
309 				continue;
310 			}
311 
312 			res = t4_read_reg64(adap, data_reg);
313 			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
314 				fw_asrt(adap, data_reg);
315 				res = FW_CMD_RETVAL(EIO);
316 			} else if (rpl)
317 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
318 
319 			if (FW_CMD_RETVAL_GET((int)res))
320 				dump_mbox(adap, mbox, data_reg);
321 			t4_write_reg(adap, ctl_reg, 0);
322 			return -FW_CMD_RETVAL_GET((int)res);
323 		}
324 	}
325 
326 	dump_mbox(adap, mbox, data_reg);
327 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
328 		*(const u8 *)cmd, mbox);
329 	t4_report_fw_error(adap);
330 	return -ETIMEDOUT;
331 }
332 
333 /**
334  *	t4_mc_read - read from MC through backdoor accesses
335  *	@adap: the adapter
336  *	@addr: address of first byte requested
337  *	@idx: which MC to access
 *	@data: where to store the 64 bytes of data covering @addr
339  *	@ecc: where to store the corresponding 64-bit ECC word
340  *
341  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
343  *	is assigned the 64-bit ECC word for the read data.
344  */
345 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
346 {
347 	int i;
348 	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
349 	u32 mc_bist_status_rdata, mc_bist_data_pattern;
350 
351 	if (is_t4(adap->params.chip)) {
352 		mc_bist_cmd = MC_BIST_CMD;
353 		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
354 		mc_bist_cmd_len = MC_BIST_CMD_LEN;
355 		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
356 		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
357 	} else {
358 		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
359 		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
360 		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
361 		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
362 		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
363 	}
364 
365 	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
366 		return -EBUSY;
367 	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
368 	t4_write_reg(adap, mc_bist_cmd_len, 64);
369 	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
370 	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
371 		     BIST_CMD_GAP(1));
372 	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
373 	if (i)
374 		return i;
375 
376 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
377 
378 	for (i = 15; i >= 0; i--)
379 		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
380 	if (ecc)
381 		*ecc = t4_read_reg64(adap, MC_DATA(16));
382 #undef MC_DATA
383 	return 0;
384 }
385 
386 /**
387  *	t4_edc_read - read from EDC through backdoor accesses
388  *	@adap: the adapter
389  *	@idx: which EDC to access
390  *	@addr: address of first byte requested
 *	@data: where to store the 64 bytes of data covering @addr
392  *	@ecc: where to store the corresponding 64-bit ECC word
393  *
394  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
396  *	is assigned the 64-bit ECC word for the read data.
397  */
398 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
399 {
400 	int i;
401 	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
402 	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
403 
404 	if (is_t4(adap->params.chip)) {
405 		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
406 		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
407 		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
408 		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
409 						    idx);
410 		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
411 						    idx);
412 	} else {
413 		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
414 		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
415 		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
416 		edc_bist_cmd_data_pattern =
417 			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
418 		edc_bist_status_rdata =
419 			 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
420 	}
421 
422 	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
423 		return -EBUSY;
424 	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
425 	t4_write_reg(adap, edc_bist_cmd_len, 64);
426 	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
427 	t4_write_reg(adap, edc_bist_cmd,
428 		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
429 	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
430 	if (i)
431 		return i;
432 
433 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
434 
435 	for (i = 15; i >= 0; i--)
436 		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
437 	if (ecc)
438 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
439 #undef EDC_DATA
440 	return 0;
441 }
442 
443 /**
444  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
445  *	@adap: the adapter
446  *	@win: PCI-E Memory Window to use
447  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
448  *	@addr: address within indicated memory type
449  *	@len: amount of memory to transfer
450  *	@buf: host memory buffer
451  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
452  *
453  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
454  *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
456  *	a raw byte sequence from/to the firmware's memory.  If this memory
457  *	contains data structures which contain multi-byte integers, it's the
458  *	caller's responsibility to perform appropriate byte order conversions.
459  */
460 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
461 		 u32 len, __be32 *buf, int dir)
462 {
463 	u32 pos, offset, resid, memoffset;
464 	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
465 
466 	/* Argument sanity checks ...
467 	 */
468 	if (addr & 0x3)
469 		return -EINVAL;
470 
471 	/* It's convenient to be able to handle lengths which aren't a
472 	 * multiple of 32-bits because we often end up transferring files to
473 	 * the firmware.  So we'll handle that by normalizing the length here
474 	 * and then handling any residual transfer at the end.
475 	 */
476 	resid = len & 0x3;
477 	len -= resid;
478 
479 	/* Offset into the region of memory which is being accessed
480 	 * MEM_EDC0 = 0
481 	 * MEM_EDC1 = 1
482 	 * MEM_MC   = 2 -- T4
483 	 * MEM_MC0  = 2 -- For T5
484 	 * MEM_MC1  = 3 -- For T5
485 	 */
486 	edc_size  = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1) {
		memoffset = (mtype * (edc_size * 1024 * 1024));
	} else {
490 		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
491 						       MA_EXT_MEMORY_BAR));
492 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
493 	}
494 
495 	/* Determine the PCIE_MEM_ACCESS_OFFSET */
496 	addr = addr + memoffset;
497 
498 	/* Each PCI-E Memory Window is programmed with a window size -- or
499 	 * "aperture" -- which controls the granularity of its mapping onto
500 	 * adapter memory.  We need to grab that aperture in order to know
501 	 * how to use the specified window.  The window is also programmed
502 	 * with the base address of the Memory Window in BAR0's address
503 	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
504 	 * the address is relative to BAR0.
505 	 */
506 	mem_reg = t4_read_reg(adap,
507 			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
508 						  win));
509 	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
510 	mem_base = GET_PCIEOFST(mem_reg) << 10;
511 	if (is_t4(adap->params.chip))
512 		mem_base -= adap->t4_bar0;
513 	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
514 
515 	/* Calculate our initial PCI-E Memory Window Position and Offset into
516 	 * that Window.
517 	 */
518 	pos = addr & ~(mem_aperture-1);
519 	offset = addr - pos;
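	/* For example (illustrative): with a 64KB aperture, an adjusted
	 * address of 0x12345 yields pos = 0x10000 and offset = 0x2345.
	 */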
520 
521 	/* Set up initial PCI-E Memory Window to cover the start of our
522 	 * transfer.  (Read it back to ensure that changes propagate before we
523 	 * attempt to use the new value.)
524 	 */
525 	t4_write_reg(adap,
526 		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
527 		     pos | win_pf);
528 	t4_read_reg(adap,
529 		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
530 
531 	/* Transfer data to/from the adapter as long as there's an integral
532 	 * number of 32-bit transfers to complete.
533 	 */
534 	while (len > 0) {
535 		if (dir == T4_MEMORY_READ)
536 			*buf++ = (__force __be32) t4_read_reg(adap,
537 							mem_base + offset);
538 		else
539 			t4_write_reg(adap, mem_base + offset,
540 				     (__force u32) *buf++);
541 		offset += sizeof(__be32);
542 		len -= sizeof(__be32);
543 
544 		/* If we've reached the end of our current window aperture,
545 		 * move the PCI-E Memory Window on to the next.  Note that
546 		 * doing this here after "len" may be 0 allows us to set up
547 		 * the PCI-E Memory Window for a possible final residual
548 		 * transfer below ...
549 		 */
550 		if (offset == mem_aperture) {
551 			pos += mem_aperture;
552 			offset = 0;
553 			t4_write_reg(adap,
554 				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
555 							 win), pos | win_pf);
556 			t4_read_reg(adap,
557 				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
558 							win));
559 		}
560 	}
561 
562 	/* If the original transfer had a length which wasn't a multiple of
563 	 * 32-bits, now's where we need to finish off the transfer of the
564 	 * residual amount.  The PCI-E Memory Window has already been moved
565 	 * above (if necessary) to cover this final transfer.
566 	 */
567 	if (resid) {
568 		union {
569 			__be32 word;
570 			char byte[4];
571 		} last;
572 		unsigned char *bp;
573 		int i;
574 
575 		if (dir == T4_MEMORY_READ) {
576 			last.word = (__force __be32) t4_read_reg(adap,
577 							mem_base + offset);
578 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
579 				bp[i] = last.byte[i];
580 		} else {
581 			last.word = *buf;
582 			for (i = resid; i < 4; i++)
583 				last.byte[i] = 0;
584 			t4_write_reg(adap, mem_base + offset,
585 				     (__force u32) last.word);
586 		}
587 	}
588 
589 	return 0;
590 }
591 
592 #define EEPROM_STAT_ADDR   0x7bfc
593 #define VPD_BASE           0x400
594 #define VPD_BASE_OLD       0
595 #define VPD_LEN            1024
596 #define CHELSIO_VPD_UNIQUE_ID 0x82
597 
598 /**
599  *	t4_seeprom_wp - enable/disable EEPROM write protection
600  *	@adapter: the adapter
601  *	@enable: whether to enable or disable write protection
602  *
603  *	Enables or disables write protection on the serial EEPROM.
604  */
605 int t4_seeprom_wp(struct adapter *adapter, bool enable)
606 {
607 	unsigned int v = enable ? 0xc : 0;
608 	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
609 	return ret < 0 ? ret : 0;
610 }
611 
612 /**
613  *	get_vpd_params - read VPD parameters from VPD EEPROM
614  *	@adapter: adapter to read
615  *	@p: where to store the parameters
616  *
617  *	Reads card parameters stored in VPD EEPROM.
618  */
619 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
620 {
621 	u32 cclk_param, cclk_val;
622 	int i, ret, addr;
623 	int ec, sn, pn;
624 	u8 *vpd, csum;
625 	unsigned int vpdr_len, kw_offset, id_len;
626 
627 	vpd = vmalloc(VPD_LEN);
628 	if (!vpd)
629 		return -ENOMEM;
630 
631 	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
632 	if (ret < 0)
633 		goto out;
634 
	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For Chelsio adapters the identifier is CHELSIO_VPD_UNIQUE_ID (0x82)
	 * and it must be the first byte of the VPD.  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
641 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
642 
643 	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
644 	if (ret < 0)
645 		goto out;
646 
647 	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
648 		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
649 		ret = -EINVAL;
650 		goto out;
651 	}
652 
653 	id_len = pci_vpd_lrdt_size(vpd);
654 	if (id_len > ID_LEN)
655 		id_len = ID_LEN;
656 
657 	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
658 	if (i < 0) {
659 		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
660 		ret = -EINVAL;
661 		goto out;
662 	}
663 
664 	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
665 	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
666 	if (vpdr_len + kw_offset > VPD_LEN) {
667 		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
668 		ret = -EINVAL;
669 		goto out;
670 	}
671 
672 #define FIND_VPD_KW(var, name) do { \
673 	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
674 	if (var < 0) { \
675 		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
676 		ret = -EINVAL; \
677 		goto out; \
678 	} \
679 	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
680 } while (0)
681 
682 	FIND_VPD_KW(i, "RV");
683 	for (csum = 0; i >= 0; i--)
684 		csum += vpd[i];
685 
686 	if (csum) {
687 		dev_err(adapter->pdev_dev,
688 			"corrupted VPD EEPROM, actual csum %u\n", csum);
689 		ret = -EINVAL;
690 		goto out;
691 	}
692 
693 	FIND_VPD_KW(ec, "EC");
694 	FIND_VPD_KW(sn, "SN");
695 	FIND_VPD_KW(pn, "PN");
696 #undef FIND_VPD_KW
697 
698 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
699 	strim(p->id);
700 	memcpy(p->ec, vpd + ec, EC_LEN);
701 	strim(p->ec);
702 	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
703 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
704 	strim(p->sn);
705 	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
706 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
707 	strim(p->pn);
708 
709 	/*
710 	 * Ask firmware for the Core Clock since it knows how to translate the
711 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
712 	 */
713 	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
714 		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
715 	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
716 			      1, &cclk_param, &cclk_val);
717 
718 out:
719 	vfree(vpd);
720 	if (ret)
721 		return ret;
722 	p->cclk = cclk_val;
723 
724 	return 0;
725 }
726 
727 /* serial flash and firmware constants */
728 enum {
729 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
730 
731 	/* flash command opcodes */
732 	SF_PROG_PAGE    = 2,          /* program page */
733 	SF_WR_DISABLE   = 4,          /* disable writes */
734 	SF_RD_STATUS    = 5,          /* read status register */
735 	SF_WR_ENABLE    = 6,          /* enable writes */
736 	SF_RD_DATA_FAST = 0xb,        /* read flash */
737 	SF_RD_ID        = 0x9f,       /* read ID */
738 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
739 
740 	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
741 };
742 
743 /**
744  *	sf1_read - read data from the serial flash
745  *	@adapter: the adapter
746  *	@byte_cnt: number of bytes to read
747  *	@cont: whether another operation will be chained
748  *	@lock: whether to lock SF for PL access only
749  *	@valp: where to store the read data
750  *
751  *	Reads up to 4 bytes of data from the serial flash.  The location of
752  *	the read needs to be specified prior to calling this by issuing the
753  *	appropriate commands to the serial flash.
754  */
755 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
756 		    int lock, u32 *valp)
757 {
758 	int ret;
759 
760 	if (!byte_cnt || byte_cnt > 4)
761 		return -EINVAL;
762 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
763 		return -EBUSY;
764 	cont = cont ? SF_CONT : 0;
765 	lock = lock ? SF_LOCK : 0;
766 	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
767 	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
768 	if (!ret)
769 		*valp = t4_read_reg(adapter, SF_DATA);
770 	return ret;
771 }
772 
773 /**
774  *	sf1_write - write data to the serial flash
775  *	@adapter: the adapter
776  *	@byte_cnt: number of bytes to write
777  *	@cont: whether another operation will be chained
778  *	@lock: whether to lock SF for PL access only
779  *	@val: value to write
780  *
781  *	Writes up to 4 bytes of data to the serial flash.  The location of
782  *	the write needs to be specified prior to calling this by issuing the
783  *	appropriate commands to the serial flash.
784  */
785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
786 		     int lock, u32 val)
787 {
788 	if (!byte_cnt || byte_cnt > 4)
789 		return -EINVAL;
790 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
791 		return -EBUSY;
792 	cont = cont ? SF_CONT : 0;
793 	lock = lock ? SF_LOCK : 0;
794 	t4_write_reg(adapter, SF_DATA, val);
795 	t4_write_reg(adapter, SF_OP, lock |
796 		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
797 	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
798 }
799 
800 /**
801  *	flash_wait_op - wait for a flash operation to complete
802  *	@adapter: the adapter
803  *	@attempts: max number of polls of the status register
804  *	@delay: delay between polls in ms
805  *
806  *	Wait for a flash operation to complete by polling the status register.
807  */
808 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
809 {
810 	int ret;
811 	u32 status;
812 
813 	while (1) {
814 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
815 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
816 			return ret;
817 		if (!(status & 1))
818 			return 0;
819 		if (--attempts == 0)
820 			return -EAGAIN;
821 		if (delay)
822 			msleep(delay);
823 	}
824 }
825 
826 /**
827  *	t4_read_flash - read words from serial flash
828  *	@adapter: the adapter
829  *	@addr: the start address for the read
830  *	@nwords: how many 32-bit words to read
831  *	@data: where to store the read data
832  *	@byte_oriented: whether to store data as bytes or as words
833  *
834  *	Read the specified number of 32-bit words from the serial flash.
835  *	If @byte_oriented is set the read data is stored as a byte array
836  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
838  */
839 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
840 			 unsigned int nwords, u32 *data, int byte_oriented)
841 {
842 	int ret;
843 
844 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
845 		return -EINVAL;
846 
847 	addr = swab32(addr) | SF_RD_DATA_FAST;
848 
849 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
850 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
851 		return ret;
852 
853 	for ( ; nwords; nwords--, data++) {
854 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
855 		if (nwords == 1)
856 			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
857 		if (ret)
858 			return ret;
859 		if (byte_oriented)
860 			*data = (__force __u32) (htonl(*data));
861 	}
862 	return 0;
863 }
864 
865 /**
866  *	t4_write_flash - write up to a page of data to the serial flash
867  *	@adapter: the adapter
868  *	@addr: the start address to write
869  *	@n: length of data to write in bytes
870  *	@data: the data to write
871  *
872  *	Writes up to a page of data (256 bytes) to the serial flash starting
873  *	at the given address.  All the data must be written to the same page.
874  */
875 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
876 			  unsigned int n, const u8 *data)
877 {
878 	int ret;
879 	u32 buf[64];
880 	unsigned int i, c, left, val, offset = addr & 0xff;
881 
882 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
883 		return -EINVAL;
884 
885 	val = swab32(addr) | SF_PROG_PAGE;
886 
887 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
888 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
889 		goto unlock;
890 
891 	for (left = n; left; left -= c) {
892 		c = min(left, 4U);
893 		for (val = 0, i = 0; i < c; ++i)
894 			val = (val << 8) + *data++;
895 
896 		ret = sf1_write(adapter, c, c != left, 1, val);
897 		if (ret)
898 			goto unlock;
899 	}
900 	ret = flash_wait_op(adapter, 8, 1);
901 	if (ret)
902 		goto unlock;
903 
904 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
905 
906 	/* Read the page to verify the write succeeded */
907 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
908 	if (ret)
909 		return ret;
910 
911 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
912 		dev_err(adapter->pdev_dev,
913 			"failed to correctly write the flash page at %#x\n",
914 			addr);
915 		return -EIO;
916 	}
917 	return 0;
918 
919 unlock:
920 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
921 	return ret;
922 }
923 
924 /**
925  *	t4_get_fw_version - read the firmware version
926  *	@adapter: the adapter
927  *	@vers: where to place the version
928  *
929  *	Reads the FW version from flash.
930  */
931 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
932 {
933 	return t4_read_flash(adapter, FLASH_FW_START +
934 			     offsetof(struct fw_hdr, fw_ver), 1,
935 			     vers, 0);
936 }
937 
938 /**
939  *	t4_get_tp_version - read the TP microcode version
940  *	@adapter: the adapter
941  *	@vers: where to place the version
942  *
943  *	Reads the TP microcode version from flash.
944  */
945 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
946 {
947 	return t4_read_flash(adapter, FLASH_FW_START +
948 			     offsetof(struct fw_hdr, tp_microcode_ver),
949 			     1, vers, 0);
950 }
951 
952 /* Is the given firmware API compatible with the one the driver was compiled
953  * with?
954  */
955 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
956 {
957 
958 	/* short circuit if it's the exact same firmware version */
959 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
960 		return 1;
961 
962 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
963 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
964 	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
965 		return 1;
966 #undef SAME_INTF
967 
968 	return 0;
969 }
970 
971 /* The firmware in the filesystem is usable, but should it be installed?
972  * This routine explains itself in detail if it indicates the filesystem
973  * firmware should be installed.
974  */
975 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
976 				int k, int c)
977 {
978 	const char *reason;
979 
980 	if (!card_fw_usable) {
981 		reason = "incompatible or unusable";
982 		goto install;
983 	}
984 
985 	if (k > c) {
986 		reason = "older than the version supported with this driver";
987 		goto install;
988 	}
989 
990 	return 0;
991 
992 install:
993 	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
994 		"installing firmware %u.%u.%u.%u on card.\n",
995 		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
996 		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
997 		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
998 		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
999 
1000 	return 1;
1001 }
1002 
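/**
 *	t4_prep_fw - pick a usable firmware, installing a new one if needed
 *	@adap: the adapter
 *	@fw_info: firmware interface information the driver was compiled with
 *	@fw_data: firmware image from the filesystem, or NULL if none
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: where to cache the header of the firmware on the card
 *	@state: the current state of the device
 *	@reset: cleared if installing the filesystem firmware already reset
 *		the device
 *
 *	Keeps the card's firmware when it is compatible and an exact version
 *	match, upgrades to the filesystem firmware when that one is usable,
 *	the device is uninitialized and should_install_fs_fw() approves, and
 *	fails if neither copy is usable.
 */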
1003 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1004 	       const u8 *fw_data, unsigned int fw_size,
1005 	       struct fw_hdr *card_fw, enum dev_state state,
1006 	       int *reset)
1007 {
1008 	int ret, card_fw_usable, fs_fw_usable;
1009 	const struct fw_hdr *fs_fw;
1010 	const struct fw_hdr *drv_fw;
1011 
1012 	drv_fw = &fw_info->fw_hdr;
1013 
1014 	/* Read the header of the firmware on the card */
1015 	ret = -t4_read_flash(adap, FLASH_FW_START,
1016 			    sizeof(*card_fw) / sizeof(uint32_t),
1017 			    (uint32_t *)card_fw, 1);
1018 	if (ret == 0) {
1019 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
1020 	} else {
1021 		dev_err(adap->pdev_dev,
1022 			"Unable to read card's firmware header: %d\n", ret);
1023 		card_fw_usable = 0;
1024 	}
1025 
1026 	if (fw_data != NULL) {
1027 		fs_fw = (const void *)fw_data;
1028 		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
1029 	} else {
1030 		fs_fw = NULL;
1031 		fs_fw_usable = 0;
1032 	}
1033 
1034 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
1035 	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
1036 		/* Common case: the firmware on the card is an exact match and
1037 		 * the filesystem one is an exact match too, or the filesystem
1038 		 * one is absent/incompatible.
1039 		 */
1040 	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
1041 		   should_install_fs_fw(adap, card_fw_usable,
1042 					be32_to_cpu(fs_fw->fw_ver),
1043 					be32_to_cpu(card_fw->fw_ver))) {
1044 		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
1045 				     fw_size, 0);
1046 		if (ret != 0) {
1047 			dev_err(adap->pdev_dev,
1048 				"failed to install firmware: %d\n", ret);
1049 			goto bye;
1050 		}
1051 
1052 		/* Installed successfully, update the cached header too. */
1053 		memcpy(card_fw, fs_fw, sizeof(*card_fw));
1054 		card_fw_usable = 1;
1055 		*reset = 0;	/* already reset as part of load_fw */
1056 	}
1057 
1058 	if (!card_fw_usable) {
1059 		uint32_t d, c, k;
1060 
1061 		d = be32_to_cpu(drv_fw->fw_ver);
1062 		c = be32_to_cpu(card_fw->fw_ver);
1063 		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1064 
1065 		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1066 			"chip state %d, "
1067 			"driver compiled with %d.%d.%d.%d, "
1068 			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1069 			state,
1070 			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1071 			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1072 			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1073 			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1074 			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1075 			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1076 		ret = EINVAL;
1077 		goto bye;
1078 	}
1079 
1080 	/* We're using whatever's on the card and it's known to be good. */
1081 	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1082 	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1083 
1084 bye:
1085 	return ret;
1086 }
1087 
1088 /**
1089  *	t4_flash_erase_sectors - erase a range of flash sectors
1090  *	@adapter: the adapter
1091  *	@start: the first sector to erase
1092  *	@end: the last sector to erase
1093  *
1094  *	Erases the sectors in the given inclusive range.
1095  */
1096 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1097 {
1098 	int ret = 0;
1099 
1100 	if (end >= adapter->params.sf_nsec)
1101 		return -EINVAL;
1102 
1103 	while (start <= end) {
1104 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1105 		    (ret = sf1_write(adapter, 4, 0, 1,
1106 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1107 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1108 			dev_err(adapter->pdev_dev,
1109 				"erase of flash sector %d failed, error %d\n",
1110 				start, ret);
1111 			break;
1112 		}
1113 		start++;
1114 	}
1115 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
1116 	return ret;
1117 }
1118 
1119 /**
1120  *	t4_flash_cfg_addr - return the address of the flash configuration file
1121  *	@adapter: the adapter
1122  *
1123  *	Return the address within the flash where the Firmware Configuration
1124  *	File is stored.
1125  */
1126 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1127 {
1128 	if (adapter->params.sf_size == 0x100000)
1129 		return FLASH_FPGA_CFG_START;
1130 	else
1131 		return FLASH_CFG_START;
1132 }
1133 
1134 /**
1135  *	t4_load_fw - download firmware
1136  *	@adap: the adapter
1137  *	@fw_data: the firmware image to write
1138  *	@size: image size
1139  *
1140  *	Write the supplied firmware image to the card's serial flash.
1141  */
1142 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1143 {
1144 	u32 csum;
1145 	int ret, addr;
1146 	unsigned int i;
1147 	u8 first_page[SF_PAGE_SIZE];
1148 	const __be32 *p = (const __be32 *)fw_data;
1149 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1150 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1151 	unsigned int fw_img_start = adap->params.sf_fw_start;
1152 	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
1153 
1154 	if (!size) {
1155 		dev_err(adap->pdev_dev, "FW image has no data\n");
1156 		return -EINVAL;
1157 	}
1158 	if (size & 511) {
1159 		dev_err(adap->pdev_dev,
1160 			"FW image size not multiple of 512 bytes\n");
1161 		return -EINVAL;
1162 	}
1163 	if (ntohs(hdr->len512) * 512 != size) {
1164 		dev_err(adap->pdev_dev,
1165 			"FW image size differs from size in FW header\n");
1166 		return -EINVAL;
1167 	}
1168 	if (size > FW_MAX_SIZE) {
1169 		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1170 			FW_MAX_SIZE);
1171 		return -EFBIG;
1172 	}
1173 
1174 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1175 		csum += ntohl(p[i]);
1176 
1177 	if (csum != 0xffffffff) {
1178 		dev_err(adap->pdev_dev,
1179 			"corrupted firmware image, checksum %#x\n", csum);
1180 		return -EINVAL;
1181 	}
1182 
1183 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1184 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1185 	if (ret)
1186 		goto out;
1187 
1188 	/*
1189 	 * We write the correct version at the end so the driver can see a bad
1190 	 * version if the FW write fails.  Start by writing a copy of the
1191 	 * first page with a bad version.
1192 	 */
1193 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1194 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1195 	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1196 	if (ret)
1197 		goto out;
1198 
1199 	addr = fw_img_start;
1200 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1201 		addr += SF_PAGE_SIZE;
1202 		fw_data += SF_PAGE_SIZE;
1203 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1204 		if (ret)
1205 			goto out;
1206 	}
1207 
1208 	ret = t4_write_flash(adap,
1209 			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
1210 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1211 out:
1212 	if (ret)
1213 		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1214 			ret);
1215 	return ret;
1216 }
1217 
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G | \
1219 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1220 		     FW_PORT_CAP_ANEG)
1221 
1222 /**
1223  *	t4_link_start - apply link configuration to MAC/PHY
1224  *	@phy: the PHY to setup
1225  *	@mac: the MAC to setup
1226  *	@lc: the requested link configuration
1227  *
1228  *	Set up a port's MAC and PHY according to a desired link configuration.
1229  *	- If the PHY can auto-negotiate first decide what to advertise, then
1230  *	  enable/disable auto-negotiation as desired, and reset.
1231  *	- If the PHY does not auto-negotiate just reset it.
1232  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1233  *	  otherwise do it later based on the outcome of auto-negotiation.
1234  */
1235 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1236 		  struct link_config *lc)
1237 {
1238 	struct fw_port_cmd c;
1239 	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1240 
1241 	lc->link_ok = 0;
1242 	if (lc->requested_fc & PAUSE_RX)
1243 		fc |= FW_PORT_CAP_FC_RX;
1244 	if (lc->requested_fc & PAUSE_TX)
1245 		fc |= FW_PORT_CAP_FC_TX;
1246 
1247 	memset(&c, 0, sizeof(c));
1248 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1249 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1250 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1251 				  FW_LEN16(c));
1252 
1253 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1254 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1255 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1256 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1257 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1258 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1259 	} else
1260 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1261 
1262 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1263 }
1264 
1265 /**
1266  *	t4_restart_aneg - restart autonegotiation
1267  *	@adap: the adapter
1268  *	@mbox: mbox to use for the FW command
1269  *	@port: the port id
1270  *
1271  *	Restarts autonegotiation for the selected port.
1272  */
1273 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1274 {
1275 	struct fw_port_cmd c;
1276 
1277 	memset(&c, 0, sizeof(c));
1278 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1279 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1280 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1281 				  FW_LEN16(c));
1282 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1283 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1284 }
1285 
1286 typedef void (*int_handler_t)(struct adapter *adap);
1287 
1288 struct intr_info {
1289 	unsigned int mask;       /* bits to check in interrupt status */
1290 	const char *msg;         /* message to print or NULL */
1291 	short stat_idx;          /* stat counter to increment or -1 */
1292 	unsigned short fatal;    /* whether the condition reported is fatal */
1293 	int_handler_t int_handler; /* platform-specific int handler */
1294 };
1295 
1296 /**
1297  *	t4_handle_intr_status - table driven interrupt handler
1298  *	@adapter: the adapter that generated the interrupt
1299  *	@reg: the interrupt status register to process
1300  *	@acts: table of interrupt actions
1301  *
1302  *	A table driven interrupt handler that applies a set of masks to an
1303  *	interrupt status word and performs the corresponding actions if the
1304  *	interrupts described by the mask have occurred.  The actions include
1305  *	optionally emitting a warning or alert message.  The table is terminated
1306  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1307  *	conditions.
1308  */
1309 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1310 				 const struct intr_info *acts)
1311 {
1312 	int fatal = 0;
1313 	unsigned int mask = 0;
1314 	unsigned int status = t4_read_reg(adapter, reg);
1315 
1316 	for ( ; acts->mask; ++acts) {
1317 		if (!(status & acts->mask))
1318 			continue;
1319 		if (acts->fatal) {
1320 			fatal++;
1321 			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1322 				  status & acts->mask);
1323 		} else if (acts->msg && printk_ratelimit())
1324 			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1325 				 status & acts->mask);
1326 		if (acts->int_handler)
1327 			acts->int_handler(adapter);
1328 		mask |= acts->mask;
1329 	}
1330 	status &= mask;
1331 	if (status)                           /* clear processed interrupts */
1332 		t4_write_reg(adapter, reg, status);
1333 	return fatal;
1334 }
1335 
1336 /*
1337  * Interrupt handler for the PCIE module.
1338  */
1339 static void pcie_intr_handler(struct adapter *adapter)
1340 {
1341 	static const struct intr_info sysbus_intr_info[] = {
1342 		{ RNPP, "RXNP array parity error", -1, 1 },
1343 		{ RPCP, "RXPC array parity error", -1, 1 },
1344 		{ RCIP, "RXCIF array parity error", -1, 1 },
1345 		{ RCCP, "Rx completions control array parity error", -1, 1 },
1346 		{ RFTP, "RXFT array parity error", -1, 1 },
1347 		{ 0 }
1348 	};
1349 	static const struct intr_info pcie_port_intr_info[] = {
1350 		{ TPCP, "TXPC array parity error", -1, 1 },
1351 		{ TNPP, "TXNP array parity error", -1, 1 },
1352 		{ TFTP, "TXFT array parity error", -1, 1 },
1353 		{ TCAP, "TXCA array parity error", -1, 1 },
1354 		{ TCIP, "TXCIF array parity error", -1, 1 },
1355 		{ RCAP, "RXCA array parity error", -1, 1 },
1356 		{ OTDD, "outbound request TLP discarded", -1, 1 },
1357 		{ RDPE, "Rx data parity error", -1, 1 },
1358 		{ TDUE, "Tx uncorrectable data error", -1, 1 },
1359 		{ 0 }
1360 	};
1361 	static const struct intr_info pcie_intr_info[] = {
1362 		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1363 		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1364 		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
1365 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1366 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1367 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1368 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1369 		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1370 		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1372 		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1373 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1374 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1375 		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1376 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1377 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1378 		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1379 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1380 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1381 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1382 		{ FIDPERR, "PCI FID parity error", -1, 1 },
1383 		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1384 		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
1385 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1386 		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1387 		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
1388 		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
1389 		{ PCIESINT, "PCI core secondary fault", -1, 1 },
1390 		{ PCIEPINT, "PCI core primary fault", -1, 1 },
1391 		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1392 		{ 0 }
1393 	};
1394 
	static const struct intr_info t5_pcie_intr_info[] = {
1396 		{ MSTGRPPERR, "Master Response Read Queue parity error",
1397 		  -1, 1 },
1398 		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1399 		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1400 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1401 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1402 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1403 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1404 		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1405 		  -1, 1 },
1406 		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1407 		  -1, 1 },
		{ TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1409 		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1410 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1411 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1412 		{ DREQWRPERR, "PCI DMA channel write request parity error",
1413 		  -1, 1 },
1414 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1415 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel write request parity error",
		  -1, 1 },
1417 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1418 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1419 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1420 		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
1422 		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1423 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1424 		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1425 		  -1, 1 },
1426 		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1427 		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1428 		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1429 		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1430 		{ READRSPERR, "Outbound read error", -1, 0 },
1431 		{ 0 }
1432 	};
1433 
1434 	int fat;
1435 
1436 	if (is_t4(adapter->params.chip))
1437 		fat = t4_handle_intr_status(adapter,
1438 					    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1439 					    sysbus_intr_info) +
1440 			t4_handle_intr_status(adapter,
1441 					      PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1442 					      pcie_port_intr_info) +
1443 			t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1444 					      pcie_intr_info);
1445 	else
1446 		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1447 					    t5_pcie_intr_info);
1448 
1449 	if (fat)
1450 		t4_fatal_err(adapter);
1451 }
1452 
1453 /*
1454  * TP interrupt handler.
1455  */
1456 static void tp_intr_handler(struct adapter *adapter)
1457 {
1458 	static const struct intr_info tp_intr_info[] = {
1459 		{ 0x3fffffff, "TP parity error", -1, 1 },
1460 		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1461 		{ 0 }
1462 	};
1463 
1464 	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1465 		t4_fatal_err(adapter);
1466 }
1467 
1468 /*
1469  * SGE interrupt handler.
1470  */
1471 static void sge_intr_handler(struct adapter *adapter)
1472 {
1473 	u64 v;
1474 
1475 	static const struct intr_info sge_intr_info[] = {
1476 		{ ERR_CPL_EXCEED_IQE_SIZE,
1477 		  "SGE received CPL exceeding IQE size", -1, 1 },
1478 		{ ERR_INVALID_CIDX_INC,
1479 		  "SGE GTS CIDX increment too large", -1, 0 },
1480 		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1481 		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1482 		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1483 		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1484 		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1485 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1486 		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1487 		  0 },
1488 		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1489 		  0 },
1490 		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1491 		  0 },
1492 		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1493 		  0 },
1494 		{ ERR_ING_CTXT_PRIO,
1495 		  "SGE too many priority ingress contexts", -1, 0 },
1496 		{ ERR_EGR_CTXT_PRIO,
1497 		  "SGE too many priority egress contexts", -1, 0 },
1498 		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1499 		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1500 		{ 0 }
1501 	};
1502 
1503 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1504 		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1505 	if (v) {
1506 		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1507 				(unsigned long long)v);
1508 		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1509 		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1510 	}
1511 
1512 	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1513 	    v != 0)
1514 		t4_fatal_err(adapter);
1515 }
1516 
1517 /*
1518  * CIM interrupt handler.
1519  */
1520 static void cim_intr_handler(struct adapter *adapter)
1521 {
1522 	static const struct intr_info cim_intr_info[] = {
1523 		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1524 		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
1525 		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
1526 		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1527 		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1528 		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1529 		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1530 		{ 0 }
1531 	};
1532 	static const struct intr_info cim_upintr_info[] = {
1533 		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1534 		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1535 		{ ILLWRINT, "CIM illegal write", -1, 1 },
1536 		{ ILLRDINT, "CIM illegal read", -1, 1 },
1537 		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1538 		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1539 		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1540 		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1541 		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1542 		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1543 		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1544 		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1545 		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1546 		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1547 		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1548 		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1561 		{ 0 }
1562 	};
1563 
1564 	int fat;
1565 
	if (t4_read_reg(adapter, PCIE_FW) & FW_PCIE_FW_ERR)
1567 		t4_report_fw_error(adapter);
1568 
1569 	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1570 				    cim_intr_info) +
1571 	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1572 				    cim_upintr_info);
1573 	if (fat)
1574 		t4_fatal_err(adapter);
1575 }
1576 
1577 /*
1578  * ULP RX interrupt handler.
1579  */
1580 static void ulprx_intr_handler(struct adapter *adapter)
1581 {
1582 	static const struct intr_info ulprx_intr_info[] = {
1583 		{ 0x1800000, "ULPRX context error", -1, 1 },
1584 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1585 		{ 0 }
1586 	};
1587 
1588 	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1589 		t4_fatal_err(adapter);
1590 }
1591 
1592 /*
1593  * ULP TX interrupt handler.
1594  */
1595 static void ulptx_intr_handler(struct adapter *adapter)
1596 {
1597 	static const struct intr_info ulptx_intr_info[] = {
1598 		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1599 		  0 },
1600 		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1601 		  0 },
1602 		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1603 		  0 },
1604 		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1605 		  0 },
1606 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1607 		{ 0 }
1608 	};
1609 
1610 	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1611 		t4_fatal_err(adapter);
1612 }
1613 
1614 /*
1615  * PM TX interrupt handler.
1616  */
1617 static void pmtx_intr_handler(struct adapter *adapter)
1618 {
1619 	static const struct intr_info pmtx_intr_info[] = {
1620 		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1621 		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1622 		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1623 		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1624 		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1625 		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1626 		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1627 		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1628 		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1629 		{ 0 }
1630 	};
1631 
1632 	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1633 		t4_fatal_err(adapter);
1634 }
1635 
1636 /*
1637  * PM RX interrupt handler.
1638  */
1639 static void pmrx_intr_handler(struct adapter *adapter)
1640 {
1641 	static const struct intr_info pmrx_intr_info[] = {
1642 		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1643 		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1644 		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1645 		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1646 		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1647 		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1648 		{ 0 }
1649 	};
1650 
1651 	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1652 		t4_fatal_err(adapter);
1653 }
1654 
1655 /*
1656  * CPL switch interrupt handler.
1657  */
1658 static void cplsw_intr_handler(struct adapter *adapter)
1659 {
1660 	static const struct intr_info cplsw_intr_info[] = {
1661 		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1662 		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1663 		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1664 		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1665 		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1666 		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1667 		{ 0 }
1668 	};
1669 
1670 	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1671 		t4_fatal_err(adapter);
1672 }
1673 
1674 /*
1675  * LE interrupt handler.
1676  */
1677 static void le_intr_handler(struct adapter *adap)
1678 {
1679 	static const struct intr_info le_intr_info[] = {
1680 		{ LIPMISS, "LE LIP miss", -1, 0 },
1681 		{ LIP0, "LE 0 LIP error", -1, 0 },
1682 		{ PARITYERR, "LE parity error", -1, 1 },
1683 		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
1684 		{ REQQPARERR, "LE request queue parity error", -1, 1 },
1685 		{ 0 }
1686 	};
1687 
1688 	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1689 		t4_fatal_err(adap);
1690 }
1691 
1692 /*
1693  * MPS interrupt handler.
1694  */
1695 static void mps_intr_handler(struct adapter *adapter)
1696 {
1697 	static const struct intr_info mps_rx_intr_info[] = {
1698 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1699 		{ 0 }
1700 	};
1701 	static const struct intr_info mps_tx_intr_info[] = {
1702 		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1703 		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1704 		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1705 		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1706 		{ BUBBLE, "MPS Tx underflow", -1, 1 },
1707 		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1708 		{ FRMERR, "MPS Tx framing error", -1, 1 },
1709 		{ 0 }
1710 	};
1711 	static const struct intr_info mps_trc_intr_info[] = {
1712 		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
1713 		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1714 		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
1715 		{ 0 }
1716 	};
1717 	static const struct intr_info mps_stat_sram_intr_info[] = {
1718 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1719 		{ 0 }
1720 	};
1721 	static const struct intr_info mps_stat_tx_intr_info[] = {
1722 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1723 		{ 0 }
1724 	};
1725 	static const struct intr_info mps_stat_rx_intr_info[] = {
1726 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1727 		{ 0 }
1728 	};
1729 	static const struct intr_info mps_cls_intr_info[] = {
1730 		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1731 		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1732 		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1733 		{ 0 }
1734 	};
1735 
1736 	int fat;
1737 
1738 	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1739 				    mps_rx_intr_info) +
1740 	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1741 				    mps_tx_intr_info) +
1742 	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1743 				    mps_trc_intr_info) +
1744 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1745 				    mps_stat_sram_intr_info) +
1746 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1747 				    mps_stat_tx_intr_info) +
1748 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1749 				    mps_stat_rx_intr_info) +
1750 	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1751 				    mps_cls_intr_info);
1752 
1753 	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1754 		     RXINT | TXINT | STATINT);
1755 	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
1756 	if (fat)
1757 		t4_fatal_err(adapter);
1758 }
1759 
1760 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1761 
1762 /*
1763  * EDC/MC interrupt handler.
1764  */
1765 static void mem_intr_handler(struct adapter *adapter, int idx)
1766 {
1767 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1768 
1769 	unsigned int addr, cnt_addr, v;
1770 
1771 	if (idx <= MEM_EDC1) {
1772 		addr = EDC_REG(EDC_INT_CAUSE, idx);
1773 		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1774 	} else if (idx == MEM_MC) {
1775 		if (is_t4(adapter->params.chip)) {
1776 			addr = MC_INT_CAUSE;
1777 			cnt_addr = MC_ECC_STATUS;
1778 		} else {
1779 			addr = MC_P_INT_CAUSE;
1780 			cnt_addr = MC_P_ECC_STATUS;
1781 		}
1782 	} else {
1783 		addr = MC_REG(MC_P_INT_CAUSE, 1);
1784 		cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
1785 	}
1786 
1787 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1788 	if (v & PERR_INT_CAUSE)
1789 		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1790 			  name[idx]);
1791 	if (v & ECC_CE_INT_CAUSE) {
1792 		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1793 
1794 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1795 		if (printk_ratelimit())
1796 			dev_warn(adapter->pdev_dev,
1797 				 "%u %s correctable ECC data error%s\n",
1798 				 cnt, name[idx], cnt > 1 ? "s" : "");
1799 	}
1800 	if (v & ECC_UE_INT_CAUSE)
1801 		dev_alert(adapter->pdev_dev,
1802 			  "%s uncorrectable ECC data error\n", name[idx]);
1803 
1804 	t4_write_reg(adapter, addr, v);
1805 	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1806 		t4_fatal_err(adapter);
1807 }
1808 
1809 /*
1810  * MA interrupt handler.
1811  */
1812 static void ma_intr_handler(struct adapter *adap)
1813 {
1814 	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1815 
1816 	if (status & MEM_PERR_INT_CAUSE) {
1817 		dev_alert(adap->pdev_dev,
1818 			  "MA parity error, parity status %#x\n",
1819 			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1820 		if (is_t5(adap->params.chip))
1821 			dev_alert(adap->pdev_dev,
1822 				  "MA parity error, parity status %#x\n",
1823 				  t4_read_reg(adap,
1824 					      MA_PARITY_ERROR_STATUS2));
1825 	}
1826 	if (status & MEM_WRAP_INT_CAUSE) {
1827 		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1828 		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1829 			  "client %u to address %#x\n",
1830 			  MEM_WRAP_CLIENT_NUM_GET(v),
1831 			  MEM_WRAP_ADDRESS_GET(v) << 4);
1832 	}
1833 	t4_write_reg(adap, MA_INT_CAUSE, status);
1834 	t4_fatal_err(adap);
1835 }
1836 
1837 /*
1838  * SMB interrupt handler.
1839  */
1840 static void smb_intr_handler(struct adapter *adap)
1841 {
1842 	static const struct intr_info smb_intr_info[] = {
1843 		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1844 		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1845 		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1846 		{ 0 }
1847 	};
1848 
1849 	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1850 		t4_fatal_err(adap);
1851 }
1852 
1853 /*
1854  * NC-SI interrupt handler.
1855  */
1856 static void ncsi_intr_handler(struct adapter *adap)
1857 {
1858 	static const struct intr_info ncsi_intr_info[] = {
1859 		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1860 		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1861 		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1862 		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1863 		{ 0 }
1864 	};
1865 
1866 	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1867 		t4_fatal_err(adap);
1868 }
1869 
1870 /*
1871  * XGMAC interrupt handler.
1872  */
1873 static void xgmac_intr_handler(struct adapter *adap, int port)
1874 {
1875 	u32 v, int_cause_reg;
1876 
1877 	if (is_t4(adap->params.chip))
1878 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1879 	else
1880 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1881 
1882 	v = t4_read_reg(adap, int_cause_reg);
1883 
1884 	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1885 	if (!v)
1886 		return;
1887 
1888 	if (v & TXFIFO_PRTY_ERR)
1889 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1890 			  port);
1891 	if (v & RXFIFO_PRTY_ERR)
1892 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1893 			  port);
	t4_write_reg(adap, int_cause_reg, v);
1895 	t4_fatal_err(adap);
1896 }
1897 
1898 /*
1899  * PL interrupt handler.
1900  */
1901 static void pl_intr_handler(struct adapter *adap)
1902 {
1903 	static const struct intr_info pl_intr_info[] = {
1904 		{ FATALPERR, "T4 fatal parity error", -1, 1 },
1905 		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1906 		{ 0 }
1907 	};
1908 
1909 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1910 		t4_fatal_err(adap);
1911 }
1912 
1913 #define PF_INTR_MASK (PFSW)
1914 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1915 		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1916 		CPL_SWITCH | SGE | ULP_TX)
1917 
1918 /**
1919  *	t4_slow_intr_handler - control path interrupt handler
1920  *	@adapter: the adapter
1921  *
1922  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
1923  *	The designation 'slow' is because it involves register reads, while
1924  *	data interrupts typically don't involve any MMIOs.
1925  */
1926 int t4_slow_intr_handler(struct adapter *adapter)
1927 {
1928 	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1929 
1930 	if (!(cause & GLBL_INTR_MASK))
1931 		return 0;
1932 	if (cause & CIM)
1933 		cim_intr_handler(adapter);
1934 	if (cause & MPS)
1935 		mps_intr_handler(adapter);
1936 	if (cause & NCSI)
1937 		ncsi_intr_handler(adapter);
1938 	if (cause & PL)
1939 		pl_intr_handler(adapter);
1940 	if (cause & SMB)
1941 		smb_intr_handler(adapter);
1942 	if (cause & XGMAC0)
1943 		xgmac_intr_handler(adapter, 0);
1944 	if (cause & XGMAC1)
1945 		xgmac_intr_handler(adapter, 1);
1946 	if (cause & XGMAC_KR0)
1947 		xgmac_intr_handler(adapter, 2);
1948 	if (cause & XGMAC_KR1)
1949 		xgmac_intr_handler(adapter, 3);
1950 	if (cause & PCIE)
1951 		pcie_intr_handler(adapter);
1952 	if (cause & MC)
1953 		mem_intr_handler(adapter, MEM_MC);
1954 	if (!is_t4(adapter->params.chip) && (cause & MC1))
1955 		mem_intr_handler(adapter, MEM_MC1);
1956 	if (cause & EDC0)
1957 		mem_intr_handler(adapter, MEM_EDC0);
1958 	if (cause & EDC1)
1959 		mem_intr_handler(adapter, MEM_EDC1);
1960 	if (cause & LE)
1961 		le_intr_handler(adapter);
1962 	if (cause & TP)
1963 		tp_intr_handler(adapter);
1964 	if (cause & MA)
1965 		ma_intr_handler(adapter);
1966 	if (cause & PM_TX)
1967 		pmtx_intr_handler(adapter);
1968 	if (cause & PM_RX)
1969 		pmrx_intr_handler(adapter);
1970 	if (cause & ULP_RX)
1971 		ulprx_intr_handler(adapter);
1972 	if (cause & CPL_SWITCH)
1973 		cplsw_intr_handler(adapter);
1974 	if (cause & SGE)
1975 		sge_intr_handler(adapter);
1976 	if (cause & ULP_TX)
1977 		ulptx_intr_handler(adapter);
1978 
1979 	/* Clear the interrupts just processed for which we are the master. */
1980 	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1981 	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1982 	return 1;
1983 }
1984 
1985 /**
1986  *	t4_intr_enable - enable interrupts
1987  *	@adapter: the adapter whose interrupts should be enabled
1988  *
1989  *	Enable PF-specific interrupts for the calling function and the top-level
1990  *	interrupt concentrator for global interrupts.  Interrupts are already
1991  *	enabled at each module,	here we just enable the roots of the interrupt
1992  *	hierarchies.
1993  *
1994  *	Note: this function should be called only when the driver manages
1995  *	non PF-specific interrupts from the various HW modules.  Only one PCI
1996  *	function at a time should be doing this.
1997  */
1998 void t4_intr_enable(struct adapter *adapter)
1999 {
2000 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2001 
2002 	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
2003 		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
2004 		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
2005 		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
2006 		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
2007 		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
2008 		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
2009 		     DBFIFO_HP_INT | DBFIFO_LP_INT |
2010 		     EGRESS_SIZE_ERR);
2011 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2012 	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2013 }
2014 
2015 /**
2016  *	t4_intr_disable - disable interrupts
2017  *	@adapter: the adapter whose interrupts should be disabled
2018  *
2019  *	Disable interrupts.  We only disable the top-level interrupt
2020  *	concentrators.  The caller must be a PCI function managing global
2021  *	interrupts.
2022  */
2023 void t4_intr_disable(struct adapter *adapter)
2024 {
2025 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2026 
2027 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2028 	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2029 }
2030 
2031 /**
2032  *	hash_mac_addr - return the hash value of a MAC address
2033  *	@addr: the 48-bit Ethernet MAC address
2034  *
2035  *	Hashes a MAC address according to the hash function used by HW inexact
2036  *	(hash) address matching.
2037  */
2038 static int hash_mac_addr(const u8 *addr)
2039 {
2040 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2041 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2042 	a ^= b;
2043 	a ^= (a >> 12);
2044 	a ^= (a >> 6);
2045 	return a & 0x3f;
2046 }
2047 
2048 /**
2049  *	t4_config_rss_range - configure a portion of the RSS mapping table
2050  *	@adapter: the adapter
2051  *	@mbox: mbox to use for the FW command
2052  *	@viid: virtual interface whose RSS subtable is to be written
2053  *	@start: start entry in the table to write
2054  *	@n: how many table entries to write
2055  *	@rspq: values for the response queue lookup table
2056  *	@nrspq: number of values in @rspq
2057  *
2058  *	Programs the selected part of the VI's RSS mapping table with the
2059  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2060  *	until the full table range is populated.
2061  *
2062  *	The caller must ensure the values in @rspq are in the range allowed for
2063  *	@viid.
2064  */
2065 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2066 			int start, int n, const u16 *rspq, unsigned int nrspq)
2067 {
2068 	int ret;
2069 	const u16 *rsp = rspq;
2070 	const u16 *rsp_end = rspq + nrspq;
2071 	struct fw_rss_ind_tbl_cmd cmd;
2072 
2073 	memset(&cmd, 0, sizeof(cmd));
2074 	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2075 			       FW_CMD_REQUEST | FW_CMD_WRITE |
2076 			       FW_RSS_IND_TBL_CMD_VIID(viid));
2077 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2078 
2079 	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2080 	while (n > 0) {
2081 		int nq = min(n, 32);
2082 		__be32 *qp = &cmd.iq0_to_iq2;
2083 
2084 		cmd.niqid = htons(nq);
2085 		cmd.startidx = htons(start);
2086 
2087 		start += nq;
2088 		n -= nq;
2089 
2090 		while (nq > 0) {
2091 			unsigned int v;
2092 
2093 			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2094 			if (++rsp >= rsp_end)
2095 				rsp = rspq;
2096 			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2097 			if (++rsp >= rsp_end)
2098 				rsp = rspq;
2099 			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2100 			if (++rsp >= rsp_end)
2101 				rsp = rspq;
2102 
2103 			*qp++ = htonl(v);
2104 			nq -= 3;
2105 		}
2106 
2107 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2108 		if (ret)
2109 			return ret;
2110 	}
2111 	return 0;
2112 }
2113 
2114 /**
2115  *	t4_config_glbl_rss - configure the global RSS mode
2116  *	@adapter: the adapter
2117  *	@mbox: mbox to use for the FW command
2118  *	@mode: global RSS mode
2119  *	@flags: mode-specific flags
2120  *
2121  *	Sets the global RSS mode.
2122  */
2123 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2124 		       unsigned int flags)
2125 {
2126 	struct fw_rss_glb_config_cmd c;
2127 
2128 	memset(&c, 0, sizeof(c));
2129 	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2130 			      FW_CMD_REQUEST | FW_CMD_WRITE);
2131 	c.retval_len16 = htonl(FW_LEN16(c));
2132 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2133 		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2134 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2135 		c.u.basicvirtual.mode_pkd =
2136 			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2137 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2138 	} else
2139 		return -EINVAL;
2140 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2141 }
2142 
2143 /**
2144  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2145  *	@adap: the adapter
2146  *	@v4: holds the TCP/IP counter values
2147  *	@v6: holds the TCP/IPv6 counter values
2148  *
2149  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2150  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2151  */
2152 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2153 			 struct tp_tcp_stats *v6)
2154 {
2155 	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2156 
2157 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2158 #define STAT(x)     val[STAT_IDX(x)]
2159 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2160 
2161 	if (v4) {
2162 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2163 				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2164 		v4->tcpOutRsts = STAT(OUT_RST);
2165 		v4->tcpInSegs  = STAT64(IN_SEG);
2166 		v4->tcpOutSegs = STAT64(OUT_SEG);
2167 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2168 	}
2169 	if (v6) {
2170 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2171 				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2172 		v6->tcpOutRsts = STAT(OUT_RST);
2173 		v6->tcpInSegs  = STAT64(IN_SEG);
2174 		v6->tcpOutSegs = STAT64(OUT_SEG);
2175 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2176 	}
2177 #undef STAT64
2178 #undef STAT
2179 #undef STAT_IDX
2180 }
2181 
2182 /**
2183  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2184  *	@adap: the adapter
2185  *	@mtus: where to store the MTU values
2186  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2187  *
2188  *	Reads the HW path MTU table.
2189  */
2190 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2191 {
2192 	u32 v;
2193 	int i;
2194 
2195 	for (i = 0; i < NMTUS; ++i) {
2196 		t4_write_reg(adap, TP_MTU_TABLE,
2197 			     MTUINDEX(0xff) | MTUVALUE(i));
2198 		v = t4_read_reg(adap, TP_MTU_TABLE);
2199 		mtus[i] = MTUVALUE_GET(v);
2200 		if (mtu_log)
2201 			mtu_log[i] = MTUWIDTH_GET(v);
2202 	}
2203 }
2204 
2205 /**
2206  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2207  *	@adap: the adapter
2208  *	@addr: the indirect TP register address
2209  *	@mask: specifies the field within the register to modify
2210  *	@val: new value for the field
2211  *
2212  *	Sets a field of an indirect TP register to the given value.
2213  */
2214 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2215 			    unsigned int mask, unsigned int val)
2216 {
2217 	t4_write_reg(adap, TP_PIO_ADDR, addr);
2218 	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2219 	t4_write_reg(adap, TP_PIO_DATA, val);
2220 }
2221 
2222 /**
2223  *	init_cong_ctrl - initialize congestion control parameters
2224  *	@a: the alpha values for congestion control
2225  *	@b: the beta values for congestion control
2226  *
2227  *	Initialize the congestion control parameters.
2228  */
2229 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2230 {
2231 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2232 	a[9] = 2;
2233 	a[10] = 3;
2234 	a[11] = 4;
2235 	a[12] = 5;
2236 	a[13] = 6;
2237 	a[14] = 7;
2238 	a[15] = 8;
2239 	a[16] = 9;
2240 	a[17] = 10;
2241 	a[18] = 14;
2242 	a[19] = 17;
2243 	a[20] = 21;
2244 	a[21] = 25;
2245 	a[22] = 30;
2246 	a[23] = 35;
2247 	a[24] = 45;
2248 	a[25] = 60;
2249 	a[26] = 80;
2250 	a[27] = 100;
2251 	a[28] = 200;
2252 	a[29] = 300;
2253 	a[30] = 400;
2254 	a[31] = 500;
2255 
2256 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2257 	b[9] = b[10] = 1;
2258 	b[11] = b[12] = 2;
2259 	b[13] = b[14] = b[15] = b[16] = 3;
2260 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2261 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2262 	b[28] = b[29] = 6;
2263 	b[30] = b[31] = 7;
2264 }
2265 
2266 /* The minimum additive increment value for the congestion control table */
2267 #define CC_MIN_INCR 2U
2268 
2269 /**
2270  *	t4_load_mtus - write the MTU and congestion control HW tables
2271  *	@adap: the adapter
2272  *	@mtus: the values for the MTU table
2273  *	@alpha: the values for the congestion control alpha parameter
2274  *	@beta: the values for the congestion control beta parameter
2275  *
2276  *	Write the HW MTU table with the supplied MTUs and the high-speed
2277  *	congestion control table with the supplied alpha, beta, and MTUs.
2278  *	We write the two tables together because the additive increments
2279  *	depend on the MTUs.
2280  */
2281 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2282 		  const unsigned short *alpha, const unsigned short *beta)
2283 {
2284 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2285 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2286 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2287 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2288 	};
2289 
2290 	unsigned int i, w;
2291 
2292 	for (i = 0; i < NMTUS; ++i) {
2293 		unsigned int mtu = mtus[i];
2294 		unsigned int log2 = fls(mtu);
2295 
2296 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2297 			log2--;
2298 		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2299 			     MTUWIDTH(log2) | MTUVALUE(mtu));
2300 
2301 		for (w = 0; w < NCCTRL_WIN; ++w) {
2302 			unsigned int inc;
2303 
2304 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2305 				  CC_MIN_INCR);
2306 
2307 			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2308 				     (w << 16) | (beta[w] << 13) | inc);
2309 		}
2310 	}
2311 }
2312 
2313 /**
2314  *	get_mps_bg_map - return the buffer groups associated with a port
2315  *	@adap: the adapter
2316  *	@idx: the port index
2317  *
2318  *	Returns a bitmap indicating which MPS buffer groups are associated
2319  *	with the given port.  Bit i is set if buffer group i is used by the
2320  *	port.
2321  */
2322 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2323 {
2324 	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2325 
2326 	if (n == 0)
2327 		return idx == 0 ? 0xf : 0;
2328 	if (n == 1)
2329 		return idx < 2 ? (3 << (2 * idx)) : 0;
2330 	return 1 << idx;
2331 }
2332 
2333 /**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
2336  */
2337 const char *t4_get_port_type_description(enum fw_port_type port_type)
2338 {
2339 	static const char *const port_type_description[] = {
2340 		"R XFI",
2341 		"R XAUI",
2342 		"T SGMII",
2343 		"T XFI",
2344 		"T XAUI",
2345 		"KX4",
2346 		"CX4",
2347 		"KX",
2348 		"KR",
2349 		"R SFP+",
2350 		"KR/KX",
2351 		"KR/KX/KX4",
2352 		"R QSFP_10G",
2353 		"",
2354 		"R QSFP",
2355 		"R BP40_BA",
2356 	};
2357 
2358 	if (port_type < ARRAY_SIZE(port_type_description))
2359 		return port_type_description[port_type];
2360 	return "UNKNOWN";
2361 }
2362 
2363 /**
2364  *	t4_get_port_stats - collect port statistics
2365  *	@adap: the adapter
2366  *	@idx: the port index
2367  *	@p: the stats structure to fill
2368  *
2369  *	Collect statistics related to the given port from HW.
2370  */
2371 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2372 {
2373 	u32 bgmap = get_mps_bg_map(adap, idx);
2374 
2375 #define GET_STAT(name) \
2376 	t4_read_reg64(adap, \
2377 	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2378 	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2379 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2380 
2381 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
2382 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
2383 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
2384 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
2385 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
2386 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
2387 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
2388 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
2389 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
2390 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
2391 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
2392 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2393 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
2394 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
2395 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
2396 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
2397 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
2398 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
2399 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
2400 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
2401 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
2402 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
2403 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
2404 
2405 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
2406 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
2407 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
2408 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
2409 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
2410 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
2411 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2412 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
2413 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
2414 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
2415 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
2416 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
2417 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
2418 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
2419 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
2420 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
2421 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2422 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
2423 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
2424 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
2425 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
2426 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
2427 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
2428 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
2429 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
2430 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
2431 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
2432 
2433 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2434 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2435 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2436 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2437 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2438 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2439 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2440 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2441 
2442 #undef GET_STAT
2443 #undef GET_STAT_COM
2444 }
2445 
2446 /**
2447  *	t4_wol_magic_enable - enable/disable magic packet WoL
2448  *	@adap: the adapter
2449  *	@port: the physical port index
2450  *	@addr: MAC address expected in magic packets, %NULL to disable
2451  *
2452  *	Enables/disables magic packet wake-on-LAN for the selected port.
2453  */
2454 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2455 			 const u8 *addr)
2456 {
2457 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2458 
2459 	if (is_t4(adap->params.chip)) {
2460 		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2461 		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2462 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2463 	} else {
2464 		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2465 		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2466 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2467 	}
2468 
2469 	if (addr) {
2470 		t4_write_reg(adap, mag_id_reg_l,
2471 			     (addr[2] << 24) | (addr[3] << 16) |
2472 			     (addr[4] << 8) | addr[5]);
2473 		t4_write_reg(adap, mag_id_reg_h,
2474 			     (addr[0] << 8) | addr[1]);
2475 	}
2476 	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2477 			 addr ? MAGICEN : 0);
2478 }
2479 
2480 /**
2481  *	t4_wol_pat_enable - enable/disable pattern-based WoL
2482  *	@adap: the adapter
2483  *	@port: the physical port index
2484  *	@map: bitmap of which HW pattern filters to set
2485  *	@mask0: byte mask for bytes 0-63 of a packet
2486  *	@mask1: byte mask for bytes 64-127 of a packet
2487  *	@crc: Ethernet CRC for selected bytes
2488  *	@enable: enable/disable switch
2489  *
2490  *	Sets the pattern filters indicated in @map to mask out the bytes
2491  *	specified in @mask0/@mask1 in received packets and compare the CRC of
2492  *	the resulting packet against @crc.  If @enable is %true pattern-based
2493  *	WoL is enabled, otherwise disabled.
2494  */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port,
		      unsigned int map, u64 mask0, u64 mask1,
		      unsigned int crc, bool enable)
2497 {
2498 	int i;
2499 	u32 port_cfg_reg;
2500 
2501 	if (is_t4(adap->params.chip))
2502 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2503 	else
2504 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2505 
2506 	if (!enable) {
2507 		t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2508 		return 0;
2509 	}
2510 	if (map > 0xff)
2511 		return -EINVAL;
2512 
2513 #define EPIO_REG(name) \
2514 	(is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2515 	T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2516 
2517 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2518 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2519 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2520 
2521 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2522 		if (!(map & 1))
2523 			continue;
2524 
2525 		/* write byte masks */
2526 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2527 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2528 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2529 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2530 			return -ETIMEDOUT;
2531 
2532 		/* write CRC */
2533 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2534 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2535 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2536 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2537 			return -ETIMEDOUT;
2538 	}
2539 #undef EPIO_REG
2540 
	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2542 	return 0;
2543 }
2544 
/**
 *	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid
 *	is negative the delete notification is suppressed.
 */
2553 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2554 {
2555 	memset(wr, 0, sizeof(*wr));
2556 	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2557 	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2558 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2559 			V_FW_FILTER_WR_NOREPLY(qid < 0));
2560 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2561 	if (qid >= 0)
2562 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2563 }
2564 
2565 #define INIT_CMD(var, cmd, rd_wr) do { \
2566 	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2567 				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2568 	(var).retval_len16 = htonl(FW_LEN16(var)); \
2569 } while (0)
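
/*
 * For example, INIT_CMD(c, RESET, WRITE) expands to:
 *
 *	(c).op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
 *				FW_CMD_REQUEST | FW_CMD_WRITE);
 *	(c).retval_len16 = htonl(FW_LEN16(c));
 */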
2570 
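/**
 *	t4_fwaddrspace_write - write to an address in the FW address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command to write @val to @addr in the
 *	FW_LDST_ADDRSPC_FIRMWARE address space.
 */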
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
2573 {
2574 	struct fw_ldst_cmd c;
2575 
2576 	memset(&c, 0, sizeof(c));
2577 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2578 			    FW_CMD_WRITE |
2579 			    FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2580 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2581 	c.u.addrval.addr = htonl(addr);
2582 	c.u.addrval.val = htonl(val);
2583 
2584 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2585 }
2586 
2587 /**
2588  *	t4_mdio_rd - read a PHY register through MDIO
2589  *	@adap: the adapter
2590  *	@mbox: mailbox to use for the FW command
2591  *	@phy_addr: the PHY address
2592  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2593  *	@reg: the register to read
2594  *	@valp: where to store the value
2595  *
2596  *	Issues a FW command through the given mailbox to read a PHY register.
2597  */
2598 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2599 	       unsigned int mmd, unsigned int reg, u16 *valp)
2600 {
2601 	int ret;
2602 	struct fw_ldst_cmd c;
2603 
2604 	memset(&c, 0, sizeof(c));
2605 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2606 		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2607 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2608 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2609 				   FW_LDST_CMD_MMD(mmd));
2610 	c.u.mdio.raddr = htons(reg);
2611 
2612 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2613 	if (ret == 0)
2614 		*valp = ntohs(c.u.mdio.rval);
2615 	return ret;
2616 }
2617 
2618 /**
2619  *	t4_mdio_wr - write a PHY register through MDIO
2620  *	@adap: the adapter
2621  *	@mbox: mailbox to use for the FW command
2622  *	@phy_addr: the PHY address
2623  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2624  *	@reg: the register to write
2625  *	@valp: value to write
2626  *
2627  *	Issues a FW command through the given mailbox to write a PHY register.
2628  */
2629 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2630 	       unsigned int mmd, unsigned int reg, u16 val)
2631 {
2632 	struct fw_ldst_cmd c;
2633 
2634 	memset(&c, 0, sizeof(c));
2635 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2636 		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2637 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2638 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2639 				   FW_LDST_CMD_MMD(mmd));
2640 	c.u.mdio.raddr = htons(reg);
2641 	c.u.mdio.rval = htons(val);
2642 
2643 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2644 }
2645 
2646 /**
2647  *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
2649  *	@state: the state idma is stuck in
2650  */
2651 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2652 {
2653 	static const char * const t4_decode[] = {
2654 		"IDMA_IDLE",
2655 		"IDMA_PUSH_MORE_CPL_FIFO",
2656 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2657 		"Not used",
2658 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2659 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2660 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2661 		"IDMA_SEND_FIFO_TO_IMSG",
2662 		"IDMA_FL_REQ_DATA_FL_PREP",
2663 		"IDMA_FL_REQ_DATA_FL",
2664 		"IDMA_FL_DROP",
2665 		"IDMA_FL_H_REQ_HEADER_FL",
2666 		"IDMA_FL_H_SEND_PCIEHDR",
2667 		"IDMA_FL_H_PUSH_CPL_FIFO",
2668 		"IDMA_FL_H_SEND_CPL",
2669 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2670 		"IDMA_FL_H_SEND_IP_HDR",
2671 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2672 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2673 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2674 		"IDMA_FL_D_SEND_PCIEHDR",
2675 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2676 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2677 		"IDMA_FL_SEND_PCIEHDR",
2678 		"IDMA_FL_PUSH_CPL_FIFO",
2679 		"IDMA_FL_SEND_CPL",
2680 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2681 		"IDMA_FL_SEND_PAYLOAD",
2682 		"IDMA_FL_REQ_NEXT_DATA_FL",
2683 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2684 		"IDMA_FL_SEND_PADDING",
2685 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2686 		"IDMA_FL_SEND_FIFO_TO_IMSG",
2687 		"IDMA_FL_REQ_DATAFL_DONE",
2688 		"IDMA_FL_REQ_HEADERFL_DONE",
2689 	};
2690 	static const char * const t5_decode[] = {
2691 		"IDMA_IDLE",
2692 		"IDMA_ALMOST_IDLE",
2693 		"IDMA_PUSH_MORE_CPL_FIFO",
2694 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2695 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2696 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2697 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2698 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2699 		"IDMA_SEND_FIFO_TO_IMSG",
2700 		"IDMA_FL_REQ_DATA_FL",
2701 		"IDMA_FL_DROP",
2702 		"IDMA_FL_DROP_SEND_INC",
2703 		"IDMA_FL_H_REQ_HEADER_FL",
2704 		"IDMA_FL_H_SEND_PCIEHDR",
2705 		"IDMA_FL_H_PUSH_CPL_FIFO",
2706 		"IDMA_FL_H_SEND_CPL",
2707 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2708 		"IDMA_FL_H_SEND_IP_HDR",
2709 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2710 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2711 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2712 		"IDMA_FL_D_SEND_PCIEHDR",
2713 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2714 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2715 		"IDMA_FL_SEND_PCIEHDR",
2716 		"IDMA_FL_PUSH_CPL_FIFO",
2717 		"IDMA_FL_SEND_CPL",
2718 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2719 		"IDMA_FL_SEND_PAYLOAD",
2720 		"IDMA_FL_REQ_NEXT_DATA_FL",
2721 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2722 		"IDMA_FL_SEND_PADDING",
2723 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2724 	};
2725 	static const u32 sge_regs[] = {
2726 		SGE_DEBUG_DATA_LOW_INDEX_2,
2727 		SGE_DEBUG_DATA_LOW_INDEX_3,
2728 		SGE_DEBUG_DATA_HIGH_INDEX_10,
2729 	};
2730 	const char **sge_idma_decode;
2731 	int sge_idma_decode_nstates;
2732 	int i;
2733 
2734 	if (is_t4(adapter->params.chip)) {
2735 		sge_idma_decode = (const char **)t4_decode;
2736 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2737 	} else {
2738 		sge_idma_decode = (const char **)t5_decode;
2739 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2740 	}
2741 
2742 	if (state < sge_idma_decode_nstates)
2743 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2744 	else
2745 		CH_WARN(adapter, "idma state %d unknown\n", state);
2746 
2747 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2748 		CH_WARN(adapter, "SGE register %#x value %#x\n",
2749 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2750 }
2751 
2752 /**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
2758  *	@state: returns the current device state (if non-NULL)
2759  *
2760  *	Issues a command to establish communication with FW.  Returns either
2761  *	an error (negative integer) or the mailbox of the Master PF.
2762  */
2763 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2764 		enum dev_master master, enum dev_state *state)
2765 {
2766 	int ret;
2767 	struct fw_hello_cmd c;
2768 	u32 v;
2769 	unsigned int master_mbox;
2770 	int retries = FW_CMD_HELLO_RETRIES;
2771 
2772 retry:
2773 	memset(&c, 0, sizeof(c));
2774 	INIT_CMD(c, HELLO, WRITE);
2775 	c.err_to_clearinit = htonl(
2776 		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2777 		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2778 		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2779 				      FW_HELLO_CMD_MBMASTER_MASK) |
2780 		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2781 		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2782 		FW_HELLO_CMD_CLEARINIT);
2783 
2784 	/*
2785 	 * Issue the HELLO command to the firmware.  If it's not successful
2786 	 * but indicates that we got a "busy" or "timeout" condition, retry
2787 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
2788 	 * retry limit, check to see if the firmware left us any error
2789 	 * information and report that if so.
2790 	 */
2791 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2792 	if (ret < 0) {
2793 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2794 			goto retry;
		if (t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_ERR)
2796 			t4_report_fw_error(adap);
2797 		return ret;
2798 	}
2799 
2800 	v = ntohl(c.err_to_clearinit);
2801 	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2802 	if (state) {
2803 		if (v & FW_HELLO_CMD_ERR)
2804 			*state = DEV_STATE_ERR;
2805 		else if (v & FW_HELLO_CMD_INIT)
2806 			*state = DEV_STATE_INIT;
2807 		else
2808 			*state = DEV_STATE_UNINIT;
2809 	}
2810 
2811 	/*
2812 	 * If we're not the Master PF then we need to wait around for the
2813 	 * Master PF Driver to finish setting up the adapter.
2814 	 *
2815 	 * Note that we also do this wait if we're a non-Master-capable PF and
2816 	 * there is no current Master PF; a Master PF may show up momentarily
2817 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
2818 	 * OS loads lots of different drivers rapidly at the same time).  In
2819 	 * this case, the Master PF returned by the firmware will be
2820 	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2821 	 */
2822 	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2823 	    master_mbox != mbox) {
2824 		int waiting = FW_CMD_HELLO_TIMEOUT;
2825 
2826 		/*
2827 		 * Wait for the firmware to either indicate an error or
2828 		 * initialized state.  If we see either of these we bail out
2829 		 * and report the issue to the caller.  If we exhaust the
2830 		 * "hello timeout" and we haven't exhausted our retries, try
2831 		 * again.  Otherwise bail with a timeout error.
2832 		 */
2833 		for (;;) {
2834 			u32 pcie_fw;
2835 
2836 			msleep(50);
2837 			waiting -= 50;
2838 
2839 			/*
2840 			 * If neither Error nor Initialialized are indicated
2841 			 * by the firmware keep waiting till we exaust our
2842 			 * timeout ... and then retry if we haven't exhausted
2843 			 * our retries ...
2844 			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW);
2846 			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2847 				if (waiting <= 0) {
2848 					if (retries-- > 0)
2849 						goto retry;
2850 
2851 					return -ETIMEDOUT;
2852 				}
2853 				continue;
2854 			}
2855 
2856 			/*
2857 			 * We either have an Error or Initialized condition
2858 			 * report errors preferentially.
2859 			 */
2860 			if (state) {
2861 				if (pcie_fw & FW_PCIE_FW_ERR)
2862 					*state = DEV_STATE_ERR;
2863 				else if (pcie_fw & FW_PCIE_FW_INIT)
2864 					*state = DEV_STATE_INIT;
2865 			}
2866 
2867 			/*
2868 			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
2870 			 * for our caller.
2871 			 */
2872 			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2873 			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2874 				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2875 			break;
2876 		}
2877 	}
2878 
2879 	return master_mbox;
2880 }
2881 
2882 /**
2883  *	t4_fw_bye - end communication with FW
2884  *	@adap: the adapter
2885  *	@mbox: mailbox to use for the FW command
2886  *
2887  *	Issues a command to terminate communication with FW.
2888  */
2889 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2890 {
2891 	struct fw_bye_cmd c;
2892 
2893 	memset(&c, 0, sizeof(c));
2894 	INIT_CMD(c, BYE, WRITE);
2895 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2896 }
2897 
2898 /**
 *	t4_early_init - ask FW to initialize the device
2900  *	@adap: the adapter
2901  *	@mbox: mailbox to use for the FW command
2902  *
2903  *	Issues a command to FW to partially initialize the device.  This
2904  *	performs initialization that generally doesn't depend on user input.
2905  */
2906 int t4_early_init(struct adapter *adap, unsigned int mbox)
2907 {
2908 	struct fw_initialize_cmd c;
2909 
2910 	memset(&c, 0, sizeof(c));
2911 	INIT_CMD(c, INITIALIZE, WRITE);
2912 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2913 }
2914 
2915 /**
2916  *	t4_fw_reset - issue a reset to FW
2917  *	@adap: the adapter
2918  *	@mbox: mailbox to use for the FW command
2919  *	@reset: specifies the type of reset to perform
2920  *
2921  *	Issues a reset command of the specified type to FW.
2922  */
2923 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2924 {
2925 	struct fw_reset_cmd c;
2926 
2927 	memset(&c, 0, sizeof(c));
2928 	INIT_CMD(c, RESET, WRITE);
2929 	c.val = htonl(reset);
2930 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2931 }
2932 
2933 /**
2934  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2935  *	@adap: the adapter
2936  *	@mbox: mailbox to use for the FW RESET command (if desired)
2937  *	@force: force uP into RESET even if FW RESET command fails
2938  *
2939  *	Issues a RESET command to firmware (if desired) with a HALT indication
2940  *	and then puts the microprocessor into RESET state.  The RESET command
2941  *	will only be issued if a legitimate mailbox is provided (mbox <=
2942  *	FW_PCIE_FW_MASTER_MASK).
2943  *
2944  *	This is generally used in order for the host to safely manipulate the
2945  *	adapter without fear of conflicting with whatever the firmware might
2946  *	be doing.  The only way out of this state is to RESTART the firmware
2947  *	...
2948  */
2949 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2950 {
2951 	int ret = 0;
2952 
2953 	/*
2954 	 * If a legitimate mailbox is provided, issue a RESET command
2955 	 * with a HALT indication.
2956 	 */
2957 	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2958 		struct fw_reset_cmd c;
2959 
2960 		memset(&c, 0, sizeof(c));
2961 		INIT_CMD(c, RESET, WRITE);
2962 		c.val = htonl(PIORST | PIORSTMODE);
2963 		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2964 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2965 	}
2966 
2967 	/*
2968 	 * Normally we won't complete the operation if the firmware RESET
2969 	 * command fails but if our caller insists we'll go ahead and put the
2970 	 * uP into RESET.  This can be useful if the firmware is hung or even
2971 	 * missing ...  We'll have to take the risk of putting the uP into
2972 	 * RESET without the cooperation of firmware in that case.
2973 	 *
2974 	 * We also force the firmware's HALT flag to be on in case we bypassed
2975 	 * the firmware RESET command above or we're dealing with old firmware
2976 	 * which doesn't have the HALT capability.  This will serve as a flag
2977 	 * for the incoming firmware to know that it's coming out of a HALT
2978 	 * rather than a RESET ... if it's new enough to understand that ...
2979 	 */
2980 	if (ret == 0 || force) {
2981 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2982 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2983 				 FW_PCIE_FW_HALT);
2984 	}
2985 
2986 	/*
2987 	 * And we always return the result of the firmware RESET command
2988 	 * even when we force the uP into RESET ...
2989 	 */
2990 	return ret;
2991 }
2992 
2993 /**
2994  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
2995  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
2997  *
2998  *	Restart firmware previously halted by t4_fw_halt().  On successful
2999  *	return the previous PF Master remains as the new PF Master and there
3000  *	is no need to issue a new HELLO command, etc.
3001  *
3002  *	We do this in two ways:
3003  *
3004  *	 1. If we're dealing with newer firmware we'll simply want to take
3005  *	    the chip's microprocessor out of RESET.  This will cause the
3006  *	    firmware to start up from its start vector.  And then we'll loop
3007  *	    until the firmware indicates it's started again (PCIE_FW.HALT
3008  *	    reset to 0) or we timeout.
3009  *
3010  *	 2. If we're dealing with older firmware then we'll need to RESET
3011  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
3012  *	    flag and automatically RESET itself on startup.
3013  */
3014 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3015 {
3016 	if (reset) {
3017 		/*
3018 		 * Since we're directing the RESET instead of the firmware
3019 		 * doing it automatically, we need to clear the PCIE_FW.HALT
3020 		 * bit.
3021 		 */
3022 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
3023 
3024 		/*
3025 		 * If we've been given a valid mailbox, first try to get the
3026 		 * firmware to do the RESET.  If that works, great and we can
3027 		 * return success.  Otherwise, if we haven't been given a
3028 		 * valid mailbox or the RESET command failed, fall back to
3029 		 * hitting the chip with a hammer.
3030 		 */
3031 		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
3032 			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3033 			msleep(100);
3034 			if (t4_fw_reset(adap, mbox,
3035 					PIORST | PIORSTMODE) == 0)
3036 				return 0;
3037 		}
3038 
3039 		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3040 		msleep(2000);
3041 	} else {
3042 		int ms;
3043 
3044 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3045 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3046 			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3047 				return 0;
3048 			msleep(100);
3049 			ms += 100;
3050 		}
3051 		return -ETIMEDOUT;
3052 	}
3053 	return 0;
3054 }
3055 
3056 /**
3057  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3058  *	@adap: the adapter
3059  *	@mbox: mailbox to use for the FW RESET command (if desired)
3060  *	@fw_data: the firmware image to write
3061  *	@size: image size
3062  *	@force: force upgrade even if firmware doesn't cooperate
3063  *
3064  *	Perform all of the steps necessary for upgrading an adapter's
3065  *	firmware image.  Normally this requires the cooperation of the
3066  *	existing firmware in order to halt all existing activities
3067  *	but if an invalid mailbox token is passed in we skip that step
3068  *	(though we'll still put the adapter microprocessor into RESET in
3069  *	that case).
3070  *
3071  *	On successful return the new firmware will have been loaded and
3072  *	the adapter will have been fully RESET losing all previous setup
3073  *	state.  On unsuccessful return the adapter may be completely hosed ...
3074  *	positive errno indicates that the adapter is ~probably~ intact, a
3075  *	negative errno indicates that things are looking bad ...
3076  */
3077 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3078 		  const u8 *fw_data, unsigned int size, int force)
3079 {
3080 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3081 	int reset, ret;
3082 
3083 	ret = t4_fw_halt(adap, mbox, force);
3084 	if (ret < 0 && !force)
3085 		return ret;
3086 
3087 	ret = t4_load_fw(adap, fw_data, size);
3088 	if (ret < 0)
3089 		return ret;
3090 
3091 	/*
3092 	 * Older versions of the firmware don't understand the new
3093 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3094 	 * restart.  So for newly loaded older firmware we'll have to do the
3095 	 * RESET for it so it starts up on a clean slate.  We can tell if
3096 	 * the newly loaded firmware will handle this right by checking
3097 	 * its header flags to see if it advertises the capability.
3098 	 */
3099 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3100 	return t4_fw_restart(adap, mbox, reset);
3101 }
3102 
3103 /**
3104  *	t4_fixup_host_params - fix up host-dependent parameters
3105  *	@adap: the adapter
3106  *	@page_size: the host's Base Page Size
3107  *	@cache_line_size: the host's Cache Line Size
3108  *
3109  *	Various registers in T4 contain values which are dependent on the
3110  *	host's Base Page and Cache Line Sizes.  This function will fix all of
3111  *	those registers with the appropriate values as passed in ...
3112  */
3113 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3114 			 unsigned int cache_line_size)
3115 {
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip)) {
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(fl_align_log - 5) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));

	return 0;
}
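
/* Worked example (illustrative, not code in this file): on an x86-64 host
 * with a 4KB Base Page Size and 64-byte cache lines the computation above
 * yields
 *
 *	page_shift   = fls(4096) - 1 = 12
 *	sge_hps      = 12 - 10       = 2
 *	stat_len     = 64  (cache line size is not > 64)
 *	fl_align     = 64, fl_align_log = 6
 *
 * so the Free List buffer sizes end up rounded to 64-byte multiples.
 */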

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
3227 
3228 /**
3229  *	t4_query_params - query FW or device parameters
3230  *	@adap: the adapter
3231  *	@mbox: mailbox to use for the FW command
3232  *	@pf: the PF
3233  *	@vf: the VF
3234  *	@nparams: the number of parameters
3235  *	@params: the parameter names
3236  *	@val: the parameter values
3237  *
3238  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3239  *	queried at once.
3240  */
3241 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3242 		    unsigned int vf, unsigned int nparams, const u32 *params,
3243 		    u32 *val)
3244 {
3245 	int i, ret;
3246 	struct fw_params_cmd c;
3247 	__be32 *p = &c.param[0].mnem;
3248 
3249 	if (nparams > 7)
3250 		return -EINVAL;
3251 
3252 	memset(&c, 0, sizeof(c));
3253 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3254 			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3255 			    FW_PARAMS_CMD_VFN(vf));
3256 	c.retval_len16 = htonl(FW_LEN16(c));
3257 	for (i = 0; i < nparams; i++, p += 2)
3258 		*p = htonl(*params++);
3259 
3260 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3261 	if (ret == 0)
3262 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3263 			*val++ = ntohl(*p);
3264 	return ret;
3265 }
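
/* Usage sketch (illustrative only): querying a single firmware parameter.
 * The FW_PARAMS_MNEM/FW_PARAMS_PARAM_X macros are the usual way parameter
 * names are composed elsewhere in the driver; the surrounding variable
 * names are assumptions of this example.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
 *	u32 val;
 *
 *	ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 */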
3266 
3267 /**
3268  *      t4_set_params_nosleep - sets FW or device parameters
3269  *      @adap: the adapter
3270  *      @mbox: mailbox to use for the FW command
3271  *      @pf: the PF
3272  *      @vf: the VF
3273  *      @nparams: the number of parameters
3274  *      @params: the parameter names
3275  *      @val: the parameter values
3276  *
3277  *	 Does not ever sleep
3278  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
3279  *      specified at once.
3280  */
3281 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3282 			  unsigned int pf, unsigned int vf,
3283 			  unsigned int nparams, const u32 *params,
3284 			  const u32 *val)
3285 {
3286 	struct fw_params_cmd c;
3287 	__be32 *p = &c.param[0].mnem;
3288 
3289 	if (nparams > 7)
3290 		return -EINVAL;
3291 
3292 	memset(&c, 0, sizeof(c));
3293 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3294 				FW_CMD_REQUEST | FW_CMD_WRITE |
3295 				FW_PARAMS_CMD_PFN(pf) |
3296 				FW_PARAMS_CMD_VFN(vf));
3297 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3298 
3299 	while (nparams--) {
3300 		*p++ = cpu_to_be32(*params++);
3301 		*p++ = cpu_to_be32(*val++);
3302 	}
3303 
3304 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3305 }
3306 
3307 /**
3308  *	t4_set_params - sets FW or device parameters
3309  *	@adap: the adapter
3310  *	@mbox: mailbox to use for the FW command
3311  *	@pf: the PF
3312  *	@vf: the VF
3313  *	@nparams: the number of parameters
3314  *	@params: the parameter names
3315  *	@val: the parameter values
3316  *
3317  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3318  *	specified at once.
3319  */
3320 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3321 		  unsigned int vf, unsigned int nparams, const u32 *params,
3322 		  const u32 *val)
3323 {
3324 	struct fw_params_cmd c;
3325 	__be32 *p = &c.param[0].mnem;
3326 
3327 	if (nparams > 7)
3328 		return -EINVAL;
3329 
3330 	memset(&c, 0, sizeof(c));
3331 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3332 			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3333 			    FW_PARAMS_CMD_VFN(vf));
3334 	c.retval_len16 = htonl(FW_LEN16(c));
3335 	while (nparams--) {
3336 		*p++ = htonl(*params++);
3337 		*p++ = htonl(*val++);
3338 	}
3339 
3340 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3341 }
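
/* Usage sketch (illustrative only): setting one firmware parameter; the
 * composition of "param" mirrors the query example above and the locals
 * are assumptions of this example.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
 *	u32 val = 1;
 *
 *	ret = t4_set_params(adap, mbox, pf, 0, 1, &param, &val);
 */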

/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
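
/* Usage sketch (illustrative only): allocating a VI with a single MAC
 * address on physical port 0.  The local variables are assumptions of
 * this example.
 *
 *	u8 mac[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, mbox, 0, pf, 0, 1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */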

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
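
/* Usage sketch (illustrative only): enable promiscuous mode on a VI while
 * leaving the MTU and all other Rx properties unchanged (-1 means "no
 * change" for each knob).
 *
 *	ret = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */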

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= max_naddr ? 0xffff : index;
		if (index < max_naddr)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
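
/* Usage sketch (illustrative only): installing exact-match filters for two
 * unicast addresses, falling back to the hash filter for any that don't
 * fit.  addr1/addr2 and the other locals are assumptions of this example.
 *
 *	const u8 *addrs[2] = { addr1, addr2 };
 *	u16 idx[2];
 *	u64 hash = 0;
 *
 *	ret = t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs, idx,
 *				&hash, true);
 *
 * On success, ret is the number of exact filters actually allocated.
 */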

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.  @idx can be -1 if the address is a new addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
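
/* Usage sketch (illustrative only): programming a freshly allocated VI's
 * MAC address for the first time (idx == -1 requests a new filter) and
 * remembering the returned filter index for later updates.  The names
 * pi, xact_addr_filt and dev are assumptions of this example.
 *
 *	pi->xact_addr_filt = t4_change_mac(adap, mbox, pi->viid, -1,
 *					   dev->dev_addr, true, true);
 *	if (pi->xact_addr_filt < 0)
 *		return pi->xact_addr_filt;
 */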

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
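
/* Usage sketch (illustrative only): programming the multicast hash from a
 * 64-bit vector such as one accumulated via the @hash argument of
 * t4_alloc_mac_filt() above; mhash is an assumption of this example.
 *
 *	ret = t4_set_addr_hash(adap, mbox, viid, false, mhash, true);
 */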

/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));

	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
			       FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}
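
/* Usage sketch (illustrative only): bringing a VI up for normal operation
 * with both Rx and Tx enabled and no DCB message delivery.
 *
 *	ret = t4_enable_vi(adap, mbox, viid, true, true);
 */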

/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
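
/* Usage sketch (illustrative only): blink a port's LED for roughly four
 * seconds (10 blinks at 2.5 Hz) so a human can find it in the rack.
 *
 *	ret = t4_identify_port(adap, mbox, viid, 10);
 */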

/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
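
/* Usage sketch (illustrative only): tearing down an ingress queue that has
 * a single attached free list, with 0xffff marking the absent FL1.  The
 * iqid/flid variables are assumptions of this example.
 *
 *	ret = t4_iq_free(adap, mbox, pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *			 iqid, flid, 0xffff);
 */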

/**
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
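
/* Usage sketch (illustrative only): a firmware event-queue handler would
 * forward firmware command replies here, roughly as below; msg and opcode
 * are assumptions of this example.
 *
 *	if (opcode == CPL_FW6_MSG)
 *		t4_handle_fw_rpl(adap, msg->data);
 */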

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

#define CIM_PF_NOACCESS 0xeeeeeeee

int t4_wait_dev_ready(void __iomem *regs)
{
	u32 whoami;

	whoami = readl(regs + PL_WHOAMI);
	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = readl(regs + PL_WHOAMI);
	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}
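
/* Worked example (illustrative): a 4MB Numonix part reports manufacturer
 * 0x20 in the low byte of the ID word and log2 of its size (0x16, i.e. 22)
 * in the byte extracted by "info >>= 16", so the code above computes
 *
 *	sf_nsec = 1 << (22 - 16) = 64 sectors (of 64KB each)
 *	sf_size = 1 << 22        = 4MB
 */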

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}
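
/* Worked example (illustrative): if the filter mode enables F_PORT, F_VLAN
 * and F_PROTOCOL, then t4_filter_field_shift(adap, F_PROTOCOL) walks the
 * enabled fields whose selector bits lie below F_PROTOCOL and returns
 * W_FT_PORT + W_FT_VLAN, the bit offset of the Protocol field within the
 * Compressed Filter Tuple.
 */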

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_port = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}