1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4fw_api.h"
39 
40 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
41 			 const u8 *fw_data, unsigned int size, int force);
42 /**
43  *	t4_wait_op_done_val - wait until an operation is completed
44  *	@adapter: the adapter performing the operation
45  *	@reg: the register to check for completion
46  *	@mask: a single-bit field within @reg that indicates completion
47  *	@polarity: the value of the field when the operation is completed
48  *	@attempts: number of check iterations
49  *	@delay: delay in usecs between iterations
50  *	@valp: where to store the value of the register at completion time
51  *
52  *	Wait until an operation is completed by checking a bit in a register
53  *	up to @attempts times.  If @valp is not NULL the value of the register
54  *	at the time it indicated completion is stored there.  Returns 0 if the
55  *	operation completes and	-EAGAIN	otherwise.
56  */
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 			       int polarity, int attempts, int delay, u32 *valp)
59 {
60 	while (1) {
61 		u32 val = t4_read_reg(adapter, reg);
62 
63 		if (!!(val & mask) == polarity) {
64 			if (valp)
65 				*valp = val;
66 			return 0;
67 		}
68 		if (--attempts == 0)
69 			return -EAGAIN;
70 		if (delay)
71 			udelay(delay);
72 	}
73 }
74 
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 				  int polarity, int attempts, int delay)
77 {
78 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
79 				   delay, NULL);
80 }
81 
82 /**
83  *	t4_set_reg_field - set a register field to a value
84  *	@adapter: the adapter to program
85  *	@addr: the register address
86  *	@mask: specifies the portion of the register to modify
87  *	@val: the new value for the register field
88  *
89  *	Sets a register field specified by the supplied mask to the
90  *	given value.
91  */
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93 		      u32 val)
94 {
95 	u32 v = t4_read_reg(adapter, addr) & ~mask;
96 
97 	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
99 }
100 
101 /**
102  *	t4_read_indirect - read indirectly addressed registers
103  *	@adap: the adapter
104  *	@addr_reg: register holding the indirect address
105  *	@data_reg: register holding the value of the indirect register
106  *	@vals: where the read register values are stored
107  *	@nregs: how many indirect registers to read
108  *	@start_idx: index of first indirect register to read
109  *
110  *	Reads registers that are accessed indirectly through an address/data
111  *	register pair.
112  */
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 			     unsigned int data_reg, u32 *vals,
115 			     unsigned int nregs, unsigned int start_idx)
116 {
117 	while (nregs--) {
118 		t4_write_reg(adap, addr_reg, start_idx);
119 		*vals++ = t4_read_reg(adap, data_reg);
120 		start_idx++;
121 	}
122 }
123 
124 /**
125  *	t4_write_indirect - write indirectly addressed registers
126  *	@adap: the adapter
127  *	@addr_reg: register holding the indirect addresses
128  *	@data_reg: register holding the value for the indirect registers
129  *	@vals: values to write
130  *	@nregs: how many indirect registers to write
131  *	@start_idx: address of first indirect register to write
132  *
133  *	Writes a sequential block of registers that are accessed indirectly
134  *	through an address/data register pair.
135  */
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 		       unsigned int data_reg, const u32 *vals,
138 		       unsigned int nregs, unsigned int start_idx)
139 {
140 	while (nregs--) {
141 		t4_write_reg(adap, addr_reg, start_idx++);
142 		t4_write_reg(adap, data_reg, *vals++);
143 	}
144 }
145 
146 /*
147  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148  * mechanism.  This guarantees that we get the real value even if we're
149  * operating within a Virtual Machine and the Hypervisor is trapping our
150  * Configuration Space accesses.
151  */
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153 {
154 	u32 req = ENABLE | FUNCTION(adap->fn) | reg;
155 
156 	if (is_t4(adap->params.chip))
157 		req |= F_LOCALCFG;
158 
159 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
160 	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
161 
162 	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
163 	 * Configuration Space read.  (None of the other fields matter when
164 	 * ENABLE is 0 so a simple register write is easier than a
165 	 * read-modify-write via t4_set_reg_field().)
166 	 */
167 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
168 }
169 
170 /*
171  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
172  */
173 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
174 			 u32 mbox_addr)
175 {
176 	for ( ; nflit; nflit--, mbox_addr += 8)
177 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
178 }
179 
180 /*
181  * Handle a FW assertion reported in a mailbox.
182  */
183 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
184 {
185 	struct fw_debug_cmd asrt;
186 
187 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
188 	dev_alert(adap->pdev_dev,
189 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
190 		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
191 		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
192 }
193 
194 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
195 {
196 	dev_err(adap->pdev_dev,
197 		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
198 		(unsigned long long)t4_read_reg64(adap, data_reg),
199 		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
200 		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
201 		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
202 		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
203 		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
204 		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
205 		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
206 }
207 
208 /**
209  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
210  *	@adap: the adapter
211  *	@mbox: index of the mailbox to use
212  *	@cmd: the command to write
213  *	@size: command length in bytes
214  *	@rpl: where to optionally store the reply
215  *	@sleep_ok: if true we may sleep while awaiting command completion
216  *
217  *	Sends the given command to FW through the selected mailbox and waits
218  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
219  *	store the FW's reply to the command.  The command and its optional
220  *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
221  *	to respond.  @sleep_ok determines whether we may sleep while awaiting
222  *	the response.  If sleeping is allowed we use progressive backoff
223  *	otherwise we spin.
224  *
225  *	The return value is 0 on success or a negative errno on failure.  A
226  *	failure can happen either because we are not able to execute the
227  *	command or FW executes it but signals an error.  In the latter case
228  *	the return value is the error code indicated by FW (negated).
229  */
230 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
231 		    void *rpl, bool sleep_ok)
232 {
233 	static const int delay[] = {
234 		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
235 	};
236 
237 	u32 v;
238 	u64 res;
239 	int i, ms, delay_idx;
240 	const __be64 *p = cmd;
241 	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
242 	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
243 
244 	if ((size & 15) || size > MBOX_LEN)
245 		return -EINVAL;
246 
247 	/*
248 	 * If the device is off-line, as in EEH, commands will time out.
249 	 * Fail them early so we don't waste time waiting.
250 	 */
251 	if (adap->pdev->error_state != pci_channel_io_normal)
252 		return -EIO;
253 
254 	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
255 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
256 		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
257 
258 	if (v != MBOX_OWNER_DRV)
259 		return v ? -EBUSY : -ETIMEDOUT;
260 
261 	for (i = 0; i < size; i += 8)
262 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
263 
264 	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
265 	t4_read_reg(adap, ctl_reg);          /* flush write */
266 
267 	delay_idx = 0;
268 	ms = delay[0];
269 
270 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
271 		if (sleep_ok) {
272 			ms = delay[delay_idx];  /* last element may repeat */
273 			if (delay_idx < ARRAY_SIZE(delay) - 1)
274 				delay_idx++;
275 			msleep(ms);
276 		} else
277 			mdelay(ms);
278 
279 		v = t4_read_reg(adap, ctl_reg);
280 		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
281 			if (!(v & MBMSGVALID)) {
282 				t4_write_reg(adap, ctl_reg, 0);
283 				continue;
284 			}
285 
286 			res = t4_read_reg64(adap, data_reg);
287 			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
288 				fw_asrt(adap, data_reg);
289 				res = FW_CMD_RETVAL(EIO);
290 			} else if (rpl)
291 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
292 
293 			if (FW_CMD_RETVAL_GET((int)res))
294 				dump_mbox(adap, mbox, data_reg);
295 			t4_write_reg(adap, ctl_reg, 0);
296 			return -FW_CMD_RETVAL_GET((int)res);
297 		}
298 	}
299 
300 	dump_mbox(adap, mbox, data_reg);
301 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
302 		*(const u8 *)cmd, mbox);
303 	return -ETIMEDOUT;
304 }
305 
306 /**
307  *	t4_mc_read - read from MC through backdoor accesses
308  *	@adap: the adapter
309  *	@addr: address of first byte requested
310  *	@idx: which MC to access
311  *	@data: 64 bytes of data containing the requested address
312  *	@ecc: where to store the corresponding 64-bit ECC word
313  *
314  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
315  *	that covers the requested address @addr.  If @parity is not %NULL it
316  *	is assigned the 64-bit ECC word for the read data.
317  */
318 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
319 {
320 	int i;
321 	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
322 	u32 mc_bist_status_rdata, mc_bist_data_pattern;
323 
324 	if (is_t4(adap->params.chip)) {
325 		mc_bist_cmd = MC_BIST_CMD;
326 		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
327 		mc_bist_cmd_len = MC_BIST_CMD_LEN;
328 		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
329 		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
330 	} else {
331 		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
332 		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
333 		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
334 		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
335 		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
336 	}
337 
338 	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
339 		return -EBUSY;
340 	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
341 	t4_write_reg(adap, mc_bist_cmd_len, 64);
342 	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
343 	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
344 		     BIST_CMD_GAP(1));
345 	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
346 	if (i)
347 		return i;
348 
349 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
350 
351 	for (i = 15; i >= 0; i--)
352 		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
353 	if (ecc)
354 		*ecc = t4_read_reg64(adap, MC_DATA(16));
355 #undef MC_DATA
356 	return 0;
357 }
358 
359 /**
360  *	t4_edc_read - read from EDC through backdoor accesses
361  *	@adap: the adapter
362  *	@idx: which EDC to access
363  *	@addr: address of first byte requested
364  *	@data: 64 bytes of data containing the requested address
365  *	@ecc: where to store the corresponding 64-bit ECC word
366  *
367  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
368  *	that covers the requested address @addr.  If @parity is not %NULL it
369  *	is assigned the 64-bit ECC word for the read data.
370  */
371 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
372 {
373 	int i;
374 	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
375 	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
376 
377 	if (is_t4(adap->params.chip)) {
378 		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
379 		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
380 		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
381 		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
382 						    idx);
383 		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
384 						    idx);
385 	} else {
386 		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
387 		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
388 		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
389 		edc_bist_cmd_data_pattern =
390 			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
391 		edc_bist_status_rdata =
392 			 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
393 	}
394 
395 	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
396 		return -EBUSY;
397 	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
398 	t4_write_reg(adap, edc_bist_cmd_len, 64);
399 	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
400 	t4_write_reg(adap, edc_bist_cmd,
401 		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
402 	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
403 	if (i)
404 		return i;
405 
406 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
407 
408 	for (i = 15; i >= 0; i--)
409 		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
410 	if (ecc)
411 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
412 #undef EDC_DATA
413 	return 0;
414 }
415 
416 /**
417  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
418  *	@adap: the adapter
419  *	@win: PCI-E Memory Window to use
420  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
421  *	@addr: address within indicated memory type
422  *	@len: amount of memory to transfer
423  *	@buf: host memory buffer
424  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
425  *
426  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
427  *	firmware memory address and host buffer must be aligned on 32-bit
428  *	boudaries; the length may be arbitrary.  The memory is transferred as
429  *	a raw byte sequence from/to the firmware's memory.  If this memory
430  *	contains data structures which contain multi-byte integers, it's the
431  *	caller's responsibility to perform appropriate byte order conversions.
432  */
433 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
434 		 u32 len, __be32 *buf, int dir)
435 {
436 	u32 pos, offset, resid, memoffset;
437 	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
438 
439 	/* Argument sanity checks ...
440 	 */
441 	if (addr & 0x3)
442 		return -EINVAL;
443 
444 	/* It's convenient to be able to handle lengths which aren't a
445 	 * multiple of 32-bits because we often end up transferring files to
446 	 * the firmware.  So we'll handle that by normalizing the length here
447 	 * and then handling any residual transfer at the end.
448 	 */
449 	resid = len & 0x3;
450 	len -= resid;
451 
452 	/* Offset into the region of memory which is being accessed
453 	 * MEM_EDC0 = 0
454 	 * MEM_EDC1 = 1
455 	 * MEM_MC   = 2 -- T4
456 	 * MEM_MC0  = 2 -- For T5
457 	 * MEM_MC1  = 3 -- For T5
458 	 */
459 	edc_size  = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
460 	if (mtype != MEM_MC1)
461 		memoffset = (mtype * (edc_size * 1024 * 1024));
462 	else {
463 		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
464 						       MA_EXT_MEMORY_BAR));
465 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
466 	}
467 
468 	/* Determine the PCIE_MEM_ACCESS_OFFSET */
469 	addr = addr + memoffset;
470 
471 	/* Each PCI-E Memory Window is programmed with a window size -- or
472 	 * "aperture" -- which controls the granularity of its mapping onto
473 	 * adapter memory.  We need to grab that aperture in order to know
474 	 * how to use the specified window.  The window is also programmed
475 	 * with the base address of the Memory Window in BAR0's address
476 	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
477 	 * the address is relative to BAR0.
478 	 */
479 	mem_reg = t4_read_reg(adap,
480 			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
481 						  win));
482 	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
483 	mem_base = GET_PCIEOFST(mem_reg) << 10;
484 	if (is_t4(adap->params.chip))
485 		mem_base -= adap->t4_bar0;
486 	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
487 
488 	/* Calculate our initial PCI-E Memory Window Position and Offset into
489 	 * that Window.
490 	 */
491 	pos = addr & ~(mem_aperture-1);
492 	offset = addr - pos;
493 
494 	/* Set up initial PCI-E Memory Window to cover the start of our
495 	 * transfer.  (Read it back to ensure that changes propagate before we
496 	 * attempt to use the new value.)
497 	 */
498 	t4_write_reg(adap,
499 		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
500 		     pos | win_pf);
501 	t4_read_reg(adap,
502 		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
503 
504 	/* Transfer data to/from the adapter as long as there's an integral
505 	 * number of 32-bit transfers to complete.
506 	 */
507 	while (len > 0) {
508 		if (dir == T4_MEMORY_READ)
509 			*buf++ = (__force __be32) t4_read_reg(adap,
510 							mem_base + offset);
511 		else
512 			t4_write_reg(adap, mem_base + offset,
513 				     (__force u32) *buf++);
514 		offset += sizeof(__be32);
515 		len -= sizeof(__be32);
516 
517 		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here, even when "len" has reached 0, allows us
		 * to set up the PCI-E Memory Window for a possible final
		 * residual transfer below ...
522 		 */
523 		if (offset == mem_aperture) {
524 			pos += mem_aperture;
525 			offset = 0;
526 			t4_write_reg(adap,
527 				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
528 							 win), pos | win_pf);
529 			t4_read_reg(adap,
530 				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
531 							win));
532 		}
533 	}
534 
535 	/* If the original transfer had a length which wasn't a multiple of
536 	 * 32-bits, now's where we need to finish off the transfer of the
537 	 * residual amount.  The PCI-E Memory Window has already been moved
538 	 * above (if necessary) to cover this final transfer.
539 	 */
540 	if (resid) {
541 		union {
542 			__be32 word;
543 			char byte[4];
544 		} last;
545 		unsigned char *bp;
546 		int i;
547 
548 		if (dir == T4_MEMORY_READ) {
549 			last.word = (__force __be32) t4_read_reg(adap,
550 							mem_base + offset);
			for (bp = (unsigned char *)buf, i = 0; i < resid; i++)
552 				bp[i] = last.byte[i];
553 		} else {
554 			last.word = *buf;
555 			for (i = resid; i < 4; i++)
556 				last.byte[i] = 0;
557 			t4_write_reg(adap, mem_base + offset,
558 				     (__force u32) last.word);
559 		}
560 	}
561 
562 	return 0;
563 }
564 
565 #define EEPROM_STAT_ADDR   0x7bfc
566 #define VPD_BASE           0x400
567 #define VPD_BASE_OLD       0
568 #define VPD_LEN            1024
569 
570 /**
571  *	t4_seeprom_wp - enable/disable EEPROM write protection
572  *	@adapter: the adapter
573  *	@enable: whether to enable or disable write protection
574  *
575  *	Enables or disables write protection on the serial EEPROM.
576  */
577 int t4_seeprom_wp(struct adapter *adapter, bool enable)
578 {
579 	unsigned int v = enable ? 0xc : 0;
580 	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
581 	return ret < 0 ? ret : 0;
582 }
583 
584 /**
585  *	get_vpd_params - read VPD parameters from VPD EEPROM
586  *	@adapter: adapter to read
587  *	@p: where to store the parameters
588  *
589  *	Reads card parameters stored in VPD EEPROM.
590  */
591 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
592 {
593 	u32 cclk_param, cclk_val;
594 	int i, ret, addr;
595 	int ec, sn, pn;
596 	u8 *vpd, csum;
597 	unsigned int vpdr_len, kw_offset, id_len;
598 
599 	vpd = vmalloc(VPD_LEN);
600 	if (!vpd)
601 		return -ENOMEM;
602 
603 	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
604 	if (ret < 0)
605 		goto out;
606 	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
607 
608 	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
609 	if (ret < 0)
610 		goto out;
611 
612 	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
613 		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
614 		ret = -EINVAL;
615 		goto out;
616 	}
617 
618 	id_len = pci_vpd_lrdt_size(vpd);
619 	if (id_len > ID_LEN)
620 		id_len = ID_LEN;
621 
622 	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
623 	if (i < 0) {
624 		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
625 		ret = -EINVAL;
626 		goto out;
627 	}
628 
629 	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
630 	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
631 	if (vpdr_len + kw_offset > VPD_LEN) {
632 		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
633 		ret = -EINVAL;
634 		goto out;
635 	}
636 
637 #define FIND_VPD_KW(var, name) do { \
638 	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
639 	if (var < 0) { \
640 		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
641 		ret = -EINVAL; \
642 		goto out; \
643 	} \
644 	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
645 } while (0)
646 
647 	FIND_VPD_KW(i, "RV");
648 	for (csum = 0; i >= 0; i--)
649 		csum += vpd[i];
650 
651 	if (csum) {
652 		dev_err(adapter->pdev_dev,
653 			"corrupted VPD EEPROM, actual csum %u\n", csum);
654 		ret = -EINVAL;
655 		goto out;
656 	}
657 
658 	FIND_VPD_KW(ec, "EC");
659 	FIND_VPD_KW(sn, "SN");
660 	FIND_VPD_KW(pn, "PN");
661 #undef FIND_VPD_KW
662 
663 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
664 	strim(p->id);
665 	memcpy(p->ec, vpd + ec, EC_LEN);
666 	strim(p->ec);
667 	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
668 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
669 	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
671 	strim(p->pn);
672 
673 	/*
674 	 * Ask firmware for the Core Clock since it knows how to translate the
675 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
676 	 */
677 	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
678 		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
679 	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
680 			      1, &cclk_param, &cclk_val);
681 
682 out:
683 	vfree(vpd);
684 	if (ret)
685 		return ret;
686 	p->cclk = cclk_val;
687 
688 	return 0;
689 }
690 
691 /* serial flash and firmware constants */
692 enum {
693 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
694 
695 	/* flash command opcodes */
696 	SF_PROG_PAGE    = 2,          /* program page */
697 	SF_WR_DISABLE   = 4,          /* disable writes */
698 	SF_RD_STATUS    = 5,          /* read status register */
699 	SF_WR_ENABLE    = 6,          /* enable writes */
700 	SF_RD_DATA_FAST = 0xb,        /* read flash */
701 	SF_RD_ID        = 0x9f,       /* read ID */
702 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
703 
704 	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
705 };
706 
707 /**
708  *	sf1_read - read data from the serial flash
709  *	@adapter: the adapter
710  *	@byte_cnt: number of bytes to read
711  *	@cont: whether another operation will be chained
712  *	@lock: whether to lock SF for PL access only
713  *	@valp: where to store the read data
714  *
715  *	Reads up to 4 bytes of data from the serial flash.  The location of
716  *	the read needs to be specified prior to calling this by issuing the
717  *	appropriate commands to the serial flash.
718  */
719 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
720 		    int lock, u32 *valp)
721 {
722 	int ret;
723 
724 	if (!byte_cnt || byte_cnt > 4)
725 		return -EINVAL;
726 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
727 		return -EBUSY;
728 	cont = cont ? SF_CONT : 0;
729 	lock = lock ? SF_LOCK : 0;
730 	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
731 	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
732 	if (!ret)
733 		*valp = t4_read_reg(adapter, SF_DATA);
734 	return ret;
735 }
736 
737 /**
738  *	sf1_write - write data to the serial flash
739  *	@adapter: the adapter
740  *	@byte_cnt: number of bytes to write
741  *	@cont: whether another operation will be chained
742  *	@lock: whether to lock SF for PL access only
743  *	@val: value to write
744  *
745  *	Writes up to 4 bytes of data to the serial flash.  The location of
746  *	the write needs to be specified prior to calling this by issuing the
747  *	appropriate commands to the serial flash.
748  */
749 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
750 		     int lock, u32 val)
751 {
752 	if (!byte_cnt || byte_cnt > 4)
753 		return -EINVAL;
754 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
755 		return -EBUSY;
756 	cont = cont ? SF_CONT : 0;
757 	lock = lock ? SF_LOCK : 0;
758 	t4_write_reg(adapter, SF_DATA, val);
759 	t4_write_reg(adapter, SF_OP, lock |
760 		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
761 	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
762 }
763 
764 /**
765  *	flash_wait_op - wait for a flash operation to complete
766  *	@adapter: the adapter
767  *	@attempts: max number of polls of the status register
768  *	@delay: delay between polls in ms
769  *
770  *	Wait for a flash operation to complete by polling the status register.
771  */
772 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
773 {
774 	int ret;
775 	u32 status;
776 
777 	while (1) {
778 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
779 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
780 			return ret;
781 		if (!(status & 1))
782 			return 0;
783 		if (--attempts == 0)
784 			return -EAGAIN;
785 		if (delay)
786 			msleep(delay);
787 	}
788 }
789 
790 /**
791  *	t4_read_flash - read words from serial flash
792  *	@adapter: the adapter
793  *	@addr: the start address for the read
794  *	@nwords: how many 32-bit words to read
795  *	@data: where to store the read data
796  *	@byte_oriented: whether to store data as bytes or as words
797  *
798  *	Read the specified number of 32-bit words from the serial flash.
799  *	If @byte_oriented is set the read data is stored as a byte array
800  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
801  *	natural endianess.
802  */
803 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
804 			 unsigned int nwords, u32 *data, int byte_oriented)
805 {
806 	int ret;
807 
808 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
809 		return -EINVAL;
810 
811 	addr = swab32(addr) | SF_RD_DATA_FAST;
812 
813 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
814 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
815 		return ret;
816 
817 	for ( ; nwords; nwords--, data++) {
818 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
819 		if (nwords == 1)
820 			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
821 		if (ret)
822 			return ret;
823 		if (byte_oriented)
824 			*data = (__force __u32) (htonl(*data));
825 	}
826 	return 0;
827 }
828 
829 /**
830  *	t4_write_flash - write up to a page of data to the serial flash
831  *	@adapter: the adapter
832  *	@addr: the start address to write
833  *	@n: length of data to write in bytes
834  *	@data: the data to write
835  *
836  *	Writes up to a page of data (256 bytes) to the serial flash starting
837  *	at the given address.  All the data must be written to the same page.
838  */
839 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
840 			  unsigned int n, const u8 *data)
841 {
842 	int ret;
843 	u32 buf[64];
844 	unsigned int i, c, left, val, offset = addr & 0xff;
845 
846 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
847 		return -EINVAL;
848 
849 	val = swab32(addr) | SF_PROG_PAGE;
850 
851 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
852 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
853 		goto unlock;
854 
855 	for (left = n; left; left -= c) {
856 		c = min(left, 4U);
857 		for (val = 0, i = 0; i < c; ++i)
858 			val = (val << 8) + *data++;
859 
860 		ret = sf1_write(adapter, c, c != left, 1, val);
861 		if (ret)
862 			goto unlock;
863 	}
864 	ret = flash_wait_op(adapter, 8, 1);
865 	if (ret)
866 		goto unlock;
867 
868 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
869 
870 	/* Read the page to verify the write succeeded */
871 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
872 	if (ret)
873 		return ret;
874 
875 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
876 		dev_err(adapter->pdev_dev,
877 			"failed to correctly write the flash page at %#x\n",
878 			addr);
879 		return -EIO;
880 	}
881 	return 0;
882 
883 unlock:
884 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
885 	return ret;
886 }
887 
888 /**
889  *	t4_get_fw_version - read the firmware version
890  *	@adapter: the adapter
891  *	@vers: where to place the version
892  *
893  *	Reads the FW version from flash.
894  */
895 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
896 {
897 	return t4_read_flash(adapter, FLASH_FW_START +
898 			     offsetof(struct fw_hdr, fw_ver), 1,
899 			     vers, 0);
900 }
901 
902 /**
903  *	t4_get_tp_version - read the TP microcode version
904  *	@adapter: the adapter
905  *	@vers: where to place the version
906  *
907  *	Reads the TP microcode version from flash.
908  */
909 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
910 {
911 	return t4_read_flash(adapter, FLASH_FW_START +
912 			     offsetof(struct fw_hdr, tp_microcode_ver),
913 			     1, vers, 0);
914 }
915 
916 /* Is the given firmware API compatible with the one the driver was compiled
917  * with?
918  */
919 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
920 {
921 
922 	/* short circuit if it's the exact same firmware version */
923 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
924 		return 1;
925 
926 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
927 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
928 	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
929 		return 1;
930 #undef SAME_INTF
931 
932 	return 0;
933 }
934 
935 /* The firmware in the filesystem is usable, but should it be installed?
936  * This routine explains itself in detail if it indicates the filesystem
937  * firmware should be installed.
938  */
939 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
940 				int k, int c)
941 {
942 	const char *reason;
943 
944 	if (!card_fw_usable) {
945 		reason = "incompatible or unusable";
946 		goto install;
947 	}
948 
949 	if (k > c) {
		reason = "older than the version bundled with this driver";
951 		goto install;
952 	}
953 
954 	return 0;
955 
956 install:
957 	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
958 		"installing firmware %u.%u.%u.%u on card.\n",
959 		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
960 		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
961 		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
962 		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
963 
964 	return 1;
965 }
966 
967 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
968 	       const u8 *fw_data, unsigned int fw_size,
969 	       struct fw_hdr *card_fw, enum dev_state state,
970 	       int *reset)
971 {
972 	int ret, card_fw_usable, fs_fw_usable;
973 	const struct fw_hdr *fs_fw;
974 	const struct fw_hdr *drv_fw;
975 
976 	drv_fw = &fw_info->fw_hdr;
977 
978 	/* Read the header of the firmware on the card */
979 	ret = -t4_read_flash(adap, FLASH_FW_START,
980 			    sizeof(*card_fw) / sizeof(uint32_t),
981 			    (uint32_t *)card_fw, 1);
982 	if (ret == 0) {
983 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
984 	} else {
985 		dev_err(adap->pdev_dev,
986 			"Unable to read card's firmware header: %d\n", ret);
987 		card_fw_usable = 0;
988 	}
989 
990 	if (fw_data != NULL) {
991 		fs_fw = (const void *)fw_data;
992 		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
993 	} else {
994 		fs_fw = NULL;
995 		fs_fw_usable = 0;
996 	}
997 
998 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
999 	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
1000 		/* Common case: the firmware on the card is an exact match and
1001 		 * the filesystem one is an exact match too, or the filesystem
1002 		 * one is absent/incompatible.
1003 		 */
1004 	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
1005 		   should_install_fs_fw(adap, card_fw_usable,
1006 					be32_to_cpu(fs_fw->fw_ver),
1007 					be32_to_cpu(card_fw->fw_ver))) {
1008 		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
1009 				     fw_size, 0);
1010 		if (ret != 0) {
1011 			dev_err(adap->pdev_dev,
1012 				"failed to install firmware: %d\n", ret);
1013 			goto bye;
1014 		}
1015 
1016 		/* Installed successfully, update the cached header too. */
1017 		memcpy(card_fw, fs_fw, sizeof(*card_fw));
1018 		card_fw_usable = 1;
1019 		*reset = 0;	/* already reset as part of load_fw */
1020 	}
1021 
1022 	if (!card_fw_usable) {
1023 		uint32_t d, c, k;
1024 
1025 		d = be32_to_cpu(drv_fw->fw_ver);
1026 		c = be32_to_cpu(card_fw->fw_ver);
1027 		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1028 
1029 		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1030 			"chip state %d, "
1031 			"driver compiled with %d.%d.%d.%d, "
1032 			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1033 			state,
1034 			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1035 			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1036 			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1037 			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1038 			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1039 			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1040 		ret = EINVAL;
1041 		goto bye;
1042 	}
1043 
1044 	/* We're using whatever's on the card and it's known to be good. */
1045 	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1046 	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1047 
1048 bye:
1049 	return ret;
1050 }
1051 
1052 /**
1053  *	t4_flash_erase_sectors - erase a range of flash sectors
1054  *	@adapter: the adapter
1055  *	@start: the first sector to erase
1056  *	@end: the last sector to erase
1057  *
1058  *	Erases the sectors in the given inclusive range.
1059  */
1060 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1061 {
1062 	int ret = 0;
1063 
1064 	while (start <= end) {
1065 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1066 		    (ret = sf1_write(adapter, 4, 0, 1,
1067 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1068 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1069 			dev_err(adapter->pdev_dev,
1070 				"erase of flash sector %d failed, error %d\n",
1071 				start, ret);
1072 			break;
1073 		}
1074 		start++;
1075 	}
1076 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
1077 	return ret;
1078 }
1079 
1080 /**
1081  *	t4_flash_cfg_addr - return the address of the flash configuration file
1082  *	@adapter: the adapter
1083  *
1084  *	Return the address within the flash where the Firmware Configuration
1085  *	File is stored.
1086  */
1087 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1088 {
1089 	if (adapter->params.sf_size == 0x100000)
1090 		return FLASH_FPGA_CFG_START;
1091 	else
1092 		return FLASH_CFG_START;
1093 }
1094 
1095 /**
1096  *	t4_load_fw - download firmware
1097  *	@adap: the adapter
1098  *	@fw_data: the firmware image to write
1099  *	@size: image size
1100  *
1101  *	Write the supplied firmware image to the card's serial flash.
1102  */
1103 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1104 {
1105 	u32 csum;
1106 	int ret, addr;
1107 	unsigned int i;
1108 	u8 first_page[SF_PAGE_SIZE];
1109 	const __be32 *p = (const __be32 *)fw_data;
1110 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1111 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1112 	unsigned int fw_img_start = adap->params.sf_fw_start;
1113 	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
1114 
1115 	if (!size) {
1116 		dev_err(adap->pdev_dev, "FW image has no data\n");
1117 		return -EINVAL;
1118 	}
1119 	if (size & 511) {
1120 		dev_err(adap->pdev_dev,
1121 			"FW image size not multiple of 512 bytes\n");
1122 		return -EINVAL;
1123 	}
1124 	if (ntohs(hdr->len512) * 512 != size) {
1125 		dev_err(adap->pdev_dev,
1126 			"FW image size differs from size in FW header\n");
1127 		return -EINVAL;
1128 	}
1129 	if (size > FW_MAX_SIZE) {
1130 		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1131 			FW_MAX_SIZE);
1132 		return -EFBIG;
1133 	}
1134 
1135 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1136 		csum += ntohl(p[i]);
1137 
1138 	if (csum != 0xffffffff) {
1139 		dev_err(adap->pdev_dev,
1140 			"corrupted firmware image, checksum %#x\n", csum);
1141 		return -EINVAL;
1142 	}
1143 
1144 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1145 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1146 	if (ret)
1147 		goto out;
1148 
1149 	/*
1150 	 * We write the correct version at the end so the driver can see a bad
1151 	 * version if the FW write fails.  Start by writing a copy of the
1152 	 * first page with a bad version.
1153 	 */
1154 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1155 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1156 	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1157 	if (ret)
1158 		goto out;
1159 
1160 	addr = fw_img_start;
1161 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1162 		addr += SF_PAGE_SIZE;
1163 		fw_data += SF_PAGE_SIZE;
1164 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1165 		if (ret)
1166 			goto out;
1167 	}
1168 
1169 	ret = t4_write_flash(adap,
1170 			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
1171 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1172 out:
1173 	if (ret)
1174 		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1175 			ret);
1176 	return ret;
1177 }
1178 
1179 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1180 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1181 		     FW_PORT_CAP_ANEG)
1182 
1183 /**
1184  *	t4_link_start - apply link configuration to MAC/PHY
1185  *	@phy: the PHY to setup
1186  *	@mac: the MAC to setup
1187  *	@lc: the requested link configuration
1188  *
1189  *	Set up a port's MAC and PHY according to a desired link configuration.
1190  *	- If the PHY can auto-negotiate first decide what to advertise, then
1191  *	  enable/disable auto-negotiation as desired, and reset.
1192  *	- If the PHY does not auto-negotiate just reset it.
1193  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1194  *	  otherwise do it later based on the outcome of auto-negotiation.
1195  */
1196 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1197 		  struct link_config *lc)
1198 {
1199 	struct fw_port_cmd c;
1200 	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1201 
1202 	lc->link_ok = 0;
1203 	if (lc->requested_fc & PAUSE_RX)
1204 		fc |= FW_PORT_CAP_FC_RX;
1205 	if (lc->requested_fc & PAUSE_TX)
1206 		fc |= FW_PORT_CAP_FC_TX;
1207 
1208 	memset(&c, 0, sizeof(c));
1209 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1210 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1211 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1212 				  FW_LEN16(c));
1213 
1214 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1215 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1216 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1217 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1218 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1219 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1220 	} else
1221 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1222 
1223 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1224 }
1225 
1226 /**
1227  *	t4_restart_aneg - restart autonegotiation
1228  *	@adap: the adapter
1229  *	@mbox: mbox to use for the FW command
1230  *	@port: the port id
1231  *
1232  *	Restarts autonegotiation for the selected port.
1233  */
1234 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1235 {
1236 	struct fw_port_cmd c;
1237 
1238 	memset(&c, 0, sizeof(c));
1239 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1240 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1241 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1242 				  FW_LEN16(c));
1243 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1244 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1245 }
1246 
1247 typedef void (*int_handler_t)(struct adapter *adap);
1248 
1249 struct intr_info {
1250 	unsigned int mask;       /* bits to check in interrupt status */
1251 	const char *msg;         /* message to print or NULL */
1252 	short stat_idx;          /* stat counter to increment or -1 */
1253 	unsigned short fatal;    /* whether the condition reported is fatal */
1254 	int_handler_t int_handler; /* platform-specific int handler */
1255 };
1256 
1257 /**
1258  *	t4_handle_intr_status - table driven interrupt handler
1259  *	@adapter: the adapter that generated the interrupt
1260  *	@reg: the interrupt status register to process
1261  *	@acts: table of interrupt actions
1262  *
1263  *	A table driven interrupt handler that applies a set of masks to an
1264  *	interrupt status word and performs the corresponding actions if the
1265  *	interrupts described by the mask have occurred.  The actions include
1266  *	optionally emitting a warning or alert message.  The table is terminated
1267  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1268  *	conditions.
1269  */
1270 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1271 				 const struct intr_info *acts)
1272 {
1273 	int fatal = 0;
1274 	unsigned int mask = 0;
1275 	unsigned int status = t4_read_reg(adapter, reg);
1276 
1277 	for ( ; acts->mask; ++acts) {
1278 		if (!(status & acts->mask))
1279 			continue;
1280 		if (acts->fatal) {
1281 			fatal++;
1282 			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1283 				  status & acts->mask);
1284 		} else if (acts->msg && printk_ratelimit())
1285 			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1286 				 status & acts->mask);
1287 		if (acts->int_handler)
1288 			acts->int_handler(adapter);
1289 		mask |= acts->mask;
1290 	}
1291 	status &= mask;
1292 	if (status)                           /* clear processed interrupts */
1293 		t4_write_reg(adapter, reg, status);
1294 	return fatal;
1295 }
1296 
1297 /*
1298  * Interrupt handler for the PCIE module.
1299  */
1300 static void pcie_intr_handler(struct adapter *adapter)
1301 {
1302 	static const struct intr_info sysbus_intr_info[] = {
1303 		{ RNPP, "RXNP array parity error", -1, 1 },
1304 		{ RPCP, "RXPC array parity error", -1, 1 },
1305 		{ RCIP, "RXCIF array parity error", -1, 1 },
1306 		{ RCCP, "Rx completions control array parity error", -1, 1 },
1307 		{ RFTP, "RXFT array parity error", -1, 1 },
1308 		{ 0 }
1309 	};
1310 	static const struct intr_info pcie_port_intr_info[] = {
1311 		{ TPCP, "TXPC array parity error", -1, 1 },
1312 		{ TNPP, "TXNP array parity error", -1, 1 },
1313 		{ TFTP, "TXFT array parity error", -1, 1 },
1314 		{ TCAP, "TXCA array parity error", -1, 1 },
1315 		{ TCIP, "TXCIF array parity error", -1, 1 },
1316 		{ RCAP, "RXCA array parity error", -1, 1 },
1317 		{ OTDD, "outbound request TLP discarded", -1, 1 },
1318 		{ RDPE, "Rx data parity error", -1, 1 },
1319 		{ TDUE, "Tx uncorrectable data error", -1, 1 },
1320 		{ 0 }
1321 	};
1322 	static const struct intr_info pcie_intr_info[] = {
1323 		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1324 		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1325 		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
1326 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1327 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1328 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1329 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1330 		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1331 		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1333 		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1334 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1335 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1336 		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1337 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1338 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1339 		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1340 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1341 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1342 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1343 		{ FIDPERR, "PCI FID parity error", -1, 1 },
1344 		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1345 		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
1346 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1347 		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1348 		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
1349 		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
1350 		{ PCIESINT, "PCI core secondary fault", -1, 1 },
1351 		{ PCIEPINT, "PCI core primary fault", -1, 1 },
1352 		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1353 		{ 0 }
1354 	};
1355 
	static const struct intr_info t5_pcie_intr_info[] = {
1357 		{ MSTGRPPERR, "Master Response Read Queue parity error",
1358 		  -1, 1 },
1359 		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1360 		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1361 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1362 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1363 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1364 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1365 		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1366 		  -1, 1 },
1367 		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1368 		  -1, 1 },
		{ TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1370 		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1371 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1372 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1373 		{ DREQWRPERR, "PCI DMA channel write request parity error",
1374 		  -1, 1 },
1375 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1376 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel write request parity error",
		  -1, 1 },
1378 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1379 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1380 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1381 		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
1383 		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1384 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1385 		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1386 		  -1, 1 },
1387 		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1388 		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1389 		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1390 		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1391 		{ READRSPERR, "Outbound read error", -1, 0 },
1392 		{ 0 }
1393 	};
1394 
1395 	int fat;
1396 
1397 	fat = t4_handle_intr_status(adapter,
1398 				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1399 				    sysbus_intr_info) +
1400 	      t4_handle_intr_status(adapter,
1401 				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1402 				    pcie_port_intr_info) +
1403 	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1404 				    is_t4(adapter->params.chip) ?
1405 				    pcie_intr_info : t5_pcie_intr_info);
1406 
1407 	if (fat)
1408 		t4_fatal_err(adapter);
1409 }
1410 
1411 /*
1412  * TP interrupt handler.
1413  */
1414 static void tp_intr_handler(struct adapter *adapter)
1415 {
1416 	static const struct intr_info tp_intr_info[] = {
1417 		{ 0x3fffffff, "TP parity error", -1, 1 },
1418 		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1419 		{ 0 }
1420 	};
1421 
1422 	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1423 		t4_fatal_err(adapter);
1424 }
1425 
1426 /*
1427  * SGE interrupt handler.
1428  */
1429 static void sge_intr_handler(struct adapter *adapter)
1430 {
1431 	u64 v;
1432 
1433 	static const struct intr_info sge_intr_info[] = {
1434 		{ ERR_CPL_EXCEED_IQE_SIZE,
1435 		  "SGE received CPL exceeding IQE size", -1, 1 },
1436 		{ ERR_INVALID_CIDX_INC,
1437 		  "SGE GTS CIDX increment too large", -1, 0 },
1438 		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1439 		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1440 		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1441 		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1442 		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1443 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1444 		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1445 		  0 },
1446 		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1447 		  0 },
1448 		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1449 		  0 },
1450 		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1451 		  0 },
1452 		{ ERR_ING_CTXT_PRIO,
1453 		  "SGE too many priority ingress contexts", -1, 0 },
1454 		{ ERR_EGR_CTXT_PRIO,
1455 		  "SGE too many priority egress contexts", -1, 0 },
1456 		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1457 		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1458 		{ 0 }
1459 	};
1460 
1461 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1462 		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1463 	if (v) {
1464 		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1465 				(unsigned long long)v);
1466 		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1467 		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1468 	}
1469 
1470 	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1471 	    v != 0)
1472 		t4_fatal_err(adapter);
1473 }
1474 
1475 /*
1476  * CIM interrupt handler.
1477  */
1478 static void cim_intr_handler(struct adapter *adapter)
1479 {
1480 	static const struct intr_info cim_intr_info[] = {
1481 		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1482 		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
1483 		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
1484 		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1485 		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1486 		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1487 		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1488 		{ 0 }
1489 	};
1490 	static const struct intr_info cim_upintr_info[] = {
1491 		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1492 		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1493 		{ ILLWRINT, "CIM illegal write", -1, 1 },
1494 		{ ILLRDINT, "CIM illegal read", -1, 1 },
1495 		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1496 		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1497 		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1498 		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1499 		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1500 		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1501 		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1502 		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1503 		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1504 		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1505 		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1506 		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1519 		{ 0 }
1520 	};
1521 
1522 	int fat;
1523 
1524 	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1525 				    cim_intr_info) +
1526 	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1527 				    cim_upintr_info);
1528 	if (fat)
1529 		t4_fatal_err(adapter);
1530 }
1531 
1532 /*
1533  * ULP RX interrupt handler.
1534  */
1535 static void ulprx_intr_handler(struct adapter *adapter)
1536 {
1537 	static const struct intr_info ulprx_intr_info[] = {
1538 		{ 0x1800000, "ULPRX context error", -1, 1 },
1539 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1540 		{ 0 }
1541 	};
1542 
1543 	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1544 		t4_fatal_err(adapter);
1545 }
1546 
1547 /*
1548  * ULP TX interrupt handler.
1549  */
1550 static void ulptx_intr_handler(struct adapter *adapter)
1551 {
1552 	static const struct intr_info ulptx_intr_info[] = {
1553 		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1554 		  0 },
1555 		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1556 		  0 },
1557 		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1558 		  0 },
1559 		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1560 		  0 },
1561 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1562 		{ 0 }
1563 	};
1564 
1565 	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1566 		t4_fatal_err(adapter);
1567 }
1568 
1569 /*
1570  * PM TX interrupt handler.
1571  */
1572 static void pmtx_intr_handler(struct adapter *adapter)
1573 {
1574 	static const struct intr_info pmtx_intr_info[] = {
1575 		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1576 		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1577 		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1578 		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1579 		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1580 		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1581 		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1582 		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1583 		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1584 		{ 0 }
1585 	};
1586 
1587 	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1588 		t4_fatal_err(adapter);
1589 }
1590 
1591 /*
1592  * PM RX interrupt handler.
1593  */
1594 static void pmrx_intr_handler(struct adapter *adapter)
1595 {
1596 	static const struct intr_info pmrx_intr_info[] = {
1597 		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1598 		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1599 		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1600 		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1601 		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1602 		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1603 		{ 0 }
1604 	};
1605 
1606 	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1607 		t4_fatal_err(adapter);
1608 }
1609 
1610 /*
1611  * CPL switch interrupt handler.
1612  */
1613 static void cplsw_intr_handler(struct adapter *adapter)
1614 {
1615 	static const struct intr_info cplsw_intr_info[] = {
1616 		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1617 		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1618 		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1619 		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1620 		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1621 		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1622 		{ 0 }
1623 	};
1624 
1625 	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1626 		t4_fatal_err(adapter);
1627 }
1628 
1629 /*
1630  * LE interrupt handler.
1631  */
1632 static void le_intr_handler(struct adapter *adap)
1633 {
1634 	static const struct intr_info le_intr_info[] = {
1635 		{ LIPMISS, "LE LIP miss", -1, 0 },
1636 		{ LIP0, "LE 0 LIP error", -1, 0 },
1637 		{ PARITYERR, "LE parity error", -1, 1 },
1638 		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
1639 		{ REQQPARERR, "LE request queue parity error", -1, 1 },
1640 		{ 0 }
1641 	};
1642 
1643 	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1644 		t4_fatal_err(adap);
1645 }
1646 
1647 /*
1648  * MPS interrupt handler.
1649  */
1650 static void mps_intr_handler(struct adapter *adapter)
1651 {
1652 	static const struct intr_info mps_rx_intr_info[] = {
1653 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1654 		{ 0 }
1655 	};
1656 	static const struct intr_info mps_tx_intr_info[] = {
1657 		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1658 		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1659 		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1660 		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1661 		{ BUBBLE, "MPS Tx underflow", -1, 1 },
1662 		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1663 		{ FRMERR, "MPS Tx framing error", -1, 1 },
1664 		{ 0 }
1665 	};
1666 	static const struct intr_info mps_trc_intr_info[] = {
1667 		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
1668 		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1669 		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
1670 		{ 0 }
1671 	};
1672 	static const struct intr_info mps_stat_sram_intr_info[] = {
1673 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1674 		{ 0 }
1675 	};
1676 	static const struct intr_info mps_stat_tx_intr_info[] = {
1677 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1678 		{ 0 }
1679 	};
1680 	static const struct intr_info mps_stat_rx_intr_info[] = {
1681 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1682 		{ 0 }
1683 	};
1684 	static const struct intr_info mps_cls_intr_info[] = {
1685 		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1686 		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1687 		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1688 		{ 0 }
1689 	};
1690 
1691 	int fat;
1692 
1693 	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1694 				    mps_rx_intr_info) +
1695 	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1696 				    mps_tx_intr_info) +
1697 	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1698 				    mps_trc_intr_info) +
1699 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1700 				    mps_stat_sram_intr_info) +
1701 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1702 				    mps_stat_tx_intr_info) +
1703 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1704 				    mps_stat_rx_intr_info) +
1705 	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1706 				    mps_cls_intr_info);
1707 
1708 	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1709 		     RXINT | TXINT | STATINT);
1710 	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
1711 	if (fat)
1712 		t4_fatal_err(adapter);
1713 }
1714 
1715 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1716 
1717 /*
1718  * EDC/MC interrupt handler.
1719  */
1720 static void mem_intr_handler(struct adapter *adapter, int idx)
1721 {
1722 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1723 
1724 	unsigned int addr, cnt_addr, v;
1725 
1726 	if (idx <= MEM_EDC1) {
1727 		addr = EDC_REG(EDC_INT_CAUSE, idx);
1728 		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1729 	} else if (idx == MEM_MC) {
1730 		if (is_t4(adapter->params.chip)) {
1731 			addr = MC_INT_CAUSE;
1732 			cnt_addr = MC_ECC_STATUS;
1733 		} else {
1734 			addr = MC_P_INT_CAUSE;
1735 			cnt_addr = MC_P_ECC_STATUS;
1736 		}
1737 	} else {
1738 		addr = MC_REG(MC_P_INT_CAUSE, 1);
1739 		cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
1740 	}
1741 
1742 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1743 	if (v & PERR_INT_CAUSE)
1744 		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1745 			  name[idx]);
1746 	if (v & ECC_CE_INT_CAUSE) {
1747 		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1748 
1749 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1750 		if (printk_ratelimit())
1751 			dev_warn(adapter->pdev_dev,
1752 				 "%u %s correctable ECC data error%s\n",
1753 				 cnt, name[idx], cnt > 1 ? "s" : "");
1754 	}
1755 	if (v & ECC_UE_INT_CAUSE)
1756 		dev_alert(adapter->pdev_dev,
1757 			  "%s uncorrectable ECC data error\n", name[idx]);
1758 
1759 	t4_write_reg(adapter, addr, v);
1760 	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1761 		t4_fatal_err(adapter);
1762 }
1763 
1764 /*
1765  * MA interrupt handler.
1766  */
1767 static void ma_intr_handler(struct adapter *adap)
1768 {
1769 	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1770 
1771 	if (status & MEM_PERR_INT_CAUSE)
1772 		dev_alert(adap->pdev_dev,
1773 			  "MA parity error, parity status %#x\n",
1774 			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1775 	if (status & MEM_WRAP_INT_CAUSE) {
1776 		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1777 		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1778 			  "client %u to address %#x\n",
1779 			  MEM_WRAP_CLIENT_NUM_GET(v),
1780 			  MEM_WRAP_ADDRESS_GET(v) << 4);
1781 	}
1782 	t4_write_reg(adap, MA_INT_CAUSE, status);
1783 	t4_fatal_err(adap);
1784 }
1785 
1786 /*
1787  * SMB interrupt handler.
1788  */
1789 static void smb_intr_handler(struct adapter *adap)
1790 {
1791 	static const struct intr_info smb_intr_info[] = {
1792 		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1793 		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1794 		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1795 		{ 0 }
1796 	};
1797 
1798 	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1799 		t4_fatal_err(adap);
1800 }
1801 
1802 /*
1803  * NC-SI interrupt handler.
1804  */
1805 static void ncsi_intr_handler(struct adapter *adap)
1806 {
1807 	static const struct intr_info ncsi_intr_info[] = {
1808 		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1809 		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1810 		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1811 		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1812 		{ 0 }
1813 	};
1814 
1815 	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1816 		t4_fatal_err(adap);
1817 }
1818 
1819 /*
1820  * XGMAC interrupt handler.
1821  */
1822 static void xgmac_intr_handler(struct adapter *adap, int port)
1823 {
1824 	u32 v, int_cause_reg;
1825 
1826 	if (is_t4(adap->params.chip))
1827 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1828 	else
1829 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1830 
1831 	v = t4_read_reg(adap, int_cause_reg);
1832 
1833 	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1834 	if (!v)
1835 		return;
1836 
1837 	if (v & TXFIFO_PRTY_ERR)
1838 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1839 			  port);
1840 	if (v & RXFIFO_PRTY_ERR)
1841 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1842 			  port);
	t4_write_reg(adap, int_cause_reg, v);
1844 	t4_fatal_err(adap);
1845 }
1846 
1847 /*
1848  * PL interrupt handler.
1849  */
1850 static void pl_intr_handler(struct adapter *adap)
1851 {
1852 	static const struct intr_info pl_intr_info[] = {
1853 		{ FATALPERR, "T4 fatal parity error", -1, 1 },
1854 		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1855 		{ 0 }
1856 	};
1857 
1858 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1859 		t4_fatal_err(adap);
1860 }
1861 
1862 #define PF_INTR_MASK (PFSW)
1863 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1864 		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1865 		CPL_SWITCH | SGE | ULP_TX)
1866 
1867 /**
1868  *	t4_slow_intr_handler - control path interrupt handler
1869  *	@adapter: the adapter
1870  *
1871  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
1872  *	The designation 'slow' is because it involves register reads, while
1873  *	data interrupts typically don't involve any MMIOs.
1874  */
1875 int t4_slow_intr_handler(struct adapter *adapter)
1876 {
1877 	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1878 
1879 	if (!(cause & GLBL_INTR_MASK))
1880 		return 0;
1881 	if (cause & CIM)
1882 		cim_intr_handler(adapter);
1883 	if (cause & MPS)
1884 		mps_intr_handler(adapter);
1885 	if (cause & NCSI)
1886 		ncsi_intr_handler(adapter);
1887 	if (cause & PL)
1888 		pl_intr_handler(adapter);
1889 	if (cause & SMB)
1890 		smb_intr_handler(adapter);
1891 	if (cause & XGMAC0)
1892 		xgmac_intr_handler(adapter, 0);
1893 	if (cause & XGMAC1)
1894 		xgmac_intr_handler(adapter, 1);
1895 	if (cause & XGMAC_KR0)
1896 		xgmac_intr_handler(adapter, 2);
1897 	if (cause & XGMAC_KR1)
1898 		xgmac_intr_handler(adapter, 3);
1899 	if (cause & PCIE)
1900 		pcie_intr_handler(adapter);
1901 	if (cause & MC)
1902 		mem_intr_handler(adapter, MEM_MC);
1903 	if (!is_t4(adapter->params.chip) && (cause & MC1))
1904 		mem_intr_handler(adapter, MEM_MC1);
1905 	if (cause & EDC0)
1906 		mem_intr_handler(adapter, MEM_EDC0);
1907 	if (cause & EDC1)
1908 		mem_intr_handler(adapter, MEM_EDC1);
1909 	if (cause & LE)
1910 		le_intr_handler(adapter);
1911 	if (cause & TP)
1912 		tp_intr_handler(adapter);
1913 	if (cause & MA)
1914 		ma_intr_handler(adapter);
1915 	if (cause & PM_TX)
1916 		pmtx_intr_handler(adapter);
1917 	if (cause & PM_RX)
1918 		pmrx_intr_handler(adapter);
1919 	if (cause & ULP_RX)
1920 		ulprx_intr_handler(adapter);
1921 	if (cause & CPL_SWITCH)
1922 		cplsw_intr_handler(adapter);
1923 	if (cause & SGE)
1924 		sge_intr_handler(adapter);
1925 	if (cause & ULP_TX)
1926 		ulptx_intr_handler(adapter);
1927 
1928 	/* Clear the interrupts just processed for which we are the master. */
1929 	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1930 	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1931 	return 1;
1932 }
1933 
1934 /**
1935  *	t4_intr_enable - enable interrupts
1936  *	@adapter: the adapter whose interrupts should be enabled
1937  *
1938  *	Enable PF-specific interrupts for the calling function and the top-level
1939  *	interrupt concentrator for global interrupts.  Interrupts are already
1940  *	enabled at each module,	here we just enable the roots of the interrupt
1941  *	hierarchies.
1942  *
1943  *	Note: this function should be called only when the driver manages
1944  *	non PF-specific interrupts from the various HW modules.  Only one PCI
1945  *	function at a time should be doing this.
1946  */
1947 void t4_intr_enable(struct adapter *adapter)
1948 {
1949 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1950 
1951 	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1952 		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1953 		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1954 		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1955 		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1956 		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1957 		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1958 		     DBFIFO_HP_INT | DBFIFO_LP_INT |
1959 		     EGRESS_SIZE_ERR);
1960 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1961 	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1962 }
1963 
1964 /**
1965  *	t4_intr_disable - disable interrupts
1966  *	@adapter: the adapter whose interrupts should be disabled
1967  *
1968  *	Disable interrupts.  We only disable the top-level interrupt
1969  *	concentrators.  The caller must be a PCI function managing global
1970  *	interrupts.
1971  */
1972 void t4_intr_disable(struct adapter *adapter)
1973 {
1974 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1975 
1976 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1977 	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1978 }
1979 
1980 /**
1981  *	hash_mac_addr - return the hash value of a MAC address
1982  *	@addr: the 48-bit Ethernet MAC address
1983  *
1984  *	Hashes a MAC address according to the hash function used by HW inexact
1985  *	(hash) address matching.
1986  */
1987 static int hash_mac_addr(const u8 *addr)
1988 {
1989 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1990 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1991 	a ^= b;
1992 	a ^= (a >> 12);
1993 	a ^= (a >> 6);
1994 	return a & 0x3f;
1995 }
1996 
1997 /**
1998  *	t4_config_rss_range - configure a portion of the RSS mapping table
1999  *	@adapter: the adapter
2000  *	@mbox: mbox to use for the FW command
2001  *	@viid: virtual interface whose RSS subtable is to be written
2002  *	@start: start entry in the table to write
2003  *	@n: how many table entries to write
2004  *	@rspq: values for the response queue lookup table
2005  *	@nrspq: number of values in @rspq
2006  *
2007  *	Programs the selected part of the VI's RSS mapping table with the
2008  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2009  *	until the full table range is populated.
2010  *
2011  *	The caller must ensure the values in @rspq are in the range allowed for
2012  *	@viid.
2013  */
2014 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2015 			int start, int n, const u16 *rspq, unsigned int nrspq)
2016 {
2017 	int ret;
2018 	const u16 *rsp = rspq;
2019 	const u16 *rsp_end = rspq + nrspq;
2020 	struct fw_rss_ind_tbl_cmd cmd;
2021 
2022 	memset(&cmd, 0, sizeof(cmd));
2023 	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2024 			       FW_CMD_REQUEST | FW_CMD_WRITE |
2025 			       FW_RSS_IND_TBL_CMD_VIID(viid));
2026 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2027 
2028 	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2029 	while (n > 0) {
2030 		int nq = min(n, 32);
2031 		__be32 *qp = &cmd.iq0_to_iq2;
2032 
2033 		cmd.niqid = htons(nq);
2034 		cmd.startidx = htons(start);
2035 
2036 		start += nq;
2037 		n -= nq;
2038 
2039 		while (nq > 0) {
2040 			unsigned int v;
2041 
2042 			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2043 			if (++rsp >= rsp_end)
2044 				rsp = rspq;
2045 			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2046 			if (++rsp >= rsp_end)
2047 				rsp = rspq;
2048 			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2049 			if (++rsp >= rsp_end)
2050 				rsp = rspq;
2051 
2052 			*qp++ = htonl(v);
2053 			nq -= 3;
2054 		}
2055 
2056 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2057 		if (ret)
2058 			return ret;
2059 	}
2060 	return 0;
2061 }
2062 
2063 /**
2064  *	t4_config_glbl_rss - configure the global RSS mode
2065  *	@adapter: the adapter
2066  *	@mbox: mbox to use for the FW command
2067  *	@mode: global RSS mode
2068  *	@flags: mode-specific flags
2069  *
2070  *	Sets the global RSS mode.
2071  */
2072 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2073 		       unsigned int flags)
2074 {
2075 	struct fw_rss_glb_config_cmd c;
2076 
2077 	memset(&c, 0, sizeof(c));
2078 	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2079 			      FW_CMD_REQUEST | FW_CMD_WRITE);
2080 	c.retval_len16 = htonl(FW_LEN16(c));
2081 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2082 		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2083 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2084 		c.u.basicvirtual.mode_pkd =
2085 			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2086 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2087 	} else
2088 		return -EINVAL;
2089 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2090 }
2091 
2092 /**
2093  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2094  *	@adap: the adapter
2095  *	@v4: holds the TCP/IP counter values
2096  *	@v6: holds the TCP/IPv6 counter values
2097  *
2098  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2099  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2100  */
2101 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2102 			 struct tp_tcp_stats *v6)
2103 {
2104 	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2105 
2106 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2107 #define STAT(x)     val[STAT_IDX(x)]
2108 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2109 
2110 	if (v4) {
2111 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2112 				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2113 		v4->tcpOutRsts = STAT(OUT_RST);
2114 		v4->tcpInSegs  = STAT64(IN_SEG);
2115 		v4->tcpOutSegs = STAT64(OUT_SEG);
2116 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2117 	}
2118 	if (v6) {
2119 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2120 				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2121 		v6->tcpOutRsts = STAT(OUT_RST);
2122 		v6->tcpInSegs  = STAT64(IN_SEG);
2123 		v6->tcpOutSegs = STAT64(OUT_SEG);
2124 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2125 	}
2126 #undef STAT64
2127 #undef STAT
2128 #undef STAT_IDX
2129 }
2130 
2131 /**
2132  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2133  *	@adap: the adapter
2134  *	@mtus: where to store the MTU values
2135  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2136  *
2137  *	Reads the HW path MTU table.
2138  */
2139 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2140 {
2141 	u32 v;
2142 	int i;
2143 
2144 	for (i = 0; i < NMTUS; ++i) {
2145 		t4_write_reg(adap, TP_MTU_TABLE,
2146 			     MTUINDEX(0xff) | MTUVALUE(i));
2147 		v = t4_read_reg(adap, TP_MTU_TABLE);
2148 		mtus[i] = MTUVALUE_GET(v);
2149 		if (mtu_log)
2150 			mtu_log[i] = MTUWIDTH_GET(v);
2151 	}
2152 }
2153 
2154 /**
2155  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2156  *	@adap: the adapter
2157  *	@addr: the indirect TP register address
2158  *	@mask: specifies the field within the register to modify
2159  *	@val: new value for the field
2160  *
2161  *	Sets a field of an indirect TP register to the given value.
2162  */
2163 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2164 			    unsigned int mask, unsigned int val)
2165 {
2166 	t4_write_reg(adap, TP_PIO_ADDR, addr);
2167 	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2168 	t4_write_reg(adap, TP_PIO_DATA, val);
2169 }
2170 
2171 /**
2172  *	init_cong_ctrl - initialize congestion control parameters
2173  *	@a: the alpha values for congestion control
2174  *	@b: the beta values for congestion control
2175  *
2176  *	Initialize the congestion control parameters.
2177  */
2178 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2179 {
2180 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2181 	a[9] = 2;
2182 	a[10] = 3;
2183 	a[11] = 4;
2184 	a[12] = 5;
2185 	a[13] = 6;
2186 	a[14] = 7;
2187 	a[15] = 8;
2188 	a[16] = 9;
2189 	a[17] = 10;
2190 	a[18] = 14;
2191 	a[19] = 17;
2192 	a[20] = 21;
2193 	a[21] = 25;
2194 	a[22] = 30;
2195 	a[23] = 35;
2196 	a[24] = 45;
2197 	a[25] = 60;
2198 	a[26] = 80;
2199 	a[27] = 100;
2200 	a[28] = 200;
2201 	a[29] = 300;
2202 	a[30] = 400;
2203 	a[31] = 500;
2204 
2205 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2206 	b[9] = b[10] = 1;
2207 	b[11] = b[12] = 2;
2208 	b[13] = b[14] = b[15] = b[16] = 3;
2209 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2210 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2211 	b[28] = b[29] = 6;
2212 	b[30] = b[31] = 7;
2213 }
2214 
2215 /* The minimum additive increment value for the congestion control table */
2216 #define CC_MIN_INCR 2U
2217 
2218 /**
2219  *	t4_load_mtus - write the MTU and congestion control HW tables
2220  *	@adap: the adapter
2221  *	@mtus: the values for the MTU table
2222  *	@alpha: the values for the congestion control alpha parameter
2223  *	@beta: the values for the congestion control beta parameter
2224  *
2225  *	Write the HW MTU table with the supplied MTUs and the high-speed
2226  *	congestion control table with the supplied alpha, beta, and MTUs.
2227  *	We write the two tables together because the additive increments
2228  *	depend on the MTUs.
2229  */
2230 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2231 		  const unsigned short *alpha, const unsigned short *beta)
2232 {
2233 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2234 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2235 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2236 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2237 	};
2238 
2239 	unsigned int i, w;
2240 
2241 	for (i = 0; i < NMTUS; ++i) {
2242 		unsigned int mtu = mtus[i];
2243 		unsigned int log2 = fls(mtu);
2244 
2245 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2246 			log2--;
2247 		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2248 			     MTUWIDTH(log2) | MTUVALUE(mtu));
2249 
2250 		for (w = 0; w < NCCTRL_WIN; ++w) {
2251 			unsigned int inc;
2252 
2253 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2254 				  CC_MIN_INCR);
2255 
2256 			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2257 				     (w << 16) | (beta[w] << 13) | inc);
2258 		}
2259 	}
2260 }
2261 
2262 /**
2263  *	get_mps_bg_map - return the buffer groups associated with a port
2264  *	@adap: the adapter
2265  *	@idx: the port index
2266  *
2267  *	Returns a bitmap indicating which MPS buffer groups are associated
2268  *	with the given port.  Bit i is set if buffer group i is used by the
2269  *	port.
2270  */
2271 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2272 {
2273 	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2274 
2275 	if (n == 0)
2276 		return idx == 0 ? 0xf : 0;
2277 	if (n == 1)
2278 		return idx < 2 ? (3 << (2 * idx)) : 0;
2279 	return 1 << idx;
2280 }
2281 
2282 /**
2283  *      t4_get_port_type_description - return Port Type string description
2284  *      @port_type: firmware Port Type enumeration
2285  */
2286 const char *t4_get_port_type_description(enum fw_port_type port_type)
2287 {
2288 	static const char *const port_type_description[] = {
2289 		"R XFI",
2290 		"R XAUI",
2291 		"T SGMII",
2292 		"T XFI",
2293 		"T XAUI",
2294 		"KX4",
2295 		"CX4",
2296 		"KX",
2297 		"KR",
2298 		"R SFP+",
2299 		"KR/KX",
2300 		"KR/KX/KX4",
2301 		"R QSFP_10G",
2302 		"",
2303 		"R QSFP",
2304 		"R BP40_BA",
2305 	};
2306 
2307 	if (port_type < ARRAY_SIZE(port_type_description))
2308 		return port_type_description[port_type];
2309 	return "UNKNOWN";
2310 }
2311 
2312 /**
2313  *	t4_get_port_stats - collect port statistics
2314  *	@adap: the adapter
2315  *	@idx: the port index
2316  *	@p: the stats structure to fill
2317  *
2318  *	Collect statistics related to the given port from HW.
2319  */
2320 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2321 {
2322 	u32 bgmap = get_mps_bg_map(adap, idx);
2323 
2324 #define GET_STAT(name) \
2325 	t4_read_reg64(adap, \
2326 	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2327 	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2328 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2329 
2330 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
2331 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
2332 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
2333 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
2334 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
2335 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
2336 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
2337 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
2338 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
2339 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
2340 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
2341 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2342 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
2343 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
2344 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
2345 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
2346 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
2347 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
2348 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
2349 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
2350 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
2351 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
2352 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
2353 
2354 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
2355 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
2356 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
2357 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
2358 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
2359 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
2360 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2361 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
2362 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
2363 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
2364 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
2365 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
2366 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
2367 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
2368 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
2369 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
2370 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2371 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
2372 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
2373 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
2374 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
2375 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
2376 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
2377 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
2378 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
2379 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
2380 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
2381 
2382 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2383 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2384 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2385 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2386 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2387 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2388 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2389 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2390 
2391 #undef GET_STAT
2392 #undef GET_STAT_COM
2393 }
2394 
2395 /**
2396  *	t4_wol_magic_enable - enable/disable magic packet WoL
2397  *	@adap: the adapter
2398  *	@port: the physical port index
2399  *	@addr: MAC address expected in magic packets, %NULL to disable
2400  *
2401  *	Enables/disables magic packet wake-on-LAN for the selected port.
2402  */
2403 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2404 			 const u8 *addr)
2405 {
2406 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2407 
2408 	if (is_t4(adap->params.chip)) {
2409 		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2410 		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2411 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2412 	} else {
2413 		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2414 		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2415 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2416 	}
2417 
2418 	if (addr) {
2419 		t4_write_reg(adap, mag_id_reg_l,
2420 			     (addr[2] << 24) | (addr[3] << 16) |
2421 			     (addr[4] << 8) | addr[5]);
2422 		t4_write_reg(adap, mag_id_reg_h,
2423 			     (addr[0] << 8) | addr[1]);
2424 	}
2425 	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2426 			 addr ? MAGICEN : 0);
2427 }
2428 
2429 /**
2430  *	t4_wol_pat_enable - enable/disable pattern-based WoL
2431  *	@adap: the adapter
2432  *	@port: the physical port index
2433  *	@map: bitmap of which HW pattern filters to set
2434  *	@mask0: byte mask for bytes 0-63 of a packet
2435  *	@mask1: byte mask for bytes 64-127 of a packet
2436  *	@crc: Ethernet CRC for selected bytes
2437  *	@enable: enable/disable switch
2438  *
2439  *	Sets the pattern filters indicated in @map to mask out the bytes
2440  *	specified in @mask0/@mask1 in received packets and compare the CRC of
2441  *	the resulting packet against @crc.  If @enable is %true pattern-based
2442  *	WoL is enabled, otherwise disabled.
2443  */
2444 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2445 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
2446 {
2447 	int i;
2448 	u32 port_cfg_reg;
2449 
2450 	if (is_t4(adap->params.chip))
2451 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2452 	else
2453 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2454 
2455 	if (!enable) {
2456 		t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2457 		return 0;
2458 	}
2459 	if (map > 0xff)
2460 		return -EINVAL;
2461 
2462 #define EPIO_REG(name) \
2463 	(is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2464 	T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2465 
2466 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2467 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2468 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2469 
2470 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2471 		if (!(map & 1))
2472 			continue;
2473 
2474 		/* write byte masks */
2475 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2476 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2477 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2478 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2479 			return -ETIMEDOUT;
2480 
2481 		/* write CRC */
2482 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2483 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2484 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2485 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2486 			return -ETIMEDOUT;
2487 	}
2488 #undef EPIO_REG
2489 
	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2491 	return 0;
2492 }
2493 
2494 /*     t4_mk_filtdelwr - create a delete filter WR
2495  *     @ftid: the filter ID
2496  *     @wr: the filter work request to populate
2497  *     @qid: ingress queue to receive the delete notification
2498  *
2499  *     Creates a filter work request to delete the supplied filter.  If @qid is
2500  *     negative the delete notification is suppressed.
2501  */
2502 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2503 {
2504 	memset(wr, 0, sizeof(*wr));
2505 	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2506 	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2507 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2508 			V_FW_FILTER_WR_NOREPLY(qid < 0));
2509 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2510 	if (qid >= 0)
2511 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2512 }
2513 
2514 #define INIT_CMD(var, cmd, rd_wr) do { \
2515 	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2516 				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2517 	(var).retval_len16 = htonl(FW_LEN16(var)); \
2518 } while (0)
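
/*
 * For reference, an invocation such as INIT_CMD(c, BYE, WRITE) expands to:
 *
 *	c.op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
 *			      FW_CMD_REQUEST | FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */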
2519 
2520 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2521 			  u32 addr, u32 val)
2522 {
2523 	struct fw_ldst_cmd c;
2524 
2525 	memset(&c, 0, sizeof(c));
2526 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2527 			    FW_CMD_WRITE |
2528 			    FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2529 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2530 	c.u.addrval.addr = htonl(addr);
2531 	c.u.addrval.val = htonl(val);
2532 
2533 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2534 }
2535 
2536 /**
2537  *	t4_mdio_rd - read a PHY register through MDIO
2538  *	@adap: the adapter
2539  *	@mbox: mailbox to use for the FW command
2540  *	@phy_addr: the PHY address
2541  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2542  *	@reg: the register to read
2543  *	@valp: where to store the value
2544  *
2545  *	Issues a FW command through the given mailbox to read a PHY register.
2546  */
2547 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2548 	       unsigned int mmd, unsigned int reg, u16 *valp)
2549 {
2550 	int ret;
2551 	struct fw_ldst_cmd c;
2552 
2553 	memset(&c, 0, sizeof(c));
2554 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2555 		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2556 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2557 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2558 				   FW_LDST_CMD_MMD(mmd));
2559 	c.u.mdio.raddr = htons(reg);
2560 
2561 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2562 	if (ret == 0)
2563 		*valp = ntohs(c.u.mdio.rval);
2564 	return ret;
2565 }
2566 
2567 /**
2568  *	t4_mdio_wr - write a PHY register through MDIO
2569  *	@adap: the adapter
2570  *	@mbox: mailbox to use for the FW command
2571  *	@phy_addr: the PHY address
2572  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2573  *	@reg: the register to write
2574  *	@valp: value to write
2575  *
2576  *	Issues a FW command through the given mailbox to write a PHY register.
2577  */
2578 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2579 	       unsigned int mmd, unsigned int reg, u16 val)
2580 {
2581 	struct fw_ldst_cmd c;
2582 
2583 	memset(&c, 0, sizeof(c));
2584 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2585 		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2586 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2587 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2588 				   FW_LDST_CMD_MMD(mmd));
2589 	c.u.mdio.raddr = htons(reg);
2590 	c.u.mdio.rval = htons(val);
2591 
2592 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2593 }
2594 
2595 /**
2596  *	t4_sge_decode_idma_state - decode the idma state
2597  *	@adap: the adapter
2598  *	@state: the state idma is stuck in
2599  */
2600 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2601 {
2602 	static const char * const t4_decode[] = {
2603 		"IDMA_IDLE",
2604 		"IDMA_PUSH_MORE_CPL_FIFO",
2605 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2606 		"Not used",
2607 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2608 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2609 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2610 		"IDMA_SEND_FIFO_TO_IMSG",
2611 		"IDMA_FL_REQ_DATA_FL_PREP",
2612 		"IDMA_FL_REQ_DATA_FL",
2613 		"IDMA_FL_DROP",
2614 		"IDMA_FL_H_REQ_HEADER_FL",
2615 		"IDMA_FL_H_SEND_PCIEHDR",
2616 		"IDMA_FL_H_PUSH_CPL_FIFO",
2617 		"IDMA_FL_H_SEND_CPL",
2618 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2619 		"IDMA_FL_H_SEND_IP_HDR",
2620 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2621 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2622 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2623 		"IDMA_FL_D_SEND_PCIEHDR",
2624 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2625 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2626 		"IDMA_FL_SEND_PCIEHDR",
2627 		"IDMA_FL_PUSH_CPL_FIFO",
2628 		"IDMA_FL_SEND_CPL",
2629 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2630 		"IDMA_FL_SEND_PAYLOAD",
2631 		"IDMA_FL_REQ_NEXT_DATA_FL",
2632 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2633 		"IDMA_FL_SEND_PADDING",
2634 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2635 		"IDMA_FL_SEND_FIFO_TO_IMSG",
2636 		"IDMA_FL_REQ_DATAFL_DONE",
2637 		"IDMA_FL_REQ_HEADERFL_DONE",
2638 	};
2639 	static const char * const t5_decode[] = {
2640 		"IDMA_IDLE",
2641 		"IDMA_ALMOST_IDLE",
2642 		"IDMA_PUSH_MORE_CPL_FIFO",
2643 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2644 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2645 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2646 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2647 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2648 		"IDMA_SEND_FIFO_TO_IMSG",
2649 		"IDMA_FL_REQ_DATA_FL",
2650 		"IDMA_FL_DROP",
2651 		"IDMA_FL_DROP_SEND_INC",
2652 		"IDMA_FL_H_REQ_HEADER_FL",
2653 		"IDMA_FL_H_SEND_PCIEHDR",
2654 		"IDMA_FL_H_PUSH_CPL_FIFO",
2655 		"IDMA_FL_H_SEND_CPL",
2656 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2657 		"IDMA_FL_H_SEND_IP_HDR",
2658 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2659 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2660 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2661 		"IDMA_FL_D_SEND_PCIEHDR",
2662 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2663 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2664 		"IDMA_FL_SEND_PCIEHDR",
2665 		"IDMA_FL_PUSH_CPL_FIFO",
2666 		"IDMA_FL_SEND_CPL",
2667 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2668 		"IDMA_FL_SEND_PAYLOAD",
2669 		"IDMA_FL_REQ_NEXT_DATA_FL",
2670 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2671 		"IDMA_FL_SEND_PADDING",
2672 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2673 	};
2674 	static const u32 sge_regs[] = {
2675 		SGE_DEBUG_DATA_LOW_INDEX_2,
2676 		SGE_DEBUG_DATA_LOW_INDEX_3,
2677 		SGE_DEBUG_DATA_HIGH_INDEX_10,
2678 	};
2679 	const char **sge_idma_decode;
2680 	int sge_idma_decode_nstates;
2681 	int i;
2682 
2683 	if (is_t4(adapter->params.chip)) {
2684 		sge_idma_decode = (const char **)t4_decode;
2685 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2686 	} else {
2687 		sge_idma_decode = (const char **)t5_decode;
2688 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2689 	}
2690 
2691 	if (state < sge_idma_decode_nstates)
2692 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2693 	else
2694 		CH_WARN(adapter, "idma state %d unknown\n", state);
2695 
2696 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2697 		CH_WARN(adapter, "SGE register %#x value %#x\n",
2698 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2699 }
2700 
2701 /**
2702  *      t4_fw_hello - establish communication with FW
2703  *      @adap: the adapter
2704  *      @mbox: mailbox to use for the FW command
2705  *      @evt_mbox: mailbox to receive async FW events
2706  *      @master: specifies the caller's willingness to be the device master
2707  *	@state: returns the current device state (if non-NULL)
2708  *
2709  *	Issues a command to establish communication with FW.  Returns either
2710  *	an error (negative integer) or the mailbox of the Master PF.
2711  */
2712 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2713 		enum dev_master master, enum dev_state *state)
2714 {
2715 	int ret;
2716 	struct fw_hello_cmd c;
2717 	u32 v;
2718 	unsigned int master_mbox;
2719 	int retries = FW_CMD_HELLO_RETRIES;
2720 
2721 retry:
2722 	memset(&c, 0, sizeof(c));
2723 	INIT_CMD(c, HELLO, WRITE);
2724 	c.err_to_clearinit = htonl(
2725 		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2726 		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2727 		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2728 				      FW_HELLO_CMD_MBMASTER_MASK) |
2729 		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2730 		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2731 		FW_HELLO_CMD_CLEARINIT);
2732 
2733 	/*
2734 	 * Issue the HELLO command to the firmware.  If it's not successful
2735 	 * but indicates that we got a "busy" or "timeout" condition, retry
2736 	 * the HELLO until we exhaust our retry limit.
2737 	 */
2738 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2739 	if (ret < 0) {
2740 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2741 			goto retry;
2742 		return ret;
2743 	}
2744 
2745 	v = ntohl(c.err_to_clearinit);
2746 	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2747 	if (state) {
2748 		if (v & FW_HELLO_CMD_ERR)
2749 			*state = DEV_STATE_ERR;
2750 		else if (v & FW_HELLO_CMD_INIT)
2751 			*state = DEV_STATE_INIT;
2752 		else
2753 			*state = DEV_STATE_UNINIT;
2754 	}
2755 
2756 	/*
2757 	 * If we're not the Master PF then we need to wait around for the
2758 	 * Master PF Driver to finish setting up the adapter.
2759 	 *
2760 	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up at any
	 * moment and we wouldn't want to fail pointlessly.  (This can happen
	 * when an OS loads lots of different drivers rapidly.)  In
2764 	 * this case, the Master PF returned by the firmware will be
2765 	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2766 	 */
2767 	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2768 	    master_mbox != mbox) {
2769 		int waiting = FW_CMD_HELLO_TIMEOUT;
2770 
2771 		/*
2772 		 * Wait for the firmware to either indicate an error or
2773 		 * initialized state.  If we see either of these we bail out
2774 		 * and report the issue to the caller.  If we exhaust the
2775 		 * "hello timeout" and we haven't exhausted our retries, try
2776 		 * again.  Otherwise bail with a timeout error.
2777 		 */
2778 		for (;;) {
2779 			u32 pcie_fw;
2780 
2781 			msleep(50);
2782 			waiting -= 50;
2783 
2784 			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware, keep waiting till we exhaust our
2787 			 * timeout ... and then retry if we haven't exhausted
2788 			 * our retries ...
2789 			 */
2790 			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2791 			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2792 				if (waiting <= 0) {
2793 					if (retries-- > 0)
2794 						goto retry;
2795 
2796 					return -ETIMEDOUT;
2797 				}
2798 				continue;
2799 			}
2800 
2801 			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
2804 			 */
2805 			if (state) {
2806 				if (pcie_fw & FW_PCIE_FW_ERR)
2807 					*state = DEV_STATE_ERR;
2808 				else if (pcie_fw & FW_PCIE_FW_INIT)
2809 					*state = DEV_STATE_INIT;
2810 			}
2811 
2812 			/*
2813 			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
2815 			 * for our caller.
2816 			 */
2817 			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2818 			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2819 				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2820 			break;
2821 		}
2822 	}
2823 
2824 	return master_mbox;
2825 }
2826 
2827 /**
2828  *	t4_fw_bye - end communication with FW
2829  *	@adap: the adapter
2830  *	@mbox: mailbox to use for the FW command
2831  *
2832  *	Issues a command to terminate communication with FW.
2833  */
2834 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2835 {
2836 	struct fw_bye_cmd c;
2837 
2838 	memset(&c, 0, sizeof(c));
2839 	INIT_CMD(c, BYE, WRITE);
2840 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2841 }
2842 
2843 /**
2844  *	t4_init_cmd - ask FW to initialize the device
2845  *	@adap: the adapter
2846  *	@mbox: mailbox to use for the FW command
2847  *
2848  *	Issues a command to FW to partially initialize the device.  This
2849  *	performs initialization that generally doesn't depend on user input.
2850  */
2851 int t4_early_init(struct adapter *adap, unsigned int mbox)
2852 {
2853 	struct fw_initialize_cmd c;
2854 
2855 	memset(&c, 0, sizeof(c));
2856 	INIT_CMD(c, INITIALIZE, WRITE);
2857 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2858 }
2859 
2860 /**
2861  *	t4_fw_reset - issue a reset to FW
2862  *	@adap: the adapter
2863  *	@mbox: mailbox to use for the FW command
2864  *	@reset: specifies the type of reset to perform
2865  *
2866  *	Issues a reset command of the specified type to FW.
2867  */
2868 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2869 {
2870 	struct fw_reset_cmd c;
2871 
2872 	memset(&c, 0, sizeof(c));
2873 	INIT_CMD(c, RESET, WRITE);
2874 	c.val = htonl(reset);
2875 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2876 }
2877 
2878 /**
2879  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2880  *	@adap: the adapter
2881  *	@mbox: mailbox to use for the FW RESET command (if desired)
2882  *	@force: force uP into RESET even if FW RESET command fails
2883  *
2884  *	Issues a RESET command to firmware (if desired) with a HALT indication
2885  *	and then puts the microprocessor into RESET state.  The RESET command
2886  *	will only be issued if a legitimate mailbox is provided (mbox <=
2887  *	FW_PCIE_FW_MASTER_MASK).
2888  *
2889  *	This is generally used in order for the host to safely manipulate the
2890  *	adapter without fear of conflicting with whatever the firmware might
2891  *	be doing.  The only way out of this state is to RESTART the firmware
2892  *	...
2893  */
2894 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2895 {
2896 	int ret = 0;
2897 
2898 	/*
2899 	 * If a legitimate mailbox is provided, issue a RESET command
2900 	 * with a HALT indication.
2901 	 */
2902 	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2903 		struct fw_reset_cmd c;
2904 
2905 		memset(&c, 0, sizeof(c));
2906 		INIT_CMD(c, RESET, WRITE);
2907 		c.val = htonl(PIORST | PIORSTMODE);
2908 		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2909 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2910 	}
2911 
2912 	/*
2913 	 * Normally we won't complete the operation if the firmware RESET
2914 	 * command fails but if our caller insists we'll go ahead and put the
2915 	 * uP into RESET.  This can be useful if the firmware is hung or even
2916 	 * missing ...  We'll have to take the risk of putting the uP into
2917 	 * RESET without the cooperation of firmware in that case.
2918 	 *
2919 	 * We also force the firmware's HALT flag to be on in case we bypassed
2920 	 * the firmware RESET command above or we're dealing with old firmware
2921 	 * which doesn't have the HALT capability.  This will serve as a flag
2922 	 * for the incoming firmware to know that it's coming out of a HALT
2923 	 * rather than a RESET ... if it's new enough to understand that ...
2924 	 */
2925 	if (ret == 0 || force) {
2926 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2927 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2928 				 FW_PCIE_FW_HALT);
2929 	}
2930 
2931 	/*
2932 	 * And we always return the result of the firmware RESET command
2933 	 * even when we force the uP into RESET ...
2934 	 */
2935 	return ret;
2936 }
2937 
2938 /**
2939  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
2940  *	@adap: the adapter
2941  *	@reset: if we want to do a RESET to restart things
2942  *
2943  *	Restart firmware previously halted by t4_fw_halt().  On successful
2944  *	return the previous PF Master remains as the new PF Master and there
2945  *	is no need to issue a new HELLO command, etc.
2946  *
2947  *	We do this in two ways:
2948  *
2949  *	 1. If we're dealing with newer firmware we'll simply want to take
2950  *	    the chip's microprocessor out of RESET.  This will cause the
2951  *	    firmware to start up from its start vector.  And then we'll loop
2952  *	    until the firmware indicates it's started again (PCIE_FW.HALT
2953  *	    reset to 0) or we timeout.
2954  *
2955  *	 2. If we're dealing with older firmware then we'll need to RESET
2956  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
2957  *	    flag and automatically RESET itself on startup.
2958  */
2959 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2960 {
2961 	if (reset) {
2962 		/*
2963 		 * Since we're directing the RESET instead of the firmware
2964 		 * doing it automatically, we need to clear the PCIE_FW.HALT
2965 		 * bit.
2966 		 */
2967 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2968 
2969 		/*
2970 		 * If we've been given a valid mailbox, first try to get the
2971 		 * firmware to do the RESET.  If that works, great and we can
2972 		 * return success.  Otherwise, if we haven't been given a
2973 		 * valid mailbox or the RESET command failed, fall back to
2974 		 * hitting the chip with a hammer.
2975 		 */
2976 		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2977 			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2978 			msleep(100);
2979 			if (t4_fw_reset(adap, mbox,
2980 					PIORST | PIORSTMODE) == 0)
2981 				return 0;
2982 		}
2983 
2984 		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2985 		msleep(2000);
2986 	} else {
2987 		int ms;
2988 
2989 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2990 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2991 			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2992 				return 0;
2993 			msleep(100);
2994 			ms += 100;
2995 		}
2996 		return -ETIMEDOUT;
2997 	}
2998 	return 0;
2999 }
3000 
3001 /**
3002  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3003  *	@adap: the adapter
3004  *	@mbox: mailbox to use for the FW RESET command (if desired)
3005  *	@fw_data: the firmware image to write
3006  *	@size: image size
3007  *	@force: force upgrade even if firmware doesn't cooperate
3008  *
3009  *	Perform all of the steps necessary for upgrading an adapter's
3010  *	firmware image.  Normally this requires the cooperation of the
3011  *	existing firmware in order to halt all existing activities
3012  *	but if an invalid mailbox token is passed in we skip that step
3013  *	(though we'll still put the adapter microprocessor into RESET in
3014  *	that case).
3015  *
3016  *	On successful return the new firmware will have been loaded and
3017  *	the adapter will have been fully RESET losing all previous setup
3018  *	state.  On unsuccessful return the adapter may be completely hosed ...
3019  *	positive errno indicates that the adapter is ~probably~ intact, a
3020  *	negative errno indicates that things are looking bad ...
3021  */
3022 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3023 			 const u8 *fw_data, unsigned int size, int force)
3024 {
3025 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3026 	int reset, ret;
3027 
3028 	ret = t4_fw_halt(adap, mbox, force);
3029 	if (ret < 0 && !force)
3030 		return ret;
3031 
3032 	ret = t4_load_fw(adap, fw_data, size);
3033 	if (ret < 0)
3034 		return ret;
3035 
3036 	/*
3037 	 * Older versions of the firmware don't understand the new
3038 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3039 	 * restart.  So for newly loaded older firmware we'll have to do the
3040 	 * RESET for it so it starts up on a clean slate.  We can tell if
3041 	 * the newly loaded firmware will handle this right by checking
3042 	 * its header flags to see if it advertises the capability.
3043 	 */
3044 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3045 	return t4_fw_restart(adap, mbox, reset);
3046 }
3047 
3048 /**
3049  *	t4_fixup_host_params - fix up host-dependent parameters
3050  *	@adap: the adapter
3051  *	@page_size: the host's Base Page Size
3052  *	@cache_line_size: the host's Cache Line Size
3053  *
3054  *	Various registers in T4 contain values which are dependent on the
3055  *	host's Base Page and Cache Line Sizes.  This function will fix all of
3056  *	those registers with the appropriate values as passed in ...
3057  */
3058 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3059 			 unsigned int cache_line_size)
3060 {
3061 	unsigned int page_shift = fls(page_size) - 1;
3062 	unsigned int sge_hps = page_shift - 10;
3063 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3064 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3065 	unsigned int fl_align_log = fls(fl_align) - 1;
3066 
3067 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3068 		     HOSTPAGESIZEPF0(sge_hps) |
3069 		     HOSTPAGESIZEPF1(sge_hps) |
3070 		     HOSTPAGESIZEPF2(sge_hps) |
3071 		     HOSTPAGESIZEPF3(sge_hps) |
3072 		     HOSTPAGESIZEPF4(sge_hps) |
3073 		     HOSTPAGESIZEPF5(sge_hps) |
3074 		     HOSTPAGESIZEPF6(sge_hps) |
3075 		     HOSTPAGESIZEPF7(sge_hps));
3076 
3077 	t4_set_reg_field(adap, SGE_CONTROL,
3078 			 INGPADBOUNDARY_MASK |
3079 			 EGRSTATUSPAGESIZE_MASK,
3080 			 INGPADBOUNDARY(fl_align_log - 5) |
3081 			 EGRSTATUSPAGESIZE(stat_len != 64));
3082 
3083 	/*
3084 	 * Adjust various SGE Free List Host Buffer Sizes.
3085 	 *
3086 	 * This is something of a crock since we're using fixed indices into
3087 	 * the array which are also known by the sge.c code and the T4
3088 	 * Firmware Configuration File.  We need to come up with a much better
3089 	 * approach to managing this array.  For now, the first four entries
3090 	 * are:
3091 	 *
3092 	 *   0: Host Page Size
3093 	 *   1: 64KB
3094 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3095 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3096 	 *
3097 	 * For the single-MTU buffers in unpacked mode we need to include
3098 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3099 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
3101 	 * Default Firmware Configuration File but we need to adjust it for
3102 	 * this host's cache line size.
3103 	 */
3104 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3105 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3106 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3107 		     & ~(fl_align-1));
3108 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3109 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3110 		     & ~(fl_align-1));
3111 
3112 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3113 
3114 	return 0;
3115 }
3116 
3117 /**
3118  *	t4_fw_initialize - ask FW to initialize the device
3119  *	@adap: the adapter
3120  *	@mbox: mailbox to use for the FW command
3121  *
3122  *	Issues a command to FW to partially initialize the device.  This
3123  *	performs initialization that generally doesn't depend on user input.
3124  */
3125 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3126 {
3127 	struct fw_initialize_cmd c;
3128 
3129 	memset(&c, 0, sizeof(c));
3130 	INIT_CMD(c, INITIALIZE, WRITE);
3131 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3132 }
3133 
3134 /**
3135  *	t4_query_params - query FW or device parameters
3136  *	@adap: the adapter
3137  *	@mbox: mailbox to use for the FW command
3138  *	@pf: the PF
3139  *	@vf: the VF
3140  *	@nparams: the number of parameters
3141  *	@params: the parameter names
3142  *	@val: the parameter values
3143  *
3144  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3145  *	queried at once.
3146  */
3147 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3148 		    unsigned int vf, unsigned int nparams, const u32 *params,
3149 		    u32 *val)
3150 {
3151 	int i, ret;
3152 	struct fw_params_cmd c;
3153 	__be32 *p = &c.param[0].mnem;
3154 
3155 	if (nparams > 7)
3156 		return -EINVAL;
3157 
3158 	memset(&c, 0, sizeof(c));
3159 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3160 			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3161 			    FW_PARAMS_CMD_VFN(vf));
3162 	c.retval_len16 = htonl(FW_LEN16(c));
3163 	for (i = 0; i < nparams; i++, p += 2)
3164 		*p = htonl(*params++);
3165 
3166 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3167 	if (ret == 0)
3168 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3169 			*val++ = ntohl(*p);
3170 	return ret;
3171 }
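
/*
 * Usage sketch (illustrative only, not driver code): reading a single
 * device parameter, here the port vector.  This assumes the FW_PARAMS_*
 * mnemonic macros from t4fw_api.h and an adapter whose mailbox (adap->mbox)
 * and PF (adap->fn) have already been set up:
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
 *	if (ret == 0)
 *		... val now holds the port vector ...
 */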
3172 
/**
 *	t4_set_params_nosleep - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.  This routine never sleeps and so may be called
 *	from contexts that cannot block.
 */
3187 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3188 			  unsigned int pf, unsigned int vf,
3189 			  unsigned int nparams, const u32 *params,
3190 			  const u32 *val)
3191 {
3192 	struct fw_params_cmd c;
3193 	__be32 *p = &c.param[0].mnem;
3194 
3195 	if (nparams > 7)
3196 		return -EINVAL;
3197 
3198 	memset(&c, 0, sizeof(c));
3199 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3200 				FW_CMD_REQUEST | FW_CMD_WRITE |
3201 				FW_PARAMS_CMD_PFN(pf) |
3202 				FW_PARAMS_CMD_VFN(vf));
3203 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3204 
3205 	while (nparams--) {
3206 		*p++ = cpu_to_be32(*params++);
3207 		*p++ = cpu_to_be32(*val++);
3208 	}
3209 
3210 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3211 }
3212 
3213 /**
3214  *	t4_set_params - sets FW or device parameters
3215  *	@adap: the adapter
3216  *	@mbox: mailbox to use for the FW command
3217  *	@pf: the PF
3218  *	@vf: the VF
3219  *	@nparams: the number of parameters
3220  *	@params: the parameter names
3221  *	@val: the parameter values
3222  *
3223  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3224  *	specified at once.
3225  */
3226 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3227 		  unsigned int vf, unsigned int nparams, const u32 *params,
3228 		  const u32 *val)
3229 {
3230 	struct fw_params_cmd c;
3231 	__be32 *p = &c.param[0].mnem;
3232 
3233 	if (nparams > 7)
3234 		return -EINVAL;
3235 
3236 	memset(&c, 0, sizeof(c));
3237 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3238 			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3239 			    FW_PARAMS_CMD_VFN(vf));
3240 	c.retval_len16 = htonl(FW_LEN16(c));
3241 	while (nparams--) {
3242 		*p++ = htonl(*params++);
3243 		*p++ = htonl(*val++);
3244 	}
3245 
3246 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3247 }
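
/*
 * Usage sketch (illustrative only, not driver code): the write-side
 * counterpart of the query example above.  Up to 7 (parameter, value)
 * pairs may be passed per call, with each mnemonic built exactly as for
 * t4_query_params():
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = ...;		... mnemonic, as in the query example
 *	val = ...;		... desired new value
 *	ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
 */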
3248 
3249 /**
3250  *	t4_cfg_pfvf - configure PF/VF resource limits
3251  *	@adap: the adapter
3252  *	@mbox: mailbox to use for the FW command
3253  *	@pf: the PF being configured
3254  *	@vf: the VF being configured
3255  *	@txq: the max number of egress queues
3256  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
3257  *	@rxqi: the max number of interrupt-capable ingress queues
3258  *	@rxq: the max number of interruptless ingress queues
3259  *	@tc: the PCI traffic class
3260  *	@vi: the max number of virtual interfaces
3261  *	@cmask: the channel access rights mask for the PF/VF
3262  *	@pmask: the port access rights mask for the PF/VF
3263  *	@nexact: the maximum number of exact MPS filters
3264  *	@rcaps: read capabilities
3265  *	@wxcaps: write/execute capabilities
3266  *
3267  *	Configures resource limits and capabilities for a physical or virtual
3268  *	function.
3269  */
3270 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3271 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3272 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
3273 		unsigned int vi, unsigned int cmask, unsigned int pmask,
3274 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3275 {
3276 	struct fw_pfvf_cmd c;
3277 
3278 	memset(&c, 0, sizeof(c));
3279 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3280 			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3281 			    FW_PFVF_CMD_VFN(vf));
3282 	c.retval_len16 = htonl(FW_LEN16(c));
3283 	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3284 			       FW_PFVF_CMD_NIQ(rxq));
3285 	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
3286 			       FW_PFVF_CMD_PMASK(pmask) |
3287 			       FW_PFVF_CMD_NEQ(txq));
3288 	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3289 				FW_PFVF_CMD_NEXACTF(nexact));
3290 	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3291 				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
3292 				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3293 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3294 }
3295 
3296 /**
3297  *	t4_alloc_vi - allocate a virtual interface
3298  *	@adap: the adapter
3299  *	@mbox: mailbox to use for the FW command
3300  *	@port: physical port associated with the VI
3301  *	@pf: the PF owning the VI
3302  *	@vf: the VF owning the VI
3303  *	@nmac: number of MAC addresses needed (1 to 5)
3304  *	@mac: the MAC addresses of the VI
3305  *	@rss_size: size of RSS table slice associated with this VI
3306  *
3307  *	Allocates a virtual interface for the given physical port.  If @mac is
3308  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
 *	stored consecutively, so the space needed is @nmac * 6 bytes.
3311  *	Returns a negative error number or the non-negative VI id.
3312  */
3313 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3314 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3315 		unsigned int *rss_size)
3316 {
3317 	int ret;
3318 	struct fw_vi_cmd c;
3319 
3320 	memset(&c, 0, sizeof(c));
3321 	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3322 			    FW_CMD_WRITE | FW_CMD_EXEC |
3323 			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3324 	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3325 	c.portid_pkd = FW_VI_CMD_PORTID(port);
3326 	c.nmac = nmac - 1;
3327 
3328 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3329 	if (ret)
3330 		return ret;
3331 
3332 	if (mac) {
3333 		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
3344 	}
3345 	if (rss_size)
3346 		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3347 	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3348 }
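
/*
 * Usage sketch (illustrative only): allocating a VI with two MAC
 * addresses.  The destination buffer must provide 6 bytes for each
 * address requested:
 *
 *	u8 mac[2 * 6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, adap->mbox, port, pf, 0, 2, mac, &rss_size);
 *	if (viid < 0)
 *		... handle the error ...
 */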
3349 
3350 /**
3351  *	t4_set_rxmode - set Rx properties of a virtual interface
3352  *	@adap: the adapter
3353  *	@mbox: mailbox to use for the FW command
3354  *	@viid: the VI id
3355  *	@mtu: the new MTU or -1
3356  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3357  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3358  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3359  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3360  *	@sleep_ok: if true we may sleep while awaiting command completion
3361  *
3362  *	Sets Rx properties of a virtual interface.
3363  */
3364 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3365 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3366 		  bool sleep_ok)
3367 {
3368 	struct fw_vi_rxmode_cmd c;
3369 
3370 	/* convert to FW values */
3371 	if (mtu < 0)
3372 		mtu = FW_RXMODE_MTU_NO_CHG;
3373 	if (promisc < 0)
3374 		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3375 	if (all_multi < 0)
3376 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3377 	if (bcast < 0)
3378 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3379 	if (vlanex < 0)
3380 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3381 
3382 	memset(&c, 0, sizeof(c));
3383 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3384 			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3385 	c.retval_len16 = htonl(FW_LEN16(c));
3386 	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3387 				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3388 				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3389 				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3390 				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3391 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3392 }
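
/*
 * Usage sketch (illustrative only): turning on promiscuous mode while
 * leaving the MTU and every other Rx property untouched, relying on the
 * "-1 means no change" convention documented above:
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1, true);
 */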
3393 
3394 /**
3395  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3396  *	@adap: the adapter
3397  *	@mbox: mailbox to use for the FW command
3398  *	@viid: the VI id
3399  *	@free: if true any existing filters for this VI id are first removed
3400  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
3401  *	@addr: the MAC address(es)
3402  *	@idx: where to store the index of each allocated filter
3403  *	@hash: pointer to hash address filter bitmap
3404  *	@sleep_ok: call is allowed to sleep
3405  *
3406  *	Allocates an exact-match filter for each of the supplied addresses and
3407  *	sets it to the corresponding address.  If @idx is not %NULL it should
3408  *	have at least @naddr entries, each of which will be set to the index of
3409  *	the filter allocated for the corresponding MAC address.  If a filter
3410  *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL, addresses that fail to allocate an exact-match
 *	filter are instead hashed and the corresponding bit is set in the
 *	hash filter bitmap pointed at by @hash.
3413  *
3414  *	Returns a negative error number or the number of filters allocated.
3415  */
3416 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3417 		      unsigned int viid, bool free, unsigned int naddr,
3418 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3419 {
3420 	int i, ret;
3421 	struct fw_vi_mac_cmd c;
3422 	struct fw_vi_mac_exact *p;
3423 	unsigned int max_naddr = is_t4(adap->params.chip) ?
3424 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
3425 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3426 
3427 	if (naddr > 7)
3428 		return -EINVAL;
3429 
3430 	memset(&c, 0, sizeof(c));
3431 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3432 			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3433 			     FW_VI_MAC_CMD_VIID(viid));
3434 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3435 				    FW_CMD_LEN16((naddr + 2) / 2));
3436 
3437 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3438 		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3439 				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3440 		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3441 	}
3442 
3443 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3444 	if (ret)
3445 		return ret;
3446 
3447 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3448 		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3449 
3450 		if (idx)
3451 			idx[i] = index >= max_naddr ? 0xffff : index;
3452 		if (index < max_naddr)
3453 			ret++;
3454 		else if (hash)
3455 			*hash |= (1ULL << hash_mac_addr(addr[i]));
3456 	}
3457 	return ret;
3458 }
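
/*
 * Usage sketch (illustrative only): installing two unicast addresses,
 * recording the exact-match filter index of each and falling back to the
 * inexact hash (via t4_set_addr_hash() below) for any that overflow the
 * TCAM:
 *
 *	const u8 *addrs[2] = { addr0, addr1 };
 *	u16 idx[2];
 *	u64 hash = 0;
 *	int ret;
 *
 *	ret = t4_alloc_mac_filt(adap, adap->mbox, viid, false, 2, addrs,
 *				idx, &hash, true);
 *	if (ret >= 0 && hash)
 *		ret = t4_set_addr_hash(adap, adap->mbox, viid, false, hash,
 *				       true);
 */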
3459 
3460 /**
3461  *	t4_change_mac - modifies the exact-match filter for a MAC address
3462  *	@adap: the adapter
3463  *	@mbox: mailbox to use for the FW command
3464  *	@viid: the VI id
3465  *	@idx: index of existing filter for old value of MAC address, or -1
3466  *	@addr: the new MAC address value
3467  *	@persist: whether a new MAC allocation should be persistent
3468  *	@add_smt: if true also add the address to the HW SMT
3469  *
3470  *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter, so the generic way to modify an address filter is to free the
 *	one being used by the old address value and allocate a new filter for
 *	the new address value.  @idx can be -1 if the address is a new
 *	addition.
3475  *
3476  *	Returns a negative error number or the index of the filter with the new
3477  *	MAC value.
3478  */
3479 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3480 		  int idx, const u8 *addr, bool persist, bool add_smt)
3481 {
3482 	int ret, mode;
3483 	struct fw_vi_mac_cmd c;
3484 	struct fw_vi_mac_exact *p = c.u.exact;
3485 	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3486 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
3487 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3488 
3489 	if (idx < 0)                             /* new allocation */
3490 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3491 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3492 
3493 	memset(&c, 0, sizeof(c));
3494 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3495 			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3496 	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3497 	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3498 				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3499 				FW_VI_MAC_CMD_IDX(idx));
3500 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
3501 
3502 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3503 	if (ret == 0) {
3504 		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3505 		if (ret >= max_mac_addr)
3506 			ret = -ENOMEM;
3507 	}
3508 	return ret;
3509 }
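
/*
 * Usage sketch (illustrative only): the usual "replace the station
 * address" pattern, where mac_idx caches the index returned by the
 * previous call (-1 the first time through):
 *
 *	ret = t4_change_mac(adap, adap->mbox, viid, mac_idx, new_addr,
 *			    true, true);
 *	if (ret >= 0)
 *		mac_idx = ret;
 */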
3510 
3511 /**
3512  *	t4_set_addr_hash - program the MAC inexact-match hash filter
3513  *	@adap: the adapter
3514  *	@mbox: mailbox to use for the FW command
3515  *	@viid: the VI id
3516  *	@ucast: whether the hash filter should also match unicast addresses
3517  *	@vec: the value to be written to the hash filter
3518  *	@sleep_ok: call is allowed to sleep
3519  *
3520  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
3521  */
3522 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3523 		     bool ucast, u64 vec, bool sleep_ok)
3524 {
3525 	struct fw_vi_mac_cmd c;
3526 
3527 	memset(&c, 0, sizeof(c));
3528 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3530 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3531 				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3532 				    FW_CMD_LEN16(1));
3533 	c.u.hash.hashvec = cpu_to_be64(vec);
3534 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3535 }
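
/*
 * Usage sketch (illustrative only): folding an array of multicast
 * addresses into the 64-bit hash vector with the same hash_mac_addr()
 * reduction that t4_alloc_mac_filt() applies on TCAM overflow:
 *
 *	u64 vec = 0;
 *	int i;
 *
 *	for (i = 0; i < naddr; i++)
 *		vec |= 1ULL << hash_mac_addr(addr[i]);
 *	ret = t4_set_addr_hash(adap, adap->mbox, viid, false, vec, true);
 */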
3536 
/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
3549 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3550 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3551 {
3552 	struct fw_vi_enable_cmd c;
3553 
3554 	memset(&c, 0, sizeof(c));
3555 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3556 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3557 
3558 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3559 			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3560 			       FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
3561 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3562 }
3563 
3564 /**
3565  *	t4_enable_vi - enable/disable a virtual interface
3566  *	@adap: the adapter
3567  *	@mbox: mailbox to use for the FW command
3568  *	@viid: the VI id
3569  *	@rx_en: 1=enable Rx, 0=disable Rx
3570  *	@tx_en: 1=enable Tx, 0=disable Tx
3571  *
3572  *	Enables/disables a virtual interface.
3573  */
3574 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3575 		 bool rx_en, bool tx_en)
3576 {
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, false);
3578 }
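
/*
 * Usage sketch (illustrative only): bringing a VI up with DCB message
 * delivery enabled, and later quiescing it.  Since DCB enable is only
 * meaningful on the enable path, the plain wrapper below suffices for
 * teardown:
 *
 *	ret = t4_enable_vi_params(adap, adap->mbox, viid, true, true, true);
 *	...
 *	ret = t4_enable_vi(adap, adap->mbox, viid, false, false);
 */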
3579 
3580 /**
3581  *	t4_identify_port - identify a VI's port by blinking its LED
3582  *	@adap: the adapter
3583  *	@mbox: mailbox to use for the FW command
3584  *	@viid: the VI id
3585  *	@nblinks: how many times to blink LED at 2.5 Hz
3586  *
3587  *	Identifies a VI's port by blinking its LED.
3588  */
3589 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3590 		     unsigned int nblinks)
3591 {
3592 	struct fw_vi_enable_cmd c;
3593 
3594 	memset(&c, 0, sizeof(c));
3595 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3596 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3597 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3598 	c.blinkdur = htons(nblinks);
3599 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3600 }
3601 
3602 /**
3603  *	t4_iq_free - free an ingress queue and its FLs
3604  *	@adap: the adapter
3605  *	@mbox: mailbox to use for the FW command
3606  *	@pf: the PF owning the queues
3607  *	@vf: the VF owning the queues
3608  *	@iqtype: the ingress queue type
3609  *	@iqid: ingress queue id
3610  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
3611  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
3612  *
3613  *	Frees an ingress queue and its associated FLs, if any.
3614  */
3615 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3616 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
3617 	       unsigned int fl0id, unsigned int fl1id)
3618 {
3619 	struct fw_iq_cmd c;
3620 
3621 	memset(&c, 0, sizeof(c));
3622 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3623 			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3624 			    FW_IQ_CMD_VFN(vf));
3625 	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3626 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3627 	c.iqid = htons(iqid);
3628 	c.fl0id = htons(fl0id);
3629 	c.fl1id = htons(fl1id);
3630 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3631 }
3632 
3633 /**
3634  *	t4_eth_eq_free - free an Ethernet egress queue
3635  *	@adap: the adapter
3636  *	@mbox: mailbox to use for the FW command
3637  *	@pf: the PF owning the queue
3638  *	@vf: the VF owning the queue
3639  *	@eqid: egress queue id
3640  *
3641  *	Frees an Ethernet egress queue.
3642  */
3643 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3644 		   unsigned int vf, unsigned int eqid)
3645 {
3646 	struct fw_eq_eth_cmd c;
3647 
3648 	memset(&c, 0, sizeof(c));
3649 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3650 			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3651 			    FW_EQ_ETH_CMD_VFN(vf));
3652 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3653 	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3654 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3655 }
3656 
3657 /**
3658  *	t4_ctrl_eq_free - free a control egress queue
3659  *	@adap: the adapter
3660  *	@mbox: mailbox to use for the FW command
3661  *	@pf: the PF owning the queue
3662  *	@vf: the VF owning the queue
3663  *	@eqid: egress queue id
3664  *
3665  *	Frees a control egress queue.
3666  */
3667 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3668 		    unsigned int vf, unsigned int eqid)
3669 {
3670 	struct fw_eq_ctrl_cmd c;
3671 
3672 	memset(&c, 0, sizeof(c));
3673 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3674 			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3675 			    FW_EQ_CTRL_CMD_VFN(vf));
3676 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3677 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3678 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3679 }
3680 
3681 /**
3682  *	t4_ofld_eq_free - free an offload egress queue
3683  *	@adap: the adapter
3684  *	@mbox: mailbox to use for the FW command
3685  *	@pf: the PF owning the queue
3686  *	@vf: the VF owning the queue
3687  *	@eqid: egress queue id
3688  *
 *	Frees an offload egress queue.
3690  */
3691 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3692 		    unsigned int vf, unsigned int eqid)
3693 {
3694 	struct fw_eq_ofld_cmd c;
3695 
3696 	memset(&c, 0, sizeof(c));
3697 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3698 			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3699 			    FW_EQ_OFLD_CMD_VFN(vf));
3700 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3701 	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3702 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3703 }
3704 
3705 /**
3706  *	t4_handle_fw_rpl - process a FW reply message
3707  *	@adap: the adapter
3708  *	@rpl: start of the FW message
3709  *
3710  *	Processes a FW message, such as link state change messages.
3711  */
3712 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3713 {
3714 	u8 opcode = *(const u8 *)rpl;
3715 
3716 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
3717 		int speed = 0, fc = 0;
3718 		const struct fw_port_cmd *p = (void *)rpl;
3719 		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3720 		int port = adap->chan_map[chan];
3721 		struct port_info *pi = adap2pinfo(adap, port);
3722 		struct link_config *lc = &pi->link_cfg;
3723 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3724 		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3725 		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3726 
3727 		if (stat & FW_PORT_CMD_RXPAUSE)
3728 			fc |= PAUSE_RX;
3729 		if (stat & FW_PORT_CMD_TXPAUSE)
3730 			fc |= PAUSE_TX;
3731 		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3732 			speed = 100;
3733 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3734 			speed = 1000;
3735 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3736 			speed = 10000;
3737 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
3738 			speed = 40000;
3739 
3740 		if (link_ok != lc->link_ok || speed != lc->speed ||
3741 		    fc != lc->fc) {                    /* something changed */
3742 			lc->link_ok = link_ok;
3743 			lc->speed = speed;
3744 			lc->fc = fc;
3745 			t4_os_link_changed(adap, port, link_ok);
3746 		}
3747 		if (mod != pi->mod_type) {
3748 			pi->mod_type = mod;
3749 			t4_os_portmod_changed(adap, port);
3750 		}
3751 	}
3752 	return 0;
3753 }
3754 
3755 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3756 {
3757 	u16 val;
3758 
3759 	if (pci_is_pcie(adapter->pdev)) {
3760 		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3761 		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >>
			   PCI_EXP_LNKSTA_NLW_SHIFT;
3763 	}
3764 }
3765 
3766 /**
3767  *	init_link_config - initialize a link's SW state
3768  *	@lc: structure holding the link state
3769  *	@caps: link capabilities
3770  *
3771  *	Initializes the SW state maintained for each link, including the link's
3772  *	capabilities and default speed/flow-control/autonegotiation settings.
3773  */
3774 static void init_link_config(struct link_config *lc, unsigned int caps)
3775 {
3776 	lc->supported = caps;
3777 	lc->requested_speed = 0;
3778 	lc->speed = 0;
3779 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3780 	if (lc->supported & FW_PORT_CAP_ANEG) {
3781 		lc->advertising = lc->supported & ADVERT_MASK;
3782 		lc->autoneg = AUTONEG_ENABLE;
3783 		lc->requested_fc |= PAUSE_AUTONEG;
3784 	} else {
3785 		lc->advertising = 0;
3786 		lc->autoneg = AUTONEG_DISABLE;
3787 	}
3788 }
3789 
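/**
 *	t4_wait_dev_ready - test whether the device is ready
 *	@adap: the adapter
 *
 *	Reads PL_WHOAMI and, if it comes back as all-ones (i.e. the device
 *	isn't responding yet), retries once after a 500ms pause.  Returns 0
 *	if the device is ready and -EIO otherwise.
 */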
3790 int t4_wait_dev_ready(struct adapter *adap)
3791 {
3792 	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3793 		return 0;
3794 	msleep(500);
3795 	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3796 }
3797 
3798 static int get_flash_params(struct adapter *adap)
3799 {
3800 	int ret;
3801 	u32 info;
3802 
3803 	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3804 	if (!ret)
3805 		ret = sf1_read(adap, 3, 0, 1, &info);
3806 	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
3807 	if (ret)
3808 		return ret;
3809 
3810 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
3811 		return -EINVAL;
3812 	info >>= 16;                           /* log2 of size */
3813 	if (info >= 0x14 && info < 0x18)
3814 		adap->params.sf_nsec = 1 << (info - 16);
3815 	else if (info == 0x18)
3816 		adap->params.sf_nsec = 64;
3817 	else
3818 		return -EINVAL;
3819 	adap->params.sf_size = 1 << info;
3820 	adap->params.sf_fw_start =
3821 		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3822 	return 0;
3823 }
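
/*
 * Worked example of the ID decode above (illustrative): a Numonix N25Q032
 * returns manufacturer 0x20 in the low byte and its log2 size, 0x16 (22),
 * in the third byte, i.e. a 4MB part (1 << 22) carved into 64 sectors
 * (1 << (22 - 16)) of 64KB each.
 */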
3824 
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
3834 int t4_prep_adapter(struct adapter *adapter)
3835 {
3836 	int ret, ver;
3837 	uint16_t device_id;
3838 	u32 pl_rev;
3839 
3840 	ret = t4_wait_dev_ready(adapter);
3841 	if (ret < 0)
3842 		return ret;
3843 
3844 	get_pci_mode(adapter, &adapter->params.pci);
3845 	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3846 
3847 	ret = get_flash_params(adapter);
3848 	if (ret < 0) {
3849 		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3850 		return ret;
3851 	}
3852 
	/* Retrieve adapter's device ID */
3855 	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3856 	ver = device_id >> 12;
3857 	adapter->params.chip = 0;
3858 	switch (ver) {
3859 	case CHELSIO_T4:
3860 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3861 		break;
3862 	case CHELSIO_T5:
3863 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3864 		break;
3865 	default:
3866 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3867 			device_id);
3868 		return -EINVAL;
3869 	}
3870 
3871 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3872 
3873 	/*
3874 	 * Default port for debugging in case we can't reach FW.
3875 	 */
3876 	adapter->params.nports = 1;
3877 	adapter->params.portvec = 1;
3878 	adapter->params.vpd.cclk = 50000;
3879 	return 0;
3880 }
3881 
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
3888 int t4_init_tp_params(struct adapter *adap)
3889 {
3890 	int chan;
3891 	u32 v;
3892 
3893 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3894 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3895 	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3896 
3897 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3898 	for (chan = 0; chan < NCHAN; chan++)
3899 		adap->params.tp.tx_modq[chan] = chan;
3900 
	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
3903 	 */
3904 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3905 			 &adap->params.tp.vlan_pri_map, 1,
3906 			 TP_VLAN_PRI_MAP);
3907 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3908 			 &adap->params.tp.ingress_config, 1,
3909 			 TP_INGRESS_CONFIG);
3910 
3911 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3912 	 * shift positions of several elements of the Compressed Filter Tuple
3913 	 * for this adapter which we need frequently ...
3914 	 */
3915 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3916 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3917 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3918 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3919 							       F_PROTOCOL);
3920 
3921 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
3923 	 */
3924 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3925 		adap->params.tp.vnic_shift = -1;
3926 
3927 	return 0;
3928 }
3929 
/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
3939 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3940 {
3941 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3942 	unsigned int sel;
3943 	int field_shift;
3944 
3945 	if ((filter_mode & filter_sel) == 0)
3946 		return -1;
3947 
3948 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3949 		switch (filter_mode & sel) {
3950 		case F_FCOE:
3951 			field_shift += W_FT_FCOE;
3952 			break;
3953 		case F_PORT:
3954 			field_shift += W_FT_PORT;
3955 			break;
3956 		case F_VNIC_ID:
3957 			field_shift += W_FT_VNIC_ID;
3958 			break;
3959 		case F_VLAN:
3960 			field_shift += W_FT_VLAN;
3961 			break;
3962 		case F_TOS:
3963 			field_shift += W_FT_TOS;
3964 			break;
3965 		case F_PROTOCOL:
3966 			field_shift += W_FT_PROTOCOL;
3967 			break;
3968 		case F_ETHERTYPE:
3969 			field_shift += W_FT_ETHERTYPE;
3970 			break;
3971 		case F_MACMATCH:
3972 			field_shift += W_FT_MACMATCH;
3973 			break;
3974 		case F_MPSHITTYPE:
3975 			field_shift += W_FT_MPSHITTYPE;
3976 			break;
3977 		case F_FRAGMENTATION:
3978 			field_shift += W_FT_FRAGMENTATION;
3979 			break;
3980 		}
3981 	}
3982 	return field_shift;
3983 }
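
/*
 * Worked example (illustrative): if the filter mode selects only F_PORT,
 * F_VLAN and F_PROTOCOL, the fields pack into the Compressed Filter Tuple
 * from bit 0 upward in TP_VLAN_PRI_MAP bit order, so:
 *
 *	t4_filter_field_shift(adap, F_PORT)     == 0
 *	t4_filter_field_shift(adap, F_VLAN)     == W_FT_PORT
 *	t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN
 *	t4_filter_field_shift(adap, F_VNIC_ID)  == -1 (not selected)
 */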
3984 
3985 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3986 {
	u8 addr[ETH_ALEN];
3988 	int ret, i, j = 0;
3989 	struct fw_port_cmd c;
3990 	struct fw_rss_vi_config_cmd rvc;
3991 
3992 	memset(&c, 0, sizeof(c));
3993 	memset(&rvc, 0, sizeof(rvc));
3994 
3995 	for_each_port(adap, i) {
3996 		unsigned int rss_size;
3997 		struct port_info *p = adap2pinfo(adap, i);
3998 
3999 		while ((adap->params.portvec & (1 << j)) == 0)
4000 			j++;
4001 
4002 		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
4003 				       FW_CMD_REQUEST | FW_CMD_READ |
4004 				       FW_PORT_CMD_PORTID(j));
4005 		c.action_to_len16 = htonl(
4006 			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4007 			FW_LEN16(c));
4008 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4009 		if (ret)
4010 			return ret;
4011 
4012 		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4013 		if (ret < 0)
4014 			return ret;
4015 
4016 		p->viid = ret;
4017 		p->tx_chan = j;
4018 		p->lport = j;
4019 		p->rss_size = rss_size;
4020 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
4021 		adap->port[i]->dev_port = j;
4022 
4023 		ret = ntohl(c.u.info.lstatus_to_modtype);
4024 		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4025 			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4026 		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
4027 		p->mod_type = FW_PORT_MOD_TYPE_NA;
4028 
4029 		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4030 				       FW_CMD_REQUEST | FW_CMD_READ |
4031 				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4032 		rvc.retval_len16 = htonl(FW_LEN16(rvc));
4033 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4034 		if (ret)
4035 			return ret;
4036 		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4037 
4038 		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4039 		j++;
4040 	}
4041 	return 0;
4042 }
4043