1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4fw_api.h"
39 
40 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
41 			 const u8 *fw_data, unsigned int size, int force);
42 /**
43  *	t4_wait_op_done_val - wait until an operation is completed
44  *	@adapter: the adapter performing the operation
45  *	@reg: the register to check for completion
46  *	@mask: a single-bit field within @reg that indicates completion
47  *	@polarity: the value of the field when the operation is completed
48  *	@attempts: number of check iterations
49  *	@delay: delay in usecs between iterations
50  *	@valp: where to store the value of the register at completion time
51  *
52  *	Wait until an operation is completed by checking a bit in a register
53  *	up to @attempts times.  If @valp is not NULL the value of the register
54  *	at the time it indicated completion is stored there.  Returns 0 if the
55  *	operation completes and	-EAGAIN	otherwise.
56  */
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 			       int polarity, int attempts, int delay, u32 *valp)
59 {
60 	while (1) {
61 		u32 val = t4_read_reg(adapter, reg);
62 
63 		if (!!(val & mask) == polarity) {
64 			if (valp)
65 				*valp = val;
66 			return 0;
67 		}
68 		if (--attempts == 0)
69 			return -EAGAIN;
70 		if (delay)
71 			udelay(delay);
72 	}
73 }
74 
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 				  int polarity, int attempts, int delay)
77 {
78 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
79 				   delay, NULL);
80 }
81 
82 /**
83  *	t4_set_reg_field - set a register field to a value
84  *	@adapter: the adapter to program
85  *	@addr: the register address
86  *	@mask: specifies the portion of the register to modify
87  *	@val: the new value for the register field
88  *
89  *	Sets a register field specified by the supplied mask to the
90  *	given value.
91  */
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93 		      u32 val)
94 {
95 	u32 v = t4_read_reg(adapter, addr) & ~mask;
96 
97 	t4_write_reg(adapter, addr, v | val);
98 	(void) t4_read_reg(adapter, addr);      /* flush */
99 }
100 
101 /**
102  *	t4_read_indirect - read indirectly addressed registers
103  *	@adap: the adapter
104  *	@addr_reg: register holding the indirect address
105  *	@data_reg: register holding the value of the indirect register
106  *	@vals: where the read register values are stored
107  *	@nregs: how many indirect registers to read
108  *	@start_idx: index of first indirect register to read
109  *
110  *	Reads registers that are accessed indirectly through an address/data
111  *	register pair.
112  */
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 			     unsigned int data_reg, u32 *vals,
115 			     unsigned int nregs, unsigned int start_idx)
116 {
117 	while (nregs--) {
118 		t4_write_reg(adap, addr_reg, start_idx);
119 		*vals++ = t4_read_reg(adap, data_reg);
120 		start_idx++;
121 	}
122 }
123 
124 /**
125  *	t4_write_indirect - write indirectly addressed registers
126  *	@adap: the adapter
127  *	@addr_reg: register holding the indirect addresses
128  *	@data_reg: register holding the value for the indirect registers
129  *	@vals: values to write
130  *	@nregs: how many indirect registers to write
131  *	@start_idx: address of first indirect register to write
132  *
133  *	Writes a sequential block of registers that are accessed indirectly
134  *	through an address/data register pair.
135  */
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 		       unsigned int data_reg, const u32 *vals,
138 		       unsigned int nregs, unsigned int start_idx)
139 {
140 	while (nregs--) {
141 		t4_write_reg(adap, addr_reg, start_idx++);
142 		t4_write_reg(adap, data_reg, *vals++);
143 	}
144 }
145 
146 /*
147  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
148  */
149 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
150 			 u32 mbox_addr)
151 {
152 	for ( ; nflit; nflit--, mbox_addr += 8)
153 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
154 }
155 
156 /*
157  * Handle a FW assertion reported in a mailbox.
158  */
159 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
160 {
161 	struct fw_debug_cmd asrt;
162 
163 	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
164 	dev_alert(adap->pdev_dev,
165 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
166 		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
167 		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
168 }
169 
170 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
171 {
172 	dev_err(adap->pdev_dev,
173 		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
174 		(unsigned long long)t4_read_reg64(adap, data_reg),
175 		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
176 		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
177 		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
178 		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
179 		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
180 		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
181 		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
182 }
183 
184 /**
185  *	t4_wr_mbox_meat - send a command to FW through the given mailbox
186  *	@adap: the adapter
187  *	@mbox: index of the mailbox to use
188  *	@cmd: the command to write
189  *	@size: command length in bytes
190  *	@rpl: where to optionally store the reply
191  *	@sleep_ok: if true we may sleep while awaiting command completion
192  *
193  *	Sends the given command to FW through the selected mailbox and waits
194  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
195  *	store the FW's reply to the command.  The command and its optional
196  *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
197  *	to respond.  @sleep_ok determines whether we may sleep while awaiting
198  *	the response.  If sleeping is allowed we use progressive backoff
199  *	otherwise we spin.
200  *
201  *	The return value is 0 on success or a negative errno on failure.  A
202  *	failure can happen either because we are not able to execute the
203  *	command or FW executes it but signals an error.  In the latter case
204  *	the return value is the error code indicated by FW (negated).
205  */
206 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
207 		    void *rpl, bool sleep_ok)
208 {
209 	static const int delay[] = {
210 		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
211 	};
212 
213 	u32 v;
214 	u64 res;
215 	int i, ms, delay_idx;
216 	const __be64 *p = cmd;
217 	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
218 	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
219 
220 	if ((size & 15) || size > MBOX_LEN)
221 		return -EINVAL;
222 
223 	/*
224 	 * If the device is off-line, as in EEH, commands will time out.
225 	 * Fail them early so we don't waste time waiting.
226 	 */
227 	if (adap->pdev->error_state != pci_channel_io_normal)
228 		return -EIO;
229 
230 	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
231 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
232 		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
233 
234 	if (v != MBOX_OWNER_DRV)
235 		return v ? -EBUSY : -ETIMEDOUT;
236 
237 	for (i = 0; i < size; i += 8)
238 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
239 
240 	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
241 	t4_read_reg(adap, ctl_reg);          /* flush write */
242 
243 	delay_idx = 0;
244 	ms = delay[0];
245 
246 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
247 		if (sleep_ok) {
248 			ms = delay[delay_idx];  /* last element may repeat */
249 			if (delay_idx < ARRAY_SIZE(delay) - 1)
250 				delay_idx++;
251 			msleep(ms);
252 		} else
253 			mdelay(ms);
254 
255 		v = t4_read_reg(adap, ctl_reg);
256 		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
257 			if (!(v & MBMSGVALID)) {
258 				t4_write_reg(adap, ctl_reg, 0);
259 				continue;
260 			}
261 
262 			res = t4_read_reg64(adap, data_reg);
263 			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
264 				fw_asrt(adap, data_reg);
265 				res = FW_CMD_RETVAL(EIO);
266 			} else if (rpl)
267 				get_mbox_rpl(adap, rpl, size / 8, data_reg);
268 
269 			if (FW_CMD_RETVAL_GET((int)res))
270 				dump_mbox(adap, mbox, data_reg);
271 			t4_write_reg(adap, ctl_reg, 0);
272 			return -FW_CMD_RETVAL_GET((int)res);
273 		}
274 	}
275 
276 	dump_mbox(adap, mbox, data_reg);
277 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
278 		*(const u8 *)cmd, mbox);
279 	return -ETIMEDOUT;
280 }
281 
282 /**
283  *	t4_mc_read - read from MC through backdoor accesses
284  *	@adap: the adapter
285  *	@addr: address of first byte requested
286  *	@idx: which MC to access
287  *	@data: 64 bytes of data containing the requested address
288  *	@ecc: where to store the corresponding 64-bit ECC word
289  *
290  *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
291  *	that covers the requested address @addr.  If @parity is not %NULL it
292  *	is assigned the 64-bit ECC word for the read data.
293  */
294 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
295 {
296 	int i;
297 	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
298 	u32 mc_bist_status_rdata, mc_bist_data_pattern;
299 
300 	if (is_t4(adap->params.chip)) {
301 		mc_bist_cmd = MC_BIST_CMD;
302 		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
303 		mc_bist_cmd_len = MC_BIST_CMD_LEN;
304 		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
305 		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
306 	} else {
307 		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
308 		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
309 		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
310 		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
311 		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
312 	}
313 
314 	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
315 		return -EBUSY;
316 	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
317 	t4_write_reg(adap, mc_bist_cmd_len, 64);
318 	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
319 	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
320 		     BIST_CMD_GAP(1));
321 	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
322 	if (i)
323 		return i;
324 
325 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
326 
327 	for (i = 15; i >= 0; i--)
328 		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
329 	if (ecc)
330 		*ecc = t4_read_reg64(adap, MC_DATA(16));
331 #undef MC_DATA
332 	return 0;
333 }
334 
335 /**
336  *	t4_edc_read - read from EDC through backdoor accesses
337  *	@adap: the adapter
338  *	@idx: which EDC to access
339  *	@addr: address of first byte requested
340  *	@data: 64 bytes of data containing the requested address
341  *	@ecc: where to store the corresponding 64-bit ECC word
342  *
343  *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
344  *	that covers the requested address @addr.  If @parity is not %NULL it
345  *	is assigned the 64-bit ECC word for the read data.
346  */
347 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
348 {
349 	int i;
350 	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
351 	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
352 
353 	if (is_t4(adap->params.chip)) {
354 		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
355 		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
356 		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
357 		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
358 						    idx);
359 		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
360 						    idx);
361 	} else {
362 		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
363 		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
364 		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
365 		edc_bist_cmd_data_pattern =
366 			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
367 		edc_bist_status_rdata =
368 			 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
369 	}
370 
371 	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
372 		return -EBUSY;
373 	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
374 	t4_write_reg(adap, edc_bist_cmd_len, 64);
375 	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
376 	t4_write_reg(adap, edc_bist_cmd,
377 		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
378 	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
379 	if (i)
380 		return i;
381 
382 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
383 
384 	for (i = 15; i >= 0; i--)
385 		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
386 	if (ecc)
387 		*ecc = t4_read_reg64(adap, EDC_DATA(16));
388 #undef EDC_DATA
389 	return 0;
390 }
391 
392 /*
393  *	t4_mem_win_rw - read/write memory through PCIE memory window
394  *	@adap: the adapter
395  *	@addr: address of first byte requested
396  *	@data: MEMWIN0_APERTURE bytes of data containing the requested address
397  *	@dir: direction of transfer 1 => read, 0 => write
398  *
399  *	Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
400  *	MEMWIN0_APERTURE-byte-aligned address that covers the requested
401  *	address @addr.
402  */
403 static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
404 {
405 	int i;
406 	u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
407 
408 	/*
409 	 * Setup offset into PCIE memory window.  Address must be a
410 	 * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
411 	 * ensure that changes propagate before we attempt to use the new
412 	 * values.)
413 	 */
414 	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
415 		     (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
416 	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
417 
418 	/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
419 	for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
420 		if (dir)
421 			*data++ = (__force __be32) t4_read_reg(adap,
422 							(MEMWIN0_BASE + i));
423 		else
424 			t4_write_reg(adap, (MEMWIN0_BASE + i),
425 				     (__force u32) *data++);
426 	}
427 
428 	return 0;
429 }
430 
431 /**
432  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
433  *	@adap: the adapter
434  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
435  *	@addr: address within indicated memory type
436  *	@len: amount of memory to transfer
437  *	@buf: host memory buffer
438  *	@dir: direction of transfer 1 => read, 0 => write
439  *
440  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
441  *	firmware memory address, length and host buffer must be aligned on
442  *	32-bit boudaries.  The memory is transferred as a raw byte sequence
443  *	from/to the firmware's memory.  If this memory contains data
444  *	structures which contain multi-byte integers, it's the callers
445  *	responsibility to perform appropriate byte order conversions.
446  */
447 static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
448 			__be32 *buf, int dir)
449 {
450 	u32 pos, start, end, offset, memoffset;
451 	u32 edc_size, mc_size;
452 	int ret = 0;
453 	__be32 *data;
454 
455 	/*
456 	 * Argument sanity checks ...
457 	 */
458 	if ((addr & 0x3) || (len & 0x3))
459 		return -EINVAL;
460 
461 	data = vmalloc(MEMWIN0_APERTURE);
462 	if (!data)
463 		return -ENOMEM;
464 
465 	/* Offset into the region of memory which is being accessed
466 	 * MEM_EDC0 = 0
467 	 * MEM_EDC1 = 1
468 	 * MEM_MC   = 2 -- T4
469 	 * MEM_MC0  = 2 -- For T5
470 	 * MEM_MC1  = 3 -- For T5
471 	 */
472 	edc_size  = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
473 	if (mtype != MEM_MC1)
474 		memoffset = (mtype * (edc_size * 1024 * 1024));
475 	else {
476 		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
477 						       MA_EXT_MEMORY_BAR));
478 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
479 	}
480 
481 	/* Determine the PCIE_MEM_ACCESS_OFFSET */
482 	addr = addr + memoffset;
483 
484 	/*
485 	 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
486 	 * at a time so we need to round down the start and round up the end.
487 	 * We'll start copying out of the first line at (addr - start) a word
488 	 * at a time.
489 	 */
490 	start = addr & ~(MEMWIN0_APERTURE-1);
491 	end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
492 	offset = (addr - start)/sizeof(__be32);
493 
494 	for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
495 
496 		/*
497 		 * If we're writing, copy the data from the caller's memory
498 		 * buffer
499 		 */
500 		if (!dir) {
501 			/*
502 			 * If we're doing a partial write, then we need to do
503 			 * a read-modify-write ...
504 			 */
505 			if (offset || len < MEMWIN0_APERTURE) {
506 				ret = t4_mem_win_rw(adap, pos, data, 1);
507 				if (ret)
508 					break;
509 			}
510 			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
511 			       len > 0) {
512 				data[offset++] = *buf++;
513 				len -= sizeof(__be32);
514 			}
515 		}
516 
517 		/*
518 		 * Transfer a block of memory and bail if there's an error.
519 		 */
520 		ret = t4_mem_win_rw(adap, pos, data, dir);
521 		if (ret)
522 			break;
523 
524 		/*
525 		 * If we're reading, copy the data into the caller's memory
526 		 * buffer.
527 		 */
528 		if (dir)
529 			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
530 			       len > 0) {
531 				*buf++ = data[offset++];
532 				len -= sizeof(__be32);
533 			}
534 	}
535 
536 	vfree(data);
537 	return ret;
538 }
539 
540 int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
541 		    __be32 *buf)
542 {
543 	return t4_memory_rw(adap, mtype, addr, len, buf, 0);
544 }
545 
546 #define EEPROM_STAT_ADDR   0x7bfc
547 #define VPD_BASE           0x400
548 #define VPD_BASE_OLD       0
549 #define VPD_LEN            1024
550 
551 /**
552  *	t4_seeprom_wp - enable/disable EEPROM write protection
553  *	@adapter: the adapter
554  *	@enable: whether to enable or disable write protection
555  *
556  *	Enables or disables write protection on the serial EEPROM.
557  */
558 int t4_seeprom_wp(struct adapter *adapter, bool enable)
559 {
560 	unsigned int v = enable ? 0xc : 0;
561 	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
562 	return ret < 0 ? ret : 0;
563 }
564 
565 /**
566  *	get_vpd_params - read VPD parameters from VPD EEPROM
567  *	@adapter: adapter to read
568  *	@p: where to store the parameters
569  *
570  *	Reads card parameters stored in VPD EEPROM.
571  */
572 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
573 {
574 	u32 cclk_param, cclk_val;
575 	int i, ret, addr;
576 	int ec, sn;
577 	u8 *vpd, csum;
578 	unsigned int vpdr_len, kw_offset, id_len;
579 
580 	vpd = vmalloc(VPD_LEN);
581 	if (!vpd)
582 		return -ENOMEM;
583 
584 	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
585 	if (ret < 0)
586 		goto out;
587 	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
588 
589 	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
590 	if (ret < 0)
591 		goto out;
592 
593 	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
594 		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
595 		ret = -EINVAL;
596 		goto out;
597 	}
598 
599 	id_len = pci_vpd_lrdt_size(vpd);
600 	if (id_len > ID_LEN)
601 		id_len = ID_LEN;
602 
603 	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
604 	if (i < 0) {
605 		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
606 		ret = -EINVAL;
607 		goto out;
608 	}
609 
610 	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
611 	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
612 	if (vpdr_len + kw_offset > VPD_LEN) {
613 		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
614 		ret = -EINVAL;
615 		goto out;
616 	}
617 
618 #define FIND_VPD_KW(var, name) do { \
619 	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
620 	if (var < 0) { \
621 		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
622 		ret = -EINVAL; \
623 		goto out; \
624 	} \
625 	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
626 } while (0)
627 
628 	FIND_VPD_KW(i, "RV");
629 	for (csum = 0; i >= 0; i--)
630 		csum += vpd[i];
631 
632 	if (csum) {
633 		dev_err(adapter->pdev_dev,
634 			"corrupted VPD EEPROM, actual csum %u\n", csum);
635 		ret = -EINVAL;
636 		goto out;
637 	}
638 
639 	FIND_VPD_KW(ec, "EC");
640 	FIND_VPD_KW(sn, "SN");
641 #undef FIND_VPD_KW
642 
643 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
644 	strim(p->id);
645 	memcpy(p->ec, vpd + ec, EC_LEN);
646 	strim(p->ec);
647 	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
648 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
649 	strim(p->sn);
650 
651 	/*
652 	 * Ask firmware for the Core Clock since it knows how to translate the
653 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
654 	 */
655 	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
656 		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
657 	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
658 			      1, &cclk_param, &cclk_val);
659 
660 out:
661 	vfree(vpd);
662 	if (ret)
663 		return ret;
664 	p->cclk = cclk_val;
665 
666 	return 0;
667 }
668 
669 /* serial flash and firmware constants */
670 enum {
671 	SF_ATTEMPTS = 10,             /* max retries for SF operations */
672 
673 	/* flash command opcodes */
674 	SF_PROG_PAGE    = 2,          /* program page */
675 	SF_WR_DISABLE   = 4,          /* disable writes */
676 	SF_RD_STATUS    = 5,          /* read status register */
677 	SF_WR_ENABLE    = 6,          /* enable writes */
678 	SF_RD_DATA_FAST = 0xb,        /* read flash */
679 	SF_RD_ID        = 0x9f,       /* read ID */
680 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
681 
682 	FW_MAX_SIZE = 512 * 1024,
683 };
684 
685 /**
686  *	sf1_read - read data from the serial flash
687  *	@adapter: the adapter
688  *	@byte_cnt: number of bytes to read
689  *	@cont: whether another operation will be chained
690  *	@lock: whether to lock SF for PL access only
691  *	@valp: where to store the read data
692  *
693  *	Reads up to 4 bytes of data from the serial flash.  The location of
694  *	the read needs to be specified prior to calling this by issuing the
695  *	appropriate commands to the serial flash.
696  */
697 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
698 		    int lock, u32 *valp)
699 {
700 	int ret;
701 
702 	if (!byte_cnt || byte_cnt > 4)
703 		return -EINVAL;
704 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
705 		return -EBUSY;
706 	cont = cont ? SF_CONT : 0;
707 	lock = lock ? SF_LOCK : 0;
708 	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
709 	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
710 	if (!ret)
711 		*valp = t4_read_reg(adapter, SF_DATA);
712 	return ret;
713 }
714 
715 /**
716  *	sf1_write - write data to the serial flash
717  *	@adapter: the adapter
718  *	@byte_cnt: number of bytes to write
719  *	@cont: whether another operation will be chained
720  *	@lock: whether to lock SF for PL access only
721  *	@val: value to write
722  *
723  *	Writes up to 4 bytes of data to the serial flash.  The location of
724  *	the write needs to be specified prior to calling this by issuing the
725  *	appropriate commands to the serial flash.
726  */
727 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
728 		     int lock, u32 val)
729 {
730 	if (!byte_cnt || byte_cnt > 4)
731 		return -EINVAL;
732 	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
733 		return -EBUSY;
734 	cont = cont ? SF_CONT : 0;
735 	lock = lock ? SF_LOCK : 0;
736 	t4_write_reg(adapter, SF_DATA, val);
737 	t4_write_reg(adapter, SF_OP, lock |
738 		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
739 	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
740 }
741 
742 /**
743  *	flash_wait_op - wait for a flash operation to complete
744  *	@adapter: the adapter
745  *	@attempts: max number of polls of the status register
746  *	@delay: delay between polls in ms
747  *
748  *	Wait for a flash operation to complete by polling the status register.
749  */
750 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
751 {
752 	int ret;
753 	u32 status;
754 
755 	while (1) {
756 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
757 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
758 			return ret;
759 		if (!(status & 1))
760 			return 0;
761 		if (--attempts == 0)
762 			return -EAGAIN;
763 		if (delay)
764 			msleep(delay);
765 	}
766 }
767 
768 /**
769  *	t4_read_flash - read words from serial flash
770  *	@adapter: the adapter
771  *	@addr: the start address for the read
772  *	@nwords: how many 32-bit words to read
773  *	@data: where to store the read data
774  *	@byte_oriented: whether to store data as bytes or as words
775  *
776  *	Read the specified number of 32-bit words from the serial flash.
777  *	If @byte_oriented is set the read data is stored as a byte array
778  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
779  *	natural endianess.
780  */
781 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
782 			 unsigned int nwords, u32 *data, int byte_oriented)
783 {
784 	int ret;
785 
786 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
787 		return -EINVAL;
788 
789 	addr = swab32(addr) | SF_RD_DATA_FAST;
790 
791 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
792 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
793 		return ret;
794 
795 	for ( ; nwords; nwords--, data++) {
796 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
797 		if (nwords == 1)
798 			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
799 		if (ret)
800 			return ret;
801 		if (byte_oriented)
802 			*data = (__force __u32) (htonl(*data));
803 	}
804 	return 0;
805 }
806 
807 /**
808  *	t4_write_flash - write up to a page of data to the serial flash
809  *	@adapter: the adapter
810  *	@addr: the start address to write
811  *	@n: length of data to write in bytes
812  *	@data: the data to write
813  *
814  *	Writes up to a page of data (256 bytes) to the serial flash starting
815  *	at the given address.  All the data must be written to the same page.
816  */
817 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
818 			  unsigned int n, const u8 *data)
819 {
820 	int ret;
821 	u32 buf[64];
822 	unsigned int i, c, left, val, offset = addr & 0xff;
823 
824 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
825 		return -EINVAL;
826 
827 	val = swab32(addr) | SF_PROG_PAGE;
828 
829 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
830 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
831 		goto unlock;
832 
833 	for (left = n; left; left -= c) {
834 		c = min(left, 4U);
835 		for (val = 0, i = 0; i < c; ++i)
836 			val = (val << 8) + *data++;
837 
838 		ret = sf1_write(adapter, c, c != left, 1, val);
839 		if (ret)
840 			goto unlock;
841 	}
842 	ret = flash_wait_op(adapter, 8, 1);
843 	if (ret)
844 		goto unlock;
845 
846 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
847 
848 	/* Read the page to verify the write succeeded */
849 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
850 	if (ret)
851 		return ret;
852 
853 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
854 		dev_err(adapter->pdev_dev,
855 			"failed to correctly write the flash page at %#x\n",
856 			addr);
857 		return -EIO;
858 	}
859 	return 0;
860 
861 unlock:
862 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
863 	return ret;
864 }
865 
866 /**
867  *	t4_get_fw_version - read the firmware version
868  *	@adapter: the adapter
869  *	@vers: where to place the version
870  *
871  *	Reads the FW version from flash.
872  */
873 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
874 {
875 	return t4_read_flash(adapter, FLASH_FW_START +
876 			     offsetof(struct fw_hdr, fw_ver), 1,
877 			     vers, 0);
878 }
879 
880 /**
881  *	t4_get_tp_version - read the TP microcode version
882  *	@adapter: the adapter
883  *	@vers: where to place the version
884  *
885  *	Reads the TP microcode version from flash.
886  */
887 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
888 {
889 	return t4_read_flash(adapter, FLASH_FW_START +
890 			     offsetof(struct fw_hdr, tp_microcode_ver),
891 			     1, vers, 0);
892 }
893 
894 /* Is the given firmware API compatible with the one the driver was compiled
895  * with?
896  */
897 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
898 {
899 
900 	/* short circuit if it's the exact same firmware version */
901 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
902 		return 1;
903 
904 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
905 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
906 	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
907 		return 1;
908 #undef SAME_INTF
909 
910 	return 0;
911 }
912 
913 /* The firmware in the filesystem is usable, but should it be installed?
914  * This routine explains itself in detail if it indicates the filesystem
915  * firmware should be installed.
916  */
917 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
918 				int k, int c)
919 {
920 	const char *reason;
921 
922 	if (!card_fw_usable) {
923 		reason = "incompatible or unusable";
924 		goto install;
925 	}
926 
927 	if (k > c) {
928 		reason = "older than the version supported with this driver";
929 		goto install;
930 	}
931 
932 	return 0;
933 
934 install:
935 	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
936 		"installing firmware %u.%u.%u.%u on card.\n",
937 		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
938 		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
939 		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
940 		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
941 
942 	return 1;
943 }
944 
945 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
946 	       const u8 *fw_data, unsigned int fw_size,
947 	       struct fw_hdr *card_fw, enum dev_state state,
948 	       int *reset)
949 {
950 	int ret, card_fw_usable, fs_fw_usable;
951 	const struct fw_hdr *fs_fw;
952 	const struct fw_hdr *drv_fw;
953 
954 	drv_fw = &fw_info->fw_hdr;
955 
956 	/* Read the header of the firmware on the card */
957 	ret = -t4_read_flash(adap, FLASH_FW_START,
958 			    sizeof(*card_fw) / sizeof(uint32_t),
959 			    (uint32_t *)card_fw, 1);
960 	if (ret == 0) {
961 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
962 	} else {
963 		dev_err(adap->pdev_dev,
964 			"Unable to read card's firmware header: %d\n", ret);
965 		card_fw_usable = 0;
966 	}
967 
968 	if (fw_data != NULL) {
969 		fs_fw = (const void *)fw_data;
970 		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
971 	} else {
972 		fs_fw = NULL;
973 		fs_fw_usable = 0;
974 	}
975 
976 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
977 	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
978 		/* Common case: the firmware on the card is an exact match and
979 		 * the filesystem one is an exact match too, or the filesystem
980 		 * one is absent/incompatible.
981 		 */
982 	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
983 		   should_install_fs_fw(adap, card_fw_usable,
984 					be32_to_cpu(fs_fw->fw_ver),
985 					be32_to_cpu(card_fw->fw_ver))) {
986 		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
987 				     fw_size, 0);
988 		if (ret != 0) {
989 			dev_err(adap->pdev_dev,
990 				"failed to install firmware: %d\n", ret);
991 			goto bye;
992 		}
993 
994 		/* Installed successfully, update the cached header too. */
995 		memcpy(card_fw, fs_fw, sizeof(*card_fw));
996 		card_fw_usable = 1;
997 		*reset = 0;	/* already reset as part of load_fw */
998 	}
999 
1000 	if (!card_fw_usable) {
1001 		uint32_t d, c, k;
1002 
1003 		d = be32_to_cpu(drv_fw->fw_ver);
1004 		c = be32_to_cpu(card_fw->fw_ver);
1005 		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1006 
1007 		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1008 			"chip state %d, "
1009 			"driver compiled with %d.%d.%d.%d, "
1010 			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1011 			state,
1012 			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1013 			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1014 			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1015 			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1016 			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1017 			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1018 		ret = EINVAL;
1019 		goto bye;
1020 	}
1021 
1022 	/* We're using whatever's on the card and it's known to be good. */
1023 	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1024 	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1025 
1026 bye:
1027 	return ret;
1028 }
1029 
1030 /**
1031  *	t4_flash_erase_sectors - erase a range of flash sectors
1032  *	@adapter: the adapter
1033  *	@start: the first sector to erase
1034  *	@end: the last sector to erase
1035  *
1036  *	Erases the sectors in the given inclusive range.
1037  */
1038 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1039 {
1040 	int ret = 0;
1041 
1042 	while (start <= end) {
1043 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1044 		    (ret = sf1_write(adapter, 4, 0, 1,
1045 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1046 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1047 			dev_err(adapter->pdev_dev,
1048 				"erase of flash sector %d failed, error %d\n",
1049 				start, ret);
1050 			break;
1051 		}
1052 		start++;
1053 	}
1054 	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
1055 	return ret;
1056 }
1057 
1058 /**
1059  *	t4_flash_cfg_addr - return the address of the flash configuration file
1060  *	@adapter: the adapter
1061  *
1062  *	Return the address within the flash where the Firmware Configuration
1063  *	File is stored.
1064  */
1065 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1066 {
1067 	if (adapter->params.sf_size == 0x100000)
1068 		return FLASH_FPGA_CFG_START;
1069 	else
1070 		return FLASH_CFG_START;
1071 }
1072 
1073 /**
1074  *	t4_load_fw - download firmware
1075  *	@adap: the adapter
1076  *	@fw_data: the firmware image to write
1077  *	@size: image size
1078  *
1079  *	Write the supplied firmware image to the card's serial flash.
1080  */
1081 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1082 {
1083 	u32 csum;
1084 	int ret, addr;
1085 	unsigned int i;
1086 	u8 first_page[SF_PAGE_SIZE];
1087 	const __be32 *p = (const __be32 *)fw_data;
1088 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1089 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1090 	unsigned int fw_img_start = adap->params.sf_fw_start;
1091 	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
1092 
1093 	if (!size) {
1094 		dev_err(adap->pdev_dev, "FW image has no data\n");
1095 		return -EINVAL;
1096 	}
1097 	if (size & 511) {
1098 		dev_err(adap->pdev_dev,
1099 			"FW image size not multiple of 512 bytes\n");
1100 		return -EINVAL;
1101 	}
1102 	if (ntohs(hdr->len512) * 512 != size) {
1103 		dev_err(adap->pdev_dev,
1104 			"FW image size differs from size in FW header\n");
1105 		return -EINVAL;
1106 	}
1107 	if (size > FW_MAX_SIZE) {
1108 		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1109 			FW_MAX_SIZE);
1110 		return -EFBIG;
1111 	}
1112 
1113 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1114 		csum += ntohl(p[i]);
1115 
1116 	if (csum != 0xffffffff) {
1117 		dev_err(adap->pdev_dev,
1118 			"corrupted firmware image, checksum %#x\n", csum);
1119 		return -EINVAL;
1120 	}
1121 
1122 	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1123 	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1124 	if (ret)
1125 		goto out;
1126 
1127 	/*
1128 	 * We write the correct version at the end so the driver can see a bad
1129 	 * version if the FW write fails.  Start by writing a copy of the
1130 	 * first page with a bad version.
1131 	 */
1132 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1133 	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1134 	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1135 	if (ret)
1136 		goto out;
1137 
1138 	addr = fw_img_start;
1139 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1140 		addr += SF_PAGE_SIZE;
1141 		fw_data += SF_PAGE_SIZE;
1142 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1143 		if (ret)
1144 			goto out;
1145 	}
1146 
1147 	ret = t4_write_flash(adap,
1148 			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
1149 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1150 out:
1151 	if (ret)
1152 		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1153 			ret);
1154 	return ret;
1155 }
1156 
1157 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1158 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1159 
1160 /**
1161  *	t4_link_start - apply link configuration to MAC/PHY
1162  *	@phy: the PHY to setup
1163  *	@mac: the MAC to setup
1164  *	@lc: the requested link configuration
1165  *
1166  *	Set up a port's MAC and PHY according to a desired link configuration.
1167  *	- If the PHY can auto-negotiate first decide what to advertise, then
1168  *	  enable/disable auto-negotiation as desired, and reset.
1169  *	- If the PHY does not auto-negotiate just reset it.
1170  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1171  *	  otherwise do it later based on the outcome of auto-negotiation.
1172  */
1173 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1174 		  struct link_config *lc)
1175 {
1176 	struct fw_port_cmd c;
1177 	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1178 
1179 	lc->link_ok = 0;
1180 	if (lc->requested_fc & PAUSE_RX)
1181 		fc |= FW_PORT_CAP_FC_RX;
1182 	if (lc->requested_fc & PAUSE_TX)
1183 		fc |= FW_PORT_CAP_FC_TX;
1184 
1185 	memset(&c, 0, sizeof(c));
1186 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1187 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1188 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1189 				  FW_LEN16(c));
1190 
1191 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1192 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1193 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1194 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1195 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1196 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1197 	} else
1198 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1199 
1200 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1201 }
1202 
1203 /**
1204  *	t4_restart_aneg - restart autonegotiation
1205  *	@adap: the adapter
1206  *	@mbox: mbox to use for the FW command
1207  *	@port: the port id
1208  *
1209  *	Restarts autonegotiation for the selected port.
1210  */
1211 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1212 {
1213 	struct fw_port_cmd c;
1214 
1215 	memset(&c, 0, sizeof(c));
1216 	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1217 			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1218 	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1219 				  FW_LEN16(c));
1220 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1221 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1222 }
1223 
1224 typedef void (*int_handler_t)(struct adapter *adap);
1225 
1226 struct intr_info {
1227 	unsigned int mask;       /* bits to check in interrupt status */
1228 	const char *msg;         /* message to print or NULL */
1229 	short stat_idx;          /* stat counter to increment or -1 */
1230 	unsigned short fatal;    /* whether the condition reported is fatal */
1231 	int_handler_t int_handler; /* platform-specific int handler */
1232 };
1233 
1234 /**
1235  *	t4_handle_intr_status - table driven interrupt handler
1236  *	@adapter: the adapter that generated the interrupt
1237  *	@reg: the interrupt status register to process
1238  *	@acts: table of interrupt actions
1239  *
1240  *	A table driven interrupt handler that applies a set of masks to an
1241  *	interrupt status word and performs the corresponding actions if the
1242  *	interrupts described by the mask have occurred.  The actions include
1243  *	optionally emitting a warning or alert message.  The table is terminated
1244  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1245  *	conditions.
1246  */
1247 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1248 				 const struct intr_info *acts)
1249 {
1250 	int fatal = 0;
1251 	unsigned int mask = 0;
1252 	unsigned int status = t4_read_reg(adapter, reg);
1253 
1254 	for ( ; acts->mask; ++acts) {
1255 		if (!(status & acts->mask))
1256 			continue;
1257 		if (acts->fatal) {
1258 			fatal++;
1259 			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1260 				  status & acts->mask);
1261 		} else if (acts->msg && printk_ratelimit())
1262 			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1263 				 status & acts->mask);
1264 		if (acts->int_handler)
1265 			acts->int_handler(adapter);
1266 		mask |= acts->mask;
1267 	}
1268 	status &= mask;
1269 	if (status)                           /* clear processed interrupts */
1270 		t4_write_reg(adapter, reg, status);
1271 	return fatal;
1272 }
1273 
1274 /*
1275  * Interrupt handler for the PCIE module.
1276  */
1277 static void pcie_intr_handler(struct adapter *adapter)
1278 {
1279 	static const struct intr_info sysbus_intr_info[] = {
1280 		{ RNPP, "RXNP array parity error", -1, 1 },
1281 		{ RPCP, "RXPC array parity error", -1, 1 },
1282 		{ RCIP, "RXCIF array parity error", -1, 1 },
1283 		{ RCCP, "Rx completions control array parity error", -1, 1 },
1284 		{ RFTP, "RXFT array parity error", -1, 1 },
1285 		{ 0 }
1286 	};
1287 	static const struct intr_info pcie_port_intr_info[] = {
1288 		{ TPCP, "TXPC array parity error", -1, 1 },
1289 		{ TNPP, "TXNP array parity error", -1, 1 },
1290 		{ TFTP, "TXFT array parity error", -1, 1 },
1291 		{ TCAP, "TXCA array parity error", -1, 1 },
1292 		{ TCIP, "TXCIF array parity error", -1, 1 },
1293 		{ RCAP, "RXCA array parity error", -1, 1 },
1294 		{ OTDD, "outbound request TLP discarded", -1, 1 },
1295 		{ RDPE, "Rx data parity error", -1, 1 },
1296 		{ TDUE, "Tx uncorrectable data error", -1, 1 },
1297 		{ 0 }
1298 	};
1299 	static const struct intr_info pcie_intr_info[] = {
1300 		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1301 		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1302 		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
1303 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1304 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1305 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1306 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1307 		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1308 		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1309 		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1310 		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1311 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1312 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1313 		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1314 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1315 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1316 		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1317 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1318 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1319 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1320 		{ FIDPERR, "PCI FID parity error", -1, 1 },
1321 		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1322 		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
1323 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1324 		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1325 		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
1326 		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
1327 		{ PCIESINT, "PCI core secondary fault", -1, 1 },
1328 		{ PCIEPINT, "PCI core primary fault", -1, 1 },
1329 		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1330 		{ 0 }
1331 	};
1332 
1333 	static struct intr_info t5_pcie_intr_info[] = {
1334 		{ MSTGRPPERR, "Master Response Read Queue parity error",
1335 		  -1, 1 },
1336 		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1337 		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1338 		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1339 		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1340 		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1341 		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1342 		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1343 		  -1, 1 },
1344 		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1345 		  -1, 1 },
1346 		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1347 		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1348 		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1349 		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1350 		{ DREQWRPERR, "PCI DMA channel write request parity error",
1351 		  -1, 1 },
1352 		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1353 		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1354 		{ HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1355 		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1356 		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1357 		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1358 		{ FIDPERR, "PCI FID parity error", -1, 1 },
1359 		{ VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1360 		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1361 		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1362 		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1363 		  -1, 1 },
1364 		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1365 		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1366 		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1367 		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1368 		{ READRSPERR, "Outbound read error", -1, 0 },
1369 		{ 0 }
1370 	};
1371 
1372 	int fat;
1373 
1374 	fat = t4_handle_intr_status(adapter,
1375 				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1376 				    sysbus_intr_info) +
1377 	      t4_handle_intr_status(adapter,
1378 				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1379 				    pcie_port_intr_info) +
1380 	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1381 				    is_t4(adapter->params.chip) ?
1382 				    pcie_intr_info : t5_pcie_intr_info);
1383 
1384 	if (fat)
1385 		t4_fatal_err(adapter);
1386 }
1387 
1388 /*
1389  * TP interrupt handler.
1390  */
1391 static void tp_intr_handler(struct adapter *adapter)
1392 {
1393 	static const struct intr_info tp_intr_info[] = {
1394 		{ 0x3fffffff, "TP parity error", -1, 1 },
1395 		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1396 		{ 0 }
1397 	};
1398 
1399 	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1400 		t4_fatal_err(adapter);
1401 }
1402 
1403 /*
1404  * SGE interrupt handler.
1405  */
1406 static void sge_intr_handler(struct adapter *adapter)
1407 {
1408 	u64 v;
1409 
1410 	static const struct intr_info sge_intr_info[] = {
1411 		{ ERR_CPL_EXCEED_IQE_SIZE,
1412 		  "SGE received CPL exceeding IQE size", -1, 1 },
1413 		{ ERR_INVALID_CIDX_INC,
1414 		  "SGE GTS CIDX increment too large", -1, 0 },
1415 		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1416 		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1417 		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1418 		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1419 		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1420 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1421 		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1422 		  0 },
1423 		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1424 		  0 },
1425 		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1426 		  0 },
1427 		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1428 		  0 },
1429 		{ ERR_ING_CTXT_PRIO,
1430 		  "SGE too many priority ingress contexts", -1, 0 },
1431 		{ ERR_EGR_CTXT_PRIO,
1432 		  "SGE too many priority egress contexts", -1, 0 },
1433 		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1434 		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1435 		{ 0 }
1436 	};
1437 
1438 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1439 		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1440 	if (v) {
1441 		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1442 				(unsigned long long)v);
1443 		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1444 		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1445 	}
1446 
1447 	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1448 	    v != 0)
1449 		t4_fatal_err(adapter);
1450 }
1451 
1452 /*
1453  * CIM interrupt handler.
1454  */
1455 static void cim_intr_handler(struct adapter *adapter)
1456 {
1457 	static const struct intr_info cim_intr_info[] = {
1458 		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1459 		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
1460 		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
1461 		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1462 		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1463 		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1464 		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1465 		{ 0 }
1466 	};
1467 	static const struct intr_info cim_upintr_info[] = {
1468 		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1469 		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1470 		{ ILLWRINT, "CIM illegal write", -1, 1 },
1471 		{ ILLRDINT, "CIM illegal read", -1, 1 },
1472 		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1473 		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1474 		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1475 		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1476 		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1477 		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1478 		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1479 		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1480 		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1481 		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1482 		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1483 		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1484 		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1485 		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1486 		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1487 		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1488 		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1489 		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1490 		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1491 		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1492 		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1493 		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1494 		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1495 		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1496 		{ 0 }
1497 	};
1498 
1499 	int fat;
1500 
1501 	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1502 				    cim_intr_info) +
1503 	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1504 				    cim_upintr_info);
1505 	if (fat)
1506 		t4_fatal_err(adapter);
1507 }
1508 
1509 /*
1510  * ULP RX interrupt handler.
1511  */
1512 static void ulprx_intr_handler(struct adapter *adapter)
1513 {
1514 	static const struct intr_info ulprx_intr_info[] = {
1515 		{ 0x1800000, "ULPRX context error", -1, 1 },
1516 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1517 		{ 0 }
1518 	};
1519 
1520 	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1521 		t4_fatal_err(adapter);
1522 }
1523 
1524 /*
1525  * ULP TX interrupt handler.
1526  */
1527 static void ulptx_intr_handler(struct adapter *adapter)
1528 {
1529 	static const struct intr_info ulptx_intr_info[] = {
1530 		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1531 		  0 },
1532 		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1533 		  0 },
1534 		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1535 		  0 },
1536 		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1537 		  0 },
1538 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1539 		{ 0 }
1540 	};
1541 
1542 	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1543 		t4_fatal_err(adapter);
1544 }
1545 
1546 /*
1547  * PM TX interrupt handler.
1548  */
1549 static void pmtx_intr_handler(struct adapter *adapter)
1550 {
1551 	static const struct intr_info pmtx_intr_info[] = {
1552 		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1553 		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1554 		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1555 		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1556 		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1557 		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1558 		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1559 		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1560 		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1561 		{ 0 }
1562 	};
1563 
1564 	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1565 		t4_fatal_err(adapter);
1566 }
1567 
1568 /*
1569  * PM RX interrupt handler.
1570  */
1571 static void pmrx_intr_handler(struct adapter *adapter)
1572 {
1573 	static const struct intr_info pmrx_intr_info[] = {
1574 		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1575 		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1576 		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1577 		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1578 		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1579 		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1580 		{ 0 }
1581 	};
1582 
1583 	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1584 		t4_fatal_err(adapter);
1585 }
1586 
1587 /*
1588  * CPL switch interrupt handler.
1589  */
1590 static void cplsw_intr_handler(struct adapter *adapter)
1591 {
1592 	static const struct intr_info cplsw_intr_info[] = {
1593 		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1594 		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1595 		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1596 		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1597 		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1598 		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1599 		{ 0 }
1600 	};
1601 
1602 	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1603 		t4_fatal_err(adapter);
1604 }
1605 
1606 /*
1607  * LE interrupt handler.
1608  */
1609 static void le_intr_handler(struct adapter *adap)
1610 {
1611 	static const struct intr_info le_intr_info[] = {
1612 		{ LIPMISS, "LE LIP miss", -1, 0 },
1613 		{ LIP0, "LE 0 LIP error", -1, 0 },
1614 		{ PARITYERR, "LE parity error", -1, 1 },
1615 		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
1616 		{ REQQPARERR, "LE request queue parity error", -1, 1 },
1617 		{ 0 }
1618 	};
1619 
1620 	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1621 		t4_fatal_err(adap);
1622 }
1623 
1624 /*
1625  * MPS interrupt handler.
1626  */
1627 static void mps_intr_handler(struct adapter *adapter)
1628 {
1629 	static const struct intr_info mps_rx_intr_info[] = {
1630 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1631 		{ 0 }
1632 	};
1633 	static const struct intr_info mps_tx_intr_info[] = {
1634 		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1635 		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1636 		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1637 		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1638 		{ BUBBLE, "MPS Tx underflow", -1, 1 },
1639 		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1640 		{ FRMERR, "MPS Tx framing error", -1, 1 },
1641 		{ 0 }
1642 	};
1643 	static const struct intr_info mps_trc_intr_info[] = {
1644 		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
1645 		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1646 		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
1647 		{ 0 }
1648 	};
1649 	static const struct intr_info mps_stat_sram_intr_info[] = {
1650 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1651 		{ 0 }
1652 	};
1653 	static const struct intr_info mps_stat_tx_intr_info[] = {
1654 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1655 		{ 0 }
1656 	};
1657 	static const struct intr_info mps_stat_rx_intr_info[] = {
1658 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1659 		{ 0 }
1660 	};
1661 	static const struct intr_info mps_cls_intr_info[] = {
1662 		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1663 		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1664 		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1665 		{ 0 }
1666 	};
1667 
1668 	int fat;
1669 
1670 	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1671 				    mps_rx_intr_info) +
1672 	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1673 				    mps_tx_intr_info) +
1674 	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1675 				    mps_trc_intr_info) +
1676 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1677 				    mps_stat_sram_intr_info) +
1678 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1679 				    mps_stat_tx_intr_info) +
1680 	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1681 				    mps_stat_rx_intr_info) +
1682 	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1683 				    mps_cls_intr_info);
1684 
1685 	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1686 		     RXINT | TXINT | STATINT);
1687 	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
1688 	if (fat)
1689 		t4_fatal_err(adapter);
1690 }
1691 
1692 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1693 
1694 /*
1695  * EDC/MC interrupt handler.
1696  */
1697 static void mem_intr_handler(struct adapter *adapter, int idx)
1698 {
1699 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1700 
1701 	unsigned int addr, cnt_addr, v;
1702 
1703 	if (idx <= MEM_EDC1) {
1704 		addr = EDC_REG(EDC_INT_CAUSE, idx);
1705 		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1706 	} else {
1707 		addr = MC_INT_CAUSE;
1708 		cnt_addr = MC_ECC_STATUS;
1709 	}
1710 
1711 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1712 	if (v & PERR_INT_CAUSE)
1713 		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1714 			  name[idx]);
1715 	if (v & ECC_CE_INT_CAUSE) {
1716 		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1717 
1718 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1719 		if (printk_ratelimit())
1720 			dev_warn(adapter->pdev_dev,
1721 				 "%u %s correctable ECC data error%s\n",
1722 				 cnt, name[idx], cnt > 1 ? "s" : "");
1723 	}
1724 	if (v & ECC_UE_INT_CAUSE)
1725 		dev_alert(adapter->pdev_dev,
1726 			  "%s uncorrectable ECC data error\n", name[idx]);
1727 
1728 	t4_write_reg(adapter, addr, v);
1729 	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1730 		t4_fatal_err(adapter);
1731 }
1732 
1733 /*
1734  * MA interrupt handler.
1735  */
1736 static void ma_intr_handler(struct adapter *adap)
1737 {
1738 	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1739 
1740 	if (status & MEM_PERR_INT_CAUSE)
1741 		dev_alert(adap->pdev_dev,
1742 			  "MA parity error, parity status %#x\n",
1743 			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1744 	if (status & MEM_WRAP_INT_CAUSE) {
1745 		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1746 		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1747 			  "client %u to address %#x\n",
1748 			  MEM_WRAP_CLIENT_NUM_GET(v),
1749 			  MEM_WRAP_ADDRESS_GET(v) << 4);
1750 	}
1751 	t4_write_reg(adap, MA_INT_CAUSE, status);
1752 	t4_fatal_err(adap);
1753 }
1754 
1755 /*
1756  * SMB interrupt handler.
1757  */
1758 static void smb_intr_handler(struct adapter *adap)
1759 {
1760 	static const struct intr_info smb_intr_info[] = {
1761 		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1762 		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1763 		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1764 		{ 0 }
1765 	};
1766 
1767 	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1768 		t4_fatal_err(adap);
1769 }
1770 
1771 /*
1772  * NC-SI interrupt handler.
1773  */
1774 static void ncsi_intr_handler(struct adapter *adap)
1775 {
1776 	static const struct intr_info ncsi_intr_info[] = {
1777 		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1778 		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1779 		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1780 		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1781 		{ 0 }
1782 	};
1783 
1784 	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1785 		t4_fatal_err(adap);
1786 }
1787 
1788 /*
1789  * XGMAC interrupt handler.
1790  */
1791 static void xgmac_intr_handler(struct adapter *adap, int port)
1792 {
1793 	u32 v, int_cause_reg;
1794 
1795 	if (is_t4(adap->params.chip))
1796 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1797 	else
1798 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1799 
1800 	v = t4_read_reg(adap, int_cause_reg);
1801 
1802 	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1803 	if (!v)
1804 		return;
1805 
1806 	if (v & TXFIFO_PRTY_ERR)
1807 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1808 			  port);
1809 	if (v & RXFIFO_PRTY_ERR)
1810 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1811 			  port);
1812 	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1813 	t4_fatal_err(adap);
1814 }
1815 
1816 /*
1817  * PL interrupt handler.
1818  */
1819 static void pl_intr_handler(struct adapter *adap)
1820 {
1821 	static const struct intr_info pl_intr_info[] = {
1822 		{ FATALPERR, "T4 fatal parity error", -1, 1 },
1823 		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1824 		{ 0 }
1825 	};
1826 
1827 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1828 		t4_fatal_err(adap);
1829 }
1830 
1831 #define PF_INTR_MASK (PFSW)
1832 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1833 		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1834 		CPL_SWITCH | SGE | ULP_TX)
1835 
1836 /**
1837  *	t4_slow_intr_handler - control path interrupt handler
1838  *	@adapter: the adapter
1839  *
1840  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
1841  *	The designation 'slow' is because it involves register reads, while
1842  *	data interrupts typically don't involve any MMIOs.
1843  */
1844 int t4_slow_intr_handler(struct adapter *adapter)
1845 {
1846 	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1847 
1848 	if (!(cause & GLBL_INTR_MASK))
1849 		return 0;
1850 	if (cause & CIM)
1851 		cim_intr_handler(adapter);
1852 	if (cause & MPS)
1853 		mps_intr_handler(adapter);
1854 	if (cause & NCSI)
1855 		ncsi_intr_handler(adapter);
1856 	if (cause & PL)
1857 		pl_intr_handler(adapter);
1858 	if (cause & SMB)
1859 		smb_intr_handler(adapter);
1860 	if (cause & XGMAC0)
1861 		xgmac_intr_handler(adapter, 0);
1862 	if (cause & XGMAC1)
1863 		xgmac_intr_handler(adapter, 1);
1864 	if (cause & XGMAC_KR0)
1865 		xgmac_intr_handler(adapter, 2);
1866 	if (cause & XGMAC_KR1)
1867 		xgmac_intr_handler(adapter, 3);
1868 	if (cause & PCIE)
1869 		pcie_intr_handler(adapter);
1870 	if (cause & MC)
1871 		mem_intr_handler(adapter, MEM_MC);
1872 	if (cause & EDC0)
1873 		mem_intr_handler(adapter, MEM_EDC0);
1874 	if (cause & EDC1)
1875 		mem_intr_handler(adapter, MEM_EDC1);
1876 	if (cause & LE)
1877 		le_intr_handler(adapter);
1878 	if (cause & TP)
1879 		tp_intr_handler(adapter);
1880 	if (cause & MA)
1881 		ma_intr_handler(adapter);
1882 	if (cause & PM_TX)
1883 		pmtx_intr_handler(adapter);
1884 	if (cause & PM_RX)
1885 		pmrx_intr_handler(adapter);
1886 	if (cause & ULP_RX)
1887 		ulprx_intr_handler(adapter);
1888 	if (cause & CPL_SWITCH)
1889 		cplsw_intr_handler(adapter);
1890 	if (cause & SGE)
1891 		sge_intr_handler(adapter);
1892 	if (cause & ULP_TX)
1893 		ulptx_intr_handler(adapter);
1894 
1895 	/* Clear the interrupts just processed for which we are the master. */
1896 	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1897 	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1898 	return 1;
1899 }
1900 
1901 /**
1902  *	t4_intr_enable - enable interrupts
1903  *	@adapter: the adapter whose interrupts should be enabled
1904  *
1905  *	Enable PF-specific interrupts for the calling function and the top-level
1906  *	interrupt concentrator for global interrupts.  Interrupts are already
1907  *	enabled at each module,	here we just enable the roots of the interrupt
1908  *	hierarchies.
1909  *
1910  *	Note: this function should be called only when the driver manages
1911  *	non PF-specific interrupts from the various HW modules.  Only one PCI
1912  *	function at a time should be doing this.
1913  */
1914 void t4_intr_enable(struct adapter *adapter)
1915 {
1916 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1917 
1918 	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1919 		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1920 		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1921 		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1922 		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1923 		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1924 		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1925 		     DBFIFO_HP_INT | DBFIFO_LP_INT |
1926 		     EGRESS_SIZE_ERR);
1927 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1928 	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1929 }
1930 
1931 /**
1932  *	t4_intr_disable - disable interrupts
1933  *	@adapter: the adapter whose interrupts should be disabled
1934  *
1935  *	Disable interrupts.  We only disable the top-level interrupt
1936  *	concentrators.  The caller must be a PCI function managing global
1937  *	interrupts.
1938  */
1939 void t4_intr_disable(struct adapter *adapter)
1940 {
1941 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1942 
1943 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1944 	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1945 }
1946 
1947 /**
1948  *	hash_mac_addr - return the hash value of a MAC address
1949  *	@addr: the 48-bit Ethernet MAC address
1950  *
1951  *	Hashes a MAC address according to the hash function used by HW inexact
1952  *	(hash) address matching.
1953  */
1954 static int hash_mac_addr(const u8 *addr)
1955 {
1956 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1957 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1958 	a ^= b;
1959 	a ^= (a >> 12);
1960 	a ^= (a >> 6);
1961 	return a & 0x3f;
1962 }
1963 
1964 /**
1965  *	t4_config_rss_range - configure a portion of the RSS mapping table
1966  *	@adapter: the adapter
1967  *	@mbox: mbox to use for the FW command
1968  *	@viid: virtual interface whose RSS subtable is to be written
1969  *	@start: start entry in the table to write
1970  *	@n: how many table entries to write
1971  *	@rspq: values for the response queue lookup table
1972  *	@nrspq: number of values in @rspq
1973  *
1974  *	Programs the selected part of the VI's RSS mapping table with the
1975  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
1976  *	until the full table range is populated.
1977  *
1978  *	The caller must ensure the values in @rspq are in the range allowed for
1979  *	@viid.
1980  */
1981 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1982 			int start, int n, const u16 *rspq, unsigned int nrspq)
1983 {
1984 	int ret;
1985 	const u16 *rsp = rspq;
1986 	const u16 *rsp_end = rspq + nrspq;
1987 	struct fw_rss_ind_tbl_cmd cmd;
1988 
1989 	memset(&cmd, 0, sizeof(cmd));
1990 	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1991 			       FW_CMD_REQUEST | FW_CMD_WRITE |
1992 			       FW_RSS_IND_TBL_CMD_VIID(viid));
1993 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
1994 
1995 	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1996 	while (n > 0) {
1997 		int nq = min(n, 32);
1998 		__be32 *qp = &cmd.iq0_to_iq2;
1999 
2000 		cmd.niqid = htons(nq);
2001 		cmd.startidx = htons(start);
2002 
2003 		start += nq;
2004 		n -= nq;
2005 
2006 		while (nq > 0) {
2007 			unsigned int v;
2008 
2009 			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2010 			if (++rsp >= rsp_end)
2011 				rsp = rspq;
2012 			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2013 			if (++rsp >= rsp_end)
2014 				rsp = rspq;
2015 			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2016 			if (++rsp >= rsp_end)
2017 				rsp = rspq;
2018 
2019 			*qp++ = htonl(v);
2020 			nq -= 3;
2021 		}
2022 
2023 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2024 		if (ret)
2025 			return ret;
2026 	}
2027 	return 0;
2028 }
2029 
2030 /**
2031  *	t4_config_glbl_rss - configure the global RSS mode
2032  *	@adapter: the adapter
2033  *	@mbox: mbox to use for the FW command
2034  *	@mode: global RSS mode
2035  *	@flags: mode-specific flags
2036  *
2037  *	Sets the global RSS mode.
2038  */
2039 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2040 		       unsigned int flags)
2041 {
2042 	struct fw_rss_glb_config_cmd c;
2043 
2044 	memset(&c, 0, sizeof(c));
2045 	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2046 			      FW_CMD_REQUEST | FW_CMD_WRITE);
2047 	c.retval_len16 = htonl(FW_LEN16(c));
2048 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2049 		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2050 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2051 		c.u.basicvirtual.mode_pkd =
2052 			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2053 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2054 	} else
2055 		return -EINVAL;
2056 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2057 }
2058 
2059 /**
2060  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2061  *	@adap: the adapter
2062  *	@v4: holds the TCP/IP counter values
2063  *	@v6: holds the TCP/IPv6 counter values
2064  *
2065  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2066  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2067  */
2068 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2069 			 struct tp_tcp_stats *v6)
2070 {
2071 	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2072 
2073 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2074 #define STAT(x)     val[STAT_IDX(x)]
2075 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2076 
2077 	if (v4) {
2078 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2079 				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2080 		v4->tcpOutRsts = STAT(OUT_RST);
2081 		v4->tcpInSegs  = STAT64(IN_SEG);
2082 		v4->tcpOutSegs = STAT64(OUT_SEG);
2083 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2084 	}
2085 	if (v6) {
2086 		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2087 				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2088 		v6->tcpOutRsts = STAT(OUT_RST);
2089 		v6->tcpInSegs  = STAT64(IN_SEG);
2090 		v6->tcpOutSegs = STAT64(OUT_SEG);
2091 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2092 	}
2093 #undef STAT64
2094 #undef STAT
2095 #undef STAT_IDX
2096 }
2097 
2098 /**
2099  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2100  *	@adap: the adapter
2101  *	@mtus: where to store the MTU values
2102  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2103  *
2104  *	Reads the HW path MTU table.
2105  */
2106 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2107 {
2108 	u32 v;
2109 	int i;
2110 
2111 	for (i = 0; i < NMTUS; ++i) {
2112 		t4_write_reg(adap, TP_MTU_TABLE,
2113 			     MTUINDEX(0xff) | MTUVALUE(i));
2114 		v = t4_read_reg(adap, TP_MTU_TABLE);
2115 		mtus[i] = MTUVALUE_GET(v);
2116 		if (mtu_log)
2117 			mtu_log[i] = MTUWIDTH_GET(v);
2118 	}
2119 }
2120 
2121 /**
2122  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2123  *	@adap: the adapter
2124  *	@addr: the indirect TP register address
2125  *	@mask: specifies the field within the register to modify
2126  *	@val: new value for the field
2127  *
2128  *	Sets a field of an indirect TP register to the given value.
2129  */
2130 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2131 			    unsigned int mask, unsigned int val)
2132 {
2133 	t4_write_reg(adap, TP_PIO_ADDR, addr);
2134 	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2135 	t4_write_reg(adap, TP_PIO_DATA, val);
2136 }
2137 
2138 /**
2139  *	init_cong_ctrl - initialize congestion control parameters
2140  *	@a: the alpha values for congestion control
2141  *	@b: the beta values for congestion control
2142  *
2143  *	Initialize the congestion control parameters.
2144  */
2145 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2146 {
2147 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2148 	a[9] = 2;
2149 	a[10] = 3;
2150 	a[11] = 4;
2151 	a[12] = 5;
2152 	a[13] = 6;
2153 	a[14] = 7;
2154 	a[15] = 8;
2155 	a[16] = 9;
2156 	a[17] = 10;
2157 	a[18] = 14;
2158 	a[19] = 17;
2159 	a[20] = 21;
2160 	a[21] = 25;
2161 	a[22] = 30;
2162 	a[23] = 35;
2163 	a[24] = 45;
2164 	a[25] = 60;
2165 	a[26] = 80;
2166 	a[27] = 100;
2167 	a[28] = 200;
2168 	a[29] = 300;
2169 	a[30] = 400;
2170 	a[31] = 500;
2171 
2172 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2173 	b[9] = b[10] = 1;
2174 	b[11] = b[12] = 2;
2175 	b[13] = b[14] = b[15] = b[16] = 3;
2176 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2177 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2178 	b[28] = b[29] = 6;
2179 	b[30] = b[31] = 7;
2180 }
2181 
2182 /* The minimum additive increment value for the congestion control table */
2183 #define CC_MIN_INCR 2U
2184 
2185 /**
2186  *	t4_load_mtus - write the MTU and congestion control HW tables
2187  *	@adap: the adapter
2188  *	@mtus: the values for the MTU table
2189  *	@alpha: the values for the congestion control alpha parameter
2190  *	@beta: the values for the congestion control beta parameter
2191  *
2192  *	Write the HW MTU table with the supplied MTUs and the high-speed
2193  *	congestion control table with the supplied alpha, beta, and MTUs.
2194  *	We write the two tables together because the additive increments
2195  *	depend on the MTUs.
2196  */
2197 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2198 		  const unsigned short *alpha, const unsigned short *beta)
2199 {
2200 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2201 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2202 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2203 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2204 	};
2205 
2206 	unsigned int i, w;
2207 
2208 	for (i = 0; i < NMTUS; ++i) {
2209 		unsigned int mtu = mtus[i];
2210 		unsigned int log2 = fls(mtu);
2211 
2212 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2213 			log2--;
2214 		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2215 			     MTUWIDTH(log2) | MTUVALUE(mtu));
2216 
2217 		for (w = 0; w < NCCTRL_WIN; ++w) {
2218 			unsigned int inc;
2219 
2220 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2221 				  CC_MIN_INCR);
2222 
2223 			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2224 				     (w << 16) | (beta[w] << 13) | inc);
2225 		}
2226 	}
2227 }
2228 
2229 /**
2230  *	get_mps_bg_map - return the buffer groups associated with a port
2231  *	@adap: the adapter
2232  *	@idx: the port index
2233  *
2234  *	Returns a bitmap indicating which MPS buffer groups are associated
2235  *	with the given port.  Bit i is set if buffer group i is used by the
2236  *	port.
2237  */
2238 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2239 {
2240 	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2241 
2242 	if (n == 0)
2243 		return idx == 0 ? 0xf : 0;
2244 	if (n == 1)
2245 		return idx < 2 ? (3 << (2 * idx)) : 0;
2246 	return 1 << idx;
2247 }
2248 
2249 /**
2250  *	t4_get_port_stats - collect port statistics
2251  *	@adap: the adapter
2252  *	@idx: the port index
2253  *	@p: the stats structure to fill
2254  *
2255  *	Collect statistics related to the given port from HW.
2256  */
2257 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2258 {
2259 	u32 bgmap = get_mps_bg_map(adap, idx);
2260 
2261 #define GET_STAT(name) \
2262 	t4_read_reg64(adap, \
2263 	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2264 	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2265 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2266 
2267 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
2268 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
2269 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
2270 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
2271 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
2272 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
2273 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
2274 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
2275 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
2276 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
2277 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
2278 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2279 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
2280 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
2281 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
2282 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
2283 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
2284 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
2285 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
2286 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
2287 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
2288 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
2289 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
2290 
2291 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
2292 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
2293 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
2294 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
2295 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
2296 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
2297 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2298 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
2299 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
2300 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
2301 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
2302 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
2303 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
2304 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
2305 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
2306 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
2307 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2308 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
2309 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
2310 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
2311 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
2312 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
2313 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
2314 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
2315 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
2316 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
2317 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
2318 
2319 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2320 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2321 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2322 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2323 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2324 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2325 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2326 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2327 
2328 #undef GET_STAT
2329 #undef GET_STAT_COM
2330 }
2331 
2332 /**
2333  *	t4_wol_magic_enable - enable/disable magic packet WoL
2334  *	@adap: the adapter
2335  *	@port: the physical port index
2336  *	@addr: MAC address expected in magic packets, %NULL to disable
2337  *
2338  *	Enables/disables magic packet wake-on-LAN for the selected port.
2339  */
2340 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2341 			 const u8 *addr)
2342 {
2343 	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2344 
2345 	if (is_t4(adap->params.chip)) {
2346 		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2347 		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2348 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2349 	} else {
2350 		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2351 		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2352 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2353 	}
2354 
2355 	if (addr) {
2356 		t4_write_reg(adap, mag_id_reg_l,
2357 			     (addr[2] << 24) | (addr[3] << 16) |
2358 			     (addr[4] << 8) | addr[5]);
2359 		t4_write_reg(adap, mag_id_reg_h,
2360 			     (addr[0] << 8) | addr[1]);
2361 	}
2362 	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2363 			 addr ? MAGICEN : 0);
2364 }
2365 
2366 /**
2367  *	t4_wol_pat_enable - enable/disable pattern-based WoL
2368  *	@adap: the adapter
2369  *	@port: the physical port index
2370  *	@map: bitmap of which HW pattern filters to set
2371  *	@mask0: byte mask for bytes 0-63 of a packet
2372  *	@mask1: byte mask for bytes 64-127 of a packet
2373  *	@crc: Ethernet CRC for selected bytes
2374  *	@enable: enable/disable switch
2375  *
2376  *	Sets the pattern filters indicated in @map to mask out the bytes
2377  *	specified in @mask0/@mask1 in received packets and compare the CRC of
2378  *	the resulting packet against @crc.  If @enable is %true pattern-based
2379  *	WoL is enabled, otherwise disabled.
2380  */
2381 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2382 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
2383 {
2384 	int i;
2385 	u32 port_cfg_reg;
2386 
2387 	if (is_t4(adap->params.chip))
2388 		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2389 	else
2390 		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2391 
2392 	if (!enable) {
2393 		t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2394 		return 0;
2395 	}
2396 	if (map > 0xff)
2397 		return -EINVAL;
2398 
2399 #define EPIO_REG(name) \
2400 	(is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2401 	T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2402 
2403 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2404 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2405 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2406 
2407 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2408 		if (!(map & 1))
2409 			continue;
2410 
2411 		/* write byte masks */
2412 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2413 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2414 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2415 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2416 			return -ETIMEDOUT;
2417 
2418 		/* write CRC */
2419 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2420 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2421 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2422 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2423 			return -ETIMEDOUT;
2424 	}
2425 #undef EPIO_REG
2426 
2427 	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2428 	return 0;
2429 }
2430 
2431 /*     t4_mk_filtdelwr - create a delete filter WR
2432  *     @ftid: the filter ID
2433  *     @wr: the filter work request to populate
2434  *     @qid: ingress queue to receive the delete notification
2435  *
2436  *     Creates a filter work request to delete the supplied filter.  If @qid is
2437  *     negative the delete notification is suppressed.
2438  */
2439 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2440 {
2441 	memset(wr, 0, sizeof(*wr));
2442 	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2443 	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2444 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2445 			V_FW_FILTER_WR_NOREPLY(qid < 0));
2446 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2447 	if (qid >= 0)
2448 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2449 }
2450 
2451 #define INIT_CMD(var, cmd, rd_wr) do { \
2452 	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2453 				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2454 	(var).retval_len16 = htonl(FW_LEN16(var)); \
2455 } while (0)
2456 
2457 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2458 			  u32 addr, u32 val)
2459 {
2460 	struct fw_ldst_cmd c;
2461 
2462 	memset(&c, 0, sizeof(c));
2463 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2464 			    FW_CMD_WRITE |
2465 			    FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2466 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2467 	c.u.addrval.addr = htonl(addr);
2468 	c.u.addrval.val = htonl(val);
2469 
2470 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2471 }
2472 
2473 /**
2474  *     t4_mem_win_read_len - read memory through PCIE memory window
2475  *     @adap: the adapter
2476  *     @addr: address of first byte requested aligned on 32b.
2477  *     @data: len bytes to hold the data read
2478  *     @len: amount of data to read from window.  Must be <=
2479  *            MEMWIN0_APERATURE after adjusting for 16B for T4 and
2480  *            128B for T5 alignment requirements of the the memory window.
2481  *
2482  *     Read len bytes of data from MC starting at @addr.
2483  */
2484 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2485 {
2486 	int i, off;
2487 	u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
2488 
2489 	/* Align on a 2KB boundary.
2490 	 */
2491 	off = addr & MEMWIN0_APERTURE;
2492 	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2493 		return -EINVAL;
2494 
2495 	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
2496 		     (addr & ~MEMWIN0_APERTURE) | win_pf);
2497 	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2498 
2499 	for (i = 0; i < len; i += 4)
2500 		*data++ = (__force __be32) t4_read_reg(adap,
2501 						(MEMWIN0_BASE + off + i));
2502 
2503 	return 0;
2504 }
2505 
2506 /**
2507  *	t4_mdio_rd - read a PHY register through MDIO
2508  *	@adap: the adapter
2509  *	@mbox: mailbox to use for the FW command
2510  *	@phy_addr: the PHY address
2511  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2512  *	@reg: the register to read
2513  *	@valp: where to store the value
2514  *
2515  *	Issues a FW command through the given mailbox to read a PHY register.
2516  */
2517 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2518 	       unsigned int mmd, unsigned int reg, u16 *valp)
2519 {
2520 	int ret;
2521 	struct fw_ldst_cmd c;
2522 
2523 	memset(&c, 0, sizeof(c));
2524 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2525 		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2526 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2527 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2528 				   FW_LDST_CMD_MMD(mmd));
2529 	c.u.mdio.raddr = htons(reg);
2530 
2531 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2532 	if (ret == 0)
2533 		*valp = ntohs(c.u.mdio.rval);
2534 	return ret;
2535 }
2536 
2537 /**
2538  *	t4_mdio_wr - write a PHY register through MDIO
2539  *	@adap: the adapter
2540  *	@mbox: mailbox to use for the FW command
2541  *	@phy_addr: the PHY address
2542  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2543  *	@reg: the register to write
2544  *	@valp: value to write
2545  *
2546  *	Issues a FW command through the given mailbox to write a PHY register.
2547  */
2548 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2549 	       unsigned int mmd, unsigned int reg, u16 val)
2550 {
2551 	struct fw_ldst_cmd c;
2552 
2553 	memset(&c, 0, sizeof(c));
2554 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2555 		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2556 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2557 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2558 				   FW_LDST_CMD_MMD(mmd));
2559 	c.u.mdio.raddr = htons(reg);
2560 	c.u.mdio.rval = htons(val);
2561 
2562 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2563 }
2564 
2565 /**
2566  *      t4_fw_hello - establish communication with FW
2567  *      @adap: the adapter
2568  *      @mbox: mailbox to use for the FW command
2569  *      @evt_mbox: mailbox to receive async FW events
2570  *      @master: specifies the caller's willingness to be the device master
2571  *	@state: returns the current device state (if non-NULL)
2572  *
2573  *	Issues a command to establish communication with FW.  Returns either
2574  *	an error (negative integer) or the mailbox of the Master PF.
2575  */
2576 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2577 		enum dev_master master, enum dev_state *state)
2578 {
2579 	int ret;
2580 	struct fw_hello_cmd c;
2581 	u32 v;
2582 	unsigned int master_mbox;
2583 	int retries = FW_CMD_HELLO_RETRIES;
2584 
2585 retry:
2586 	memset(&c, 0, sizeof(c));
2587 	INIT_CMD(c, HELLO, WRITE);
2588 	c.err_to_clearinit = htonl(
2589 		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2590 		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2591 		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2592 				      FW_HELLO_CMD_MBMASTER_MASK) |
2593 		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2594 		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2595 		FW_HELLO_CMD_CLEARINIT);
2596 
2597 	/*
2598 	 * Issue the HELLO command to the firmware.  If it's not successful
2599 	 * but indicates that we got a "busy" or "timeout" condition, retry
2600 	 * the HELLO until we exhaust our retry limit.
2601 	 */
2602 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2603 	if (ret < 0) {
2604 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2605 			goto retry;
2606 		return ret;
2607 	}
2608 
2609 	v = ntohl(c.err_to_clearinit);
2610 	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2611 	if (state) {
2612 		if (v & FW_HELLO_CMD_ERR)
2613 			*state = DEV_STATE_ERR;
2614 		else if (v & FW_HELLO_CMD_INIT)
2615 			*state = DEV_STATE_INIT;
2616 		else
2617 			*state = DEV_STATE_UNINIT;
2618 	}
2619 
2620 	/*
2621 	 * If we're not the Master PF then we need to wait around for the
2622 	 * Master PF Driver to finish setting up the adapter.
2623 	 *
2624 	 * Note that we also do this wait if we're a non-Master-capable PF and
2625 	 * there is no current Master PF; a Master PF may show up momentarily
2626 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
2627 	 * OS loads lots of different drivers rapidly at the same time).  In
2628 	 * this case, the Master PF returned by the firmware will be
2629 	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2630 	 */
2631 	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2632 	    master_mbox != mbox) {
2633 		int waiting = FW_CMD_HELLO_TIMEOUT;
2634 
2635 		/*
2636 		 * Wait for the firmware to either indicate an error or
2637 		 * initialized state.  If we see either of these we bail out
2638 		 * and report the issue to the caller.  If we exhaust the
2639 		 * "hello timeout" and we haven't exhausted our retries, try
2640 		 * again.  Otherwise bail with a timeout error.
2641 		 */
2642 		for (;;) {
2643 			u32 pcie_fw;
2644 
2645 			msleep(50);
2646 			waiting -= 50;
2647 
2648 			/*
2649 			 * If neither Error nor Initialialized are indicated
2650 			 * by the firmware keep waiting till we exaust our
2651 			 * timeout ... and then retry if we haven't exhausted
2652 			 * our retries ...
2653 			 */
2654 			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2655 			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2656 				if (waiting <= 0) {
2657 					if (retries-- > 0)
2658 						goto retry;
2659 
2660 					return -ETIMEDOUT;
2661 				}
2662 				continue;
2663 			}
2664 
2665 			/*
2666 			 * We either have an Error or Initialized condition
2667 			 * report errors preferentially.
2668 			 */
2669 			if (state) {
2670 				if (pcie_fw & FW_PCIE_FW_ERR)
2671 					*state = DEV_STATE_ERR;
2672 				else if (pcie_fw & FW_PCIE_FW_INIT)
2673 					*state = DEV_STATE_INIT;
2674 			}
2675 
2676 			/*
2677 			 * If we arrived before a Master PF was selected and
2678 			 * there's not a valid Master PF, grab its identity
2679 			 * for our caller.
2680 			 */
2681 			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2682 			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2683 				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2684 			break;
2685 		}
2686 	}
2687 
2688 	return master_mbox;
2689 }
2690 
2691 /**
2692  *	t4_fw_bye - end communication with FW
2693  *	@adap: the adapter
2694  *	@mbox: mailbox to use for the FW command
2695  *
2696  *	Issues a command to terminate communication with FW.
2697  */
2698 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2699 {
2700 	struct fw_bye_cmd c;
2701 
2702 	memset(&c, 0, sizeof(c));
2703 	INIT_CMD(c, BYE, WRITE);
2704 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2705 }
2706 
2707 /**
2708  *	t4_init_cmd - ask FW to initialize the device
2709  *	@adap: the adapter
2710  *	@mbox: mailbox to use for the FW command
2711  *
2712  *	Issues a command to FW to partially initialize the device.  This
2713  *	performs initialization that generally doesn't depend on user input.
2714  */
2715 int t4_early_init(struct adapter *adap, unsigned int mbox)
2716 {
2717 	struct fw_initialize_cmd c;
2718 
2719 	memset(&c, 0, sizeof(c));
2720 	INIT_CMD(c, INITIALIZE, WRITE);
2721 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2722 }
2723 
2724 /**
2725  *	t4_fw_reset - issue a reset to FW
2726  *	@adap: the adapter
2727  *	@mbox: mailbox to use for the FW command
2728  *	@reset: specifies the type of reset to perform
2729  *
2730  *	Issues a reset command of the specified type to FW.
2731  */
2732 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2733 {
2734 	struct fw_reset_cmd c;
2735 
2736 	memset(&c, 0, sizeof(c));
2737 	INIT_CMD(c, RESET, WRITE);
2738 	c.val = htonl(reset);
2739 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2740 }
2741 
2742 /**
2743  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2744  *	@adap: the adapter
2745  *	@mbox: mailbox to use for the FW RESET command (if desired)
2746  *	@force: force uP into RESET even if FW RESET command fails
2747  *
2748  *	Issues a RESET command to firmware (if desired) with a HALT indication
2749  *	and then puts the microprocessor into RESET state.  The RESET command
2750  *	will only be issued if a legitimate mailbox is provided (mbox <=
2751  *	FW_PCIE_FW_MASTER_MASK).
2752  *
2753  *	This is generally used in order for the host to safely manipulate the
2754  *	adapter without fear of conflicting with whatever the firmware might
2755  *	be doing.  The only way out of this state is to RESTART the firmware
2756  *	...
2757  */
2758 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2759 {
2760 	int ret = 0;
2761 
2762 	/*
2763 	 * If a legitimate mailbox is provided, issue a RESET command
2764 	 * with a HALT indication.
2765 	 */
2766 	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2767 		struct fw_reset_cmd c;
2768 
2769 		memset(&c, 0, sizeof(c));
2770 		INIT_CMD(c, RESET, WRITE);
2771 		c.val = htonl(PIORST | PIORSTMODE);
2772 		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2773 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2774 	}
2775 
2776 	/*
2777 	 * Normally we won't complete the operation if the firmware RESET
2778 	 * command fails but if our caller insists we'll go ahead and put the
2779 	 * uP into RESET.  This can be useful if the firmware is hung or even
2780 	 * missing ...  We'll have to take the risk of putting the uP into
2781 	 * RESET without the cooperation of firmware in that case.
2782 	 *
2783 	 * We also force the firmware's HALT flag to be on in case we bypassed
2784 	 * the firmware RESET command above or we're dealing with old firmware
2785 	 * which doesn't have the HALT capability.  This will serve as a flag
2786 	 * for the incoming firmware to know that it's coming out of a HALT
2787 	 * rather than a RESET ... if it's new enough to understand that ...
2788 	 */
2789 	if (ret == 0 || force) {
2790 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2791 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2792 				 FW_PCIE_FW_HALT);
2793 	}
2794 
2795 	/*
2796 	 * And we always return the result of the firmware RESET command
2797 	 * even when we force the uP into RESET ...
2798 	 */
2799 	return ret;
2800 }
2801 
2802 /**
2803  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
2804  *	@adap: the adapter
2805  *	@reset: if we want to do a RESET to restart things
2806  *
2807  *	Restart firmware previously halted by t4_fw_halt().  On successful
2808  *	return the previous PF Master remains as the new PF Master and there
2809  *	is no need to issue a new HELLO command, etc.
2810  *
2811  *	We do this in two ways:
2812  *
2813  *	 1. If we're dealing with newer firmware we'll simply want to take
2814  *	    the chip's microprocessor out of RESET.  This will cause the
2815  *	    firmware to start up from its start vector.  And then we'll loop
2816  *	    until the firmware indicates it's started again (PCIE_FW.HALT
2817  *	    reset to 0) or we timeout.
2818  *
2819  *	 2. If we're dealing with older firmware then we'll need to RESET
2820  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
2821  *	    flag and automatically RESET itself on startup.
2822  */
2823 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2824 {
2825 	if (reset) {
2826 		/*
2827 		 * Since we're directing the RESET instead of the firmware
2828 		 * doing it automatically, we need to clear the PCIE_FW.HALT
2829 		 * bit.
2830 		 */
2831 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2832 
2833 		/*
2834 		 * If we've been given a valid mailbox, first try to get the
2835 		 * firmware to do the RESET.  If that works, great and we can
2836 		 * return success.  Otherwise, if we haven't been given a
2837 		 * valid mailbox or the RESET command failed, fall back to
2838 		 * hitting the chip with a hammer.
2839 		 */
2840 		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2841 			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2842 			msleep(100);
2843 			if (t4_fw_reset(adap, mbox,
2844 					PIORST | PIORSTMODE) == 0)
2845 				return 0;
2846 		}
2847 
2848 		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2849 		msleep(2000);
2850 	} else {
2851 		int ms;
2852 
2853 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2854 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2855 			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2856 				return 0;
2857 			msleep(100);
2858 			ms += 100;
2859 		}
2860 		return -ETIMEDOUT;
2861 	}
2862 	return 0;
2863 }
2864 
2865 /**
2866  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2867  *	@adap: the adapter
2868  *	@mbox: mailbox to use for the FW RESET command (if desired)
2869  *	@fw_data: the firmware image to write
2870  *	@size: image size
2871  *	@force: force upgrade even if firmware doesn't cooperate
2872  *
2873  *	Perform all of the steps necessary for upgrading an adapter's
2874  *	firmware image.  Normally this requires the cooperation of the
2875  *	existing firmware in order to halt all existing activities
2876  *	but if an invalid mailbox token is passed in we skip that step
2877  *	(though we'll still put the adapter microprocessor into RESET in
2878  *	that case).
2879  *
2880  *	On successful return the new firmware will have been loaded and
2881  *	the adapter will have been fully RESET losing all previous setup
2882  *	state.  On unsuccessful return the adapter may be completely hosed ...
2883  *	positive errno indicates that the adapter is ~probably~ intact, a
2884  *	negative errno indicates that things are looking bad ...
2885  */
2886 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2887 			 const u8 *fw_data, unsigned int size, int force)
2888 {
2889 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2890 	int reset, ret;
2891 
2892 	ret = t4_fw_halt(adap, mbox, force);
2893 	if (ret < 0 && !force)
2894 		return ret;
2895 
2896 	ret = t4_load_fw(adap, fw_data, size);
2897 	if (ret < 0)
2898 		return ret;
2899 
2900 	/*
2901 	 * Older versions of the firmware don't understand the new
2902 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2903 	 * restart.  So for newly loaded older firmware we'll have to do the
2904 	 * RESET for it so it starts up on a clean slate.  We can tell if
2905 	 * the newly loaded firmware will handle this right by checking
2906 	 * its header flags to see if it advertises the capability.
2907 	 */
2908 	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2909 	return t4_fw_restart(adap, mbox, reset);
2910 }
2911 
2912 /**
2913  *	t4_fixup_host_params - fix up host-dependent parameters
2914  *	@adap: the adapter
2915  *	@page_size: the host's Base Page Size
2916  *	@cache_line_size: the host's Cache Line Size
2917  *
2918  *	Various registers in T4 contain values which are dependent on the
2919  *	host's Base Page and Cache Line Sizes.  This function will fix all of
2920  *	those registers with the appropriate values as passed in ...
2921  */
2922 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2923 			 unsigned int cache_line_size)
2924 {
2925 	unsigned int page_shift = fls(page_size) - 1;
2926 	unsigned int sge_hps = page_shift - 10;
2927 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2928 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2929 	unsigned int fl_align_log = fls(fl_align) - 1;
2930 
2931 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2932 		     HOSTPAGESIZEPF0(sge_hps) |
2933 		     HOSTPAGESIZEPF1(sge_hps) |
2934 		     HOSTPAGESIZEPF2(sge_hps) |
2935 		     HOSTPAGESIZEPF3(sge_hps) |
2936 		     HOSTPAGESIZEPF4(sge_hps) |
2937 		     HOSTPAGESIZEPF5(sge_hps) |
2938 		     HOSTPAGESIZEPF6(sge_hps) |
2939 		     HOSTPAGESIZEPF7(sge_hps));
2940 
2941 	t4_set_reg_field(adap, SGE_CONTROL,
2942 			 INGPADBOUNDARY_MASK |
2943 			 EGRSTATUSPAGESIZE_MASK,
2944 			 INGPADBOUNDARY(fl_align_log - 5) |
2945 			 EGRSTATUSPAGESIZE(stat_len != 64));
2946 
2947 	/*
2948 	 * Adjust various SGE Free List Host Buffer Sizes.
2949 	 *
2950 	 * This is something of a crock since we're using fixed indices into
2951 	 * the array which are also known by the sge.c code and the T4
2952 	 * Firmware Configuration File.  We need to come up with a much better
2953 	 * approach to managing this array.  For now, the first four entries
2954 	 * are:
2955 	 *
2956 	 *   0: Host Page Size
2957 	 *   1: 64KB
2958 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2959 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2960 	 *
2961 	 * For the single-MTU buffers in unpacked mode we need to include
2962 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2963 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2964 	 * Padding boundry.  All of these are accommodated in the Factory
2965 	 * Default Firmware Configuration File but we need to adjust it for
2966 	 * this host's cache line size.
2967 	 */
2968 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2969 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2970 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2971 		     & ~(fl_align-1));
2972 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2973 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2974 		     & ~(fl_align-1));
2975 
2976 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2977 
2978 	return 0;
2979 }
2980 
2981 /**
2982  *	t4_fw_initialize - ask FW to initialize the device
2983  *	@adap: the adapter
2984  *	@mbox: mailbox to use for the FW command
2985  *
2986  *	Issues a command to FW to partially initialize the device.  This
2987  *	performs initialization that generally doesn't depend on user input.
2988  */
2989 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2990 {
2991 	struct fw_initialize_cmd c;
2992 
2993 	memset(&c, 0, sizeof(c));
2994 	INIT_CMD(c, INITIALIZE, WRITE);
2995 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2996 }
2997 
2998 /**
2999  *	t4_query_params - query FW or device parameters
3000  *	@adap: the adapter
3001  *	@mbox: mailbox to use for the FW command
3002  *	@pf: the PF
3003  *	@vf: the VF
3004  *	@nparams: the number of parameters
3005  *	@params: the parameter names
3006  *	@val: the parameter values
3007  *
3008  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3009  *	queried at once.
3010  */
3011 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3012 		    unsigned int vf, unsigned int nparams, const u32 *params,
3013 		    u32 *val)
3014 {
3015 	int i, ret;
3016 	struct fw_params_cmd c;
3017 	__be32 *p = &c.param[0].mnem;
3018 
3019 	if (nparams > 7)
3020 		return -EINVAL;
3021 
3022 	memset(&c, 0, sizeof(c));
3023 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3024 			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3025 			    FW_PARAMS_CMD_VFN(vf));
3026 	c.retval_len16 = htonl(FW_LEN16(c));
3027 	for (i = 0; i < nparams; i++, p += 2)
3028 		*p = htonl(*params++);
3029 
3030 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3031 	if (ret == 0)
3032 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3033 			*val++ = ntohl(*p);
3034 	return ret;
3035 }
3036 
3037 /**
3038  *	t4_set_params - sets FW or device parameters
3039  *	@adap: the adapter
3040  *	@mbox: mailbox to use for the FW command
3041  *	@pf: the PF
3042  *	@vf: the VF
3043  *	@nparams: the number of parameters
3044  *	@params: the parameter names
3045  *	@val: the parameter values
3046  *
3047  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3048  *	specified at once.
3049  */
3050 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3051 		  unsigned int vf, unsigned int nparams, const u32 *params,
3052 		  const u32 *val)
3053 {
3054 	struct fw_params_cmd c;
3055 	__be32 *p = &c.param[0].mnem;
3056 
3057 	if (nparams > 7)
3058 		return -EINVAL;
3059 
3060 	memset(&c, 0, sizeof(c));
3061 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3062 			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3063 			    FW_PARAMS_CMD_VFN(vf));
3064 	c.retval_len16 = htonl(FW_LEN16(c));
3065 	while (nparams--) {
3066 		*p++ = htonl(*params++);
3067 		*p++ = htonl(*val++);
3068 	}
3069 
3070 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3071 }
3072 
3073 /**
3074  *	t4_cfg_pfvf - configure PF/VF resource limits
3075  *	@adap: the adapter
3076  *	@mbox: mailbox to use for the FW command
3077  *	@pf: the PF being configured
3078  *	@vf: the VF being configured
3079  *	@txq: the max number of egress queues
3080  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
3081  *	@rxqi: the max number of interrupt-capable ingress queues
3082  *	@rxq: the max number of interruptless ingress queues
3083  *	@tc: the PCI traffic class
3084  *	@vi: the max number of virtual interfaces
3085  *	@cmask: the channel access rights mask for the PF/VF
3086  *	@pmask: the port access rights mask for the PF/VF
3087  *	@nexact: the maximum number of exact MPS filters
3088  *	@rcaps: read capabilities
3089  *	@wxcaps: write/execute capabilities
3090  *
3091  *	Configures resource limits and capabilities for a physical or virtual
3092  *	function.
3093  */
3094 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3095 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3096 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
3097 		unsigned int vi, unsigned int cmask, unsigned int pmask,
3098 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3099 {
3100 	struct fw_pfvf_cmd c;
3101 
3102 	memset(&c, 0, sizeof(c));
3103 	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3104 			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3105 			    FW_PFVF_CMD_VFN(vf));
3106 	c.retval_len16 = htonl(FW_LEN16(c));
3107 	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3108 			       FW_PFVF_CMD_NIQ(rxq));
3109 	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
3110 			       FW_PFVF_CMD_PMASK(pmask) |
3111 			       FW_PFVF_CMD_NEQ(txq));
3112 	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3113 				FW_PFVF_CMD_NEXACTF(nexact));
3114 	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3115 				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
3116 				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3117 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3118 }
3119 
3120 /**
3121  *	t4_alloc_vi - allocate a virtual interface
3122  *	@adap: the adapter
3123  *	@mbox: mailbox to use for the FW command
3124  *	@port: physical port associated with the VI
3125  *	@pf: the PF owning the VI
3126  *	@vf: the VF owning the VI
3127  *	@nmac: number of MAC addresses needed (1 to 5)
3128  *	@mac: the MAC addresses of the VI
3129  *	@rss_size: size of RSS table slice associated with this VI
3130  *
3131  *	Allocates a virtual interface for the given physical port.  If @mac is
3132  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
3133  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
3134  *	stored consecutively so the space needed is @nmac * 6 bytes.
3135  *	Returns a negative error number or the non-negative VI id.
3136  */
3137 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3138 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3139 		unsigned int *rss_size)
3140 {
3141 	int ret;
3142 	struct fw_vi_cmd c;
3143 
3144 	memset(&c, 0, sizeof(c));
3145 	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3146 			    FW_CMD_WRITE | FW_CMD_EXEC |
3147 			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3148 	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3149 	c.portid_pkd = FW_VI_CMD_PORTID(port);
3150 	c.nmac = nmac - 1;
3151 
3152 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3153 	if (ret)
3154 		return ret;
3155 
3156 	if (mac) {
3157 		memcpy(mac, c.mac, sizeof(c.mac));
3158 		switch (nmac) {
3159 		case 5:
3160 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3161 		case 4:
3162 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3163 		case 3:
3164 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3165 		case 2:
3166 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
3167 		}
3168 	}
3169 	if (rss_size)
3170 		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3171 	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3172 }
3173 
3174 /**
3175  *	t4_set_rxmode - set Rx properties of a virtual interface
3176  *	@adap: the adapter
3177  *	@mbox: mailbox to use for the FW command
3178  *	@viid: the VI id
3179  *	@mtu: the new MTU or -1
3180  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3181  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3182  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3183  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3184  *	@sleep_ok: if true we may sleep while awaiting command completion
3185  *
3186  *	Sets Rx properties of a virtual interface.
3187  */
3188 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3189 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3190 		  bool sleep_ok)
3191 {
3192 	struct fw_vi_rxmode_cmd c;
3193 
3194 	/* convert to FW values */
3195 	if (mtu < 0)
3196 		mtu = FW_RXMODE_MTU_NO_CHG;
3197 	if (promisc < 0)
3198 		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3199 	if (all_multi < 0)
3200 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3201 	if (bcast < 0)
3202 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3203 	if (vlanex < 0)
3204 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3205 
3206 	memset(&c, 0, sizeof(c));
3207 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3208 			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3209 	c.retval_len16 = htonl(FW_LEN16(c));
3210 	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3211 				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3212 				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3213 				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3214 				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3215 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3216 }
3217 
3218 /**
3219  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3220  *	@adap: the adapter
3221  *	@mbox: mailbox to use for the FW command
3222  *	@viid: the VI id
3223  *	@free: if true any existing filters for this VI id are first removed
3224  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
3225  *	@addr: the MAC address(es)
3226  *	@idx: where to store the index of each allocated filter
3227  *	@hash: pointer to hash address filter bitmap
3228  *	@sleep_ok: call is allowed to sleep
3229  *
3230  *	Allocates an exact-match filter for each of the supplied addresses and
3231  *	sets it to the corresponding address.  If @idx is not %NULL it should
3232  *	have at least @naddr entries, each of which will be set to the index of
3233  *	the filter allocated for the corresponding MAC address.  If a filter
3234  *	could not be allocated for an address its index is set to 0xffff.
3235  *	If @hash is not %NULL addresses that fail to allocate an exact filter
3236  *	are hashed and update the hash filter bitmap pointed at by @hash.
3237  *
3238  *	Returns a negative error number or the number of filters allocated.
3239  */
3240 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3241 		      unsigned int viid, bool free, unsigned int naddr,
3242 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3243 {
3244 	int i, ret;
3245 	struct fw_vi_mac_cmd c;
3246 	struct fw_vi_mac_exact *p;
3247 	unsigned int max_naddr = is_t4(adap->params.chip) ?
3248 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
3249 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3250 
3251 	if (naddr > 7)
3252 		return -EINVAL;
3253 
3254 	memset(&c, 0, sizeof(c));
3255 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3256 			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3257 			     FW_VI_MAC_CMD_VIID(viid));
3258 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3259 				    FW_CMD_LEN16((naddr + 2) / 2));
3260 
3261 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3262 		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3263 				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3264 		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3265 	}
3266 
3267 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3268 	if (ret)
3269 		return ret;
3270 
3271 	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3272 		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3273 
3274 		if (idx)
3275 			idx[i] = index >= max_naddr ? 0xffff : index;
3276 		if (index < max_naddr)
3277 			ret++;
3278 		else if (hash)
3279 			*hash |= (1ULL << hash_mac_addr(addr[i]));
3280 	}
3281 	return ret;
3282 }
3283 
3284 /**
3285  *	t4_change_mac - modifies the exact-match filter for a MAC address
3286  *	@adap: the adapter
3287  *	@mbox: mailbox to use for the FW command
3288  *	@viid: the VI id
3289  *	@idx: index of existing filter for old value of MAC address, or -1
3290  *	@addr: the new MAC address value
3291  *	@persist: whether a new MAC allocation should be persistent
3292  *	@add_smt: if true also add the address to the HW SMT
3293  *
3294  *	Modifies an exact-match filter and sets it to the new MAC address.
3295  *	Note that in general it is not possible to modify the value of a given
3296  *	filter so the generic way to modify an address filter is to free the one
3297  *	being used by the old address value and allocate a new filter for the
3298  *	new address value.  @idx can be -1 if the address is a new addition.
3299  *
3300  *	Returns a negative error number or the index of the filter with the new
3301  *	MAC value.
3302  */
3303 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3304 		  int idx, const u8 *addr, bool persist, bool add_smt)
3305 {
3306 	int ret, mode;
3307 	struct fw_vi_mac_cmd c;
3308 	struct fw_vi_mac_exact *p = c.u.exact;
3309 	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3310 				    NUM_MPS_CLS_SRAM_L_INSTANCES :
3311 				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3312 
3313 	if (idx < 0)                             /* new allocation */
3314 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3315 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3316 
3317 	memset(&c, 0, sizeof(c));
3318 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3319 			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3320 	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3321 	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3322 				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3323 				FW_VI_MAC_CMD_IDX(idx));
3324 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
3325 
3326 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3327 	if (ret == 0) {
3328 		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3329 		if (ret >= max_mac_addr)
3330 			ret = -ENOMEM;
3331 	}
3332 	return ret;
3333 }
3334 
3335 /**
3336  *	t4_set_addr_hash - program the MAC inexact-match hash filter
3337  *	@adap: the adapter
3338  *	@mbox: mailbox to use for the FW command
3339  *	@viid: the VI id
3340  *	@ucast: whether the hash filter should also match unicast addresses
3341  *	@vec: the value to be written to the hash filter
3342  *	@sleep_ok: call is allowed to sleep
3343  *
3344  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
3345  */
3346 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3347 		     bool ucast, u64 vec, bool sleep_ok)
3348 {
3349 	struct fw_vi_mac_cmd c;
3350 
3351 	memset(&c, 0, sizeof(c));
3352 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3353 			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3354 	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3355 				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3356 				    FW_CMD_LEN16(1));
3357 	c.u.hash.hashvec = cpu_to_be64(vec);
3358 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3359 }
3360 
3361 /**
3362  *	t4_enable_vi - enable/disable a virtual interface
3363  *	@adap: the adapter
3364  *	@mbox: mailbox to use for the FW command
3365  *	@viid: the VI id
3366  *	@rx_en: 1=enable Rx, 0=disable Rx
3367  *	@tx_en: 1=enable Tx, 0=disable Tx
3368  *
3369  *	Enables/disables a virtual interface.
3370  */
3371 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3372 		 bool rx_en, bool tx_en)
3373 {
3374 	struct fw_vi_enable_cmd c;
3375 
3376 	memset(&c, 0, sizeof(c));
3377 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3378 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3379 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3380 			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
3381 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3382 }
3383 
3384 /**
3385  *	t4_identify_port - identify a VI's port by blinking its LED
3386  *	@adap: the adapter
3387  *	@mbox: mailbox to use for the FW command
3388  *	@viid: the VI id
3389  *	@nblinks: how many times to blink LED at 2.5 Hz
3390  *
3391  *	Identifies a VI's port by blinking its LED.
3392  */
3393 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3394 		     unsigned int nblinks)
3395 {
3396 	struct fw_vi_enable_cmd c;
3397 
3398 	memset(&c, 0, sizeof(c));
3399 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3400 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3401 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3402 	c.blinkdur = htons(nblinks);
3403 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3404 }
3405 
3406 /**
3407  *	t4_iq_free - free an ingress queue and its FLs
3408  *	@adap: the adapter
3409  *	@mbox: mailbox to use for the FW command
3410  *	@pf: the PF owning the queues
3411  *	@vf: the VF owning the queues
3412  *	@iqtype: the ingress queue type
3413  *	@iqid: ingress queue id
3414  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
3415  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
3416  *
3417  *	Frees an ingress queue and its associated FLs, if any.
3418  */
3419 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3420 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
3421 	       unsigned int fl0id, unsigned int fl1id)
3422 {
3423 	struct fw_iq_cmd c;
3424 
3425 	memset(&c, 0, sizeof(c));
3426 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3427 			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3428 			    FW_IQ_CMD_VFN(vf));
3429 	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3430 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3431 	c.iqid = htons(iqid);
3432 	c.fl0id = htons(fl0id);
3433 	c.fl1id = htons(fl1id);
3434 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3435 }
3436 
3437 /**
3438  *	t4_eth_eq_free - free an Ethernet egress queue
3439  *	@adap: the adapter
3440  *	@mbox: mailbox to use for the FW command
3441  *	@pf: the PF owning the queue
3442  *	@vf: the VF owning the queue
3443  *	@eqid: egress queue id
3444  *
3445  *	Frees an Ethernet egress queue.
3446  */
3447 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3448 		   unsigned int vf, unsigned int eqid)
3449 {
3450 	struct fw_eq_eth_cmd c;
3451 
3452 	memset(&c, 0, sizeof(c));
3453 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3454 			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3455 			    FW_EQ_ETH_CMD_VFN(vf));
3456 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3457 	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3458 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3459 }
3460 
3461 /**
3462  *	t4_ctrl_eq_free - free a control egress queue
3463  *	@adap: the adapter
3464  *	@mbox: mailbox to use for the FW command
3465  *	@pf: the PF owning the queue
3466  *	@vf: the VF owning the queue
3467  *	@eqid: egress queue id
3468  *
3469  *	Frees a control egress queue.
3470  */
3471 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3472 		    unsigned int vf, unsigned int eqid)
3473 {
3474 	struct fw_eq_ctrl_cmd c;
3475 
3476 	memset(&c, 0, sizeof(c));
3477 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3478 			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3479 			    FW_EQ_CTRL_CMD_VFN(vf));
3480 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3481 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3482 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3483 }
3484 
3485 /**
3486  *	t4_ofld_eq_free - free an offload egress queue
3487  *	@adap: the adapter
3488  *	@mbox: mailbox to use for the FW command
3489  *	@pf: the PF owning the queue
3490  *	@vf: the VF owning the queue
3491  *	@eqid: egress queue id
3492  *
3493  *	Frees a control egress queue.
3494  */
3495 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3496 		    unsigned int vf, unsigned int eqid)
3497 {
3498 	struct fw_eq_ofld_cmd c;
3499 
3500 	memset(&c, 0, sizeof(c));
3501 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3502 			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3503 			    FW_EQ_OFLD_CMD_VFN(vf));
3504 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3505 	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3506 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3507 }
3508 
3509 /**
3510  *	t4_handle_fw_rpl - process a FW reply message
3511  *	@adap: the adapter
3512  *	@rpl: start of the FW message
3513  *
3514  *	Processes a FW message, such as link state change messages.
3515  */
3516 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3517 {
3518 	u8 opcode = *(const u8 *)rpl;
3519 
3520 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
3521 		int speed = 0, fc = 0;
3522 		const struct fw_port_cmd *p = (void *)rpl;
3523 		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3524 		int port = adap->chan_map[chan];
3525 		struct port_info *pi = adap2pinfo(adap, port);
3526 		struct link_config *lc = &pi->link_cfg;
3527 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3528 		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3529 		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3530 
3531 		if (stat & FW_PORT_CMD_RXPAUSE)
3532 			fc |= PAUSE_RX;
3533 		if (stat & FW_PORT_CMD_TXPAUSE)
3534 			fc |= PAUSE_TX;
3535 		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3536 			speed = SPEED_100;
3537 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3538 			speed = SPEED_1000;
3539 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3540 			speed = SPEED_10000;
3541 
3542 		if (link_ok != lc->link_ok || speed != lc->speed ||
3543 		    fc != lc->fc) {                    /* something changed */
3544 			lc->link_ok = link_ok;
3545 			lc->speed = speed;
3546 			lc->fc = fc;
3547 			t4_os_link_changed(adap, port, link_ok);
3548 		}
3549 		if (mod != pi->mod_type) {
3550 			pi->mod_type = mod;
3551 			t4_os_portmod_changed(adap, port);
3552 		}
3553 	}
3554 	return 0;
3555 }
3556 
3557 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3558 {
3559 	u16 val;
3560 
3561 	if (pci_is_pcie(adapter->pdev)) {
3562 		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3563 		p->speed = val & PCI_EXP_LNKSTA_CLS;
3564 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3565 	}
3566 }
3567 
3568 /**
3569  *	init_link_config - initialize a link's SW state
3570  *	@lc: structure holding the link state
3571  *	@caps: link capabilities
3572  *
3573  *	Initializes the SW state maintained for each link, including the link's
3574  *	capabilities and default speed/flow-control/autonegotiation settings.
3575  */
3576 static void init_link_config(struct link_config *lc, unsigned int caps)
3577 {
3578 	lc->supported = caps;
3579 	lc->requested_speed = 0;
3580 	lc->speed = 0;
3581 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3582 	if (lc->supported & FW_PORT_CAP_ANEG) {
3583 		lc->advertising = lc->supported & ADVERT_MASK;
3584 		lc->autoneg = AUTONEG_ENABLE;
3585 		lc->requested_fc |= PAUSE_AUTONEG;
3586 	} else {
3587 		lc->advertising = 0;
3588 		lc->autoneg = AUTONEG_DISABLE;
3589 	}
3590 }
3591 
3592 int t4_wait_dev_ready(struct adapter *adap)
3593 {
3594 	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3595 		return 0;
3596 	msleep(500);
3597 	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3598 }
3599 
3600 static int get_flash_params(struct adapter *adap)
3601 {
3602 	int ret;
3603 	u32 info;
3604 
3605 	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3606 	if (!ret)
3607 		ret = sf1_read(adap, 3, 0, 1, &info);
3608 	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
3609 	if (ret)
3610 		return ret;
3611 
3612 	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
3613 		return -EINVAL;
3614 	info >>= 16;                           /* log2 of size */
3615 	if (info >= 0x14 && info < 0x18)
3616 		adap->params.sf_nsec = 1 << (info - 16);
3617 	else if (info == 0x18)
3618 		adap->params.sf_nsec = 64;
3619 	else
3620 		return -EINVAL;
3621 	adap->params.sf_size = 1 << info;
3622 	adap->params.sf_fw_start =
3623 		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3624 	return 0;
3625 }
3626 
3627 /**
3628  *	t4_prep_adapter - prepare SW and HW for operation
3629  *	@adapter: the adapter
3630  *	@reset: if true perform a HW reset
3631  *
3632  *	Initialize adapter SW state for the various HW modules, set initial
3633  *	values for some adapter tunables, take PHYs out of reset, and
3634  *	initialize the MDIO interface.
3635  */
3636 int t4_prep_adapter(struct adapter *adapter)
3637 {
3638 	int ret, ver;
3639 	uint16_t device_id;
3640 	u32 pl_rev;
3641 
3642 	ret = t4_wait_dev_ready(adapter);
3643 	if (ret < 0)
3644 		return ret;
3645 
3646 	get_pci_mode(adapter, &adapter->params.pci);
3647 	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3648 
3649 	ret = get_flash_params(adapter);
3650 	if (ret < 0) {
3651 		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3652 		return ret;
3653 	}
3654 
3655 	/* Retrieve adapter's device ID
3656 	 */
3657 	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3658 	ver = device_id >> 12;
3659 	adapter->params.chip = 0;
3660 	switch (ver) {
3661 	case CHELSIO_T4:
3662 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3663 		break;
3664 	case CHELSIO_T5:
3665 		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3666 		break;
3667 	default:
3668 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3669 			device_id);
3670 		return -EINVAL;
3671 	}
3672 
3673 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3674 
3675 	/*
3676 	 * Default port for debugging in case we can't reach FW.
3677 	 */
3678 	adapter->params.nports = 1;
3679 	adapter->params.portvec = 1;
3680 	adapter->params.vpd.cclk = 50000;
3681 	return 0;
3682 }
3683 
3684 /**
3685  *      t4_init_tp_params - initialize adap->params.tp
3686  *      @adap: the adapter
3687  *
3688  *      Initialize various fields of the adapter's TP Parameters structure.
3689  */
3690 int t4_init_tp_params(struct adapter *adap)
3691 {
3692 	int chan;
3693 	u32 v;
3694 
3695 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3696 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3697 	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3698 
3699 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3700 	for (chan = 0; chan < NCHAN; chan++)
3701 		adap->params.tp.tx_modq[chan] = chan;
3702 
3703 	/* Cache the adapter's Compressed Filter Mode and global Incress
3704 	 * Configuration.
3705 	 */
3706 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3707 			 &adap->params.tp.vlan_pri_map, 1,
3708 			 TP_VLAN_PRI_MAP);
3709 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3710 			 &adap->params.tp.ingress_config, 1,
3711 			 TP_INGRESS_CONFIG);
3712 
3713 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3714 	 * shift positions of several elements of the Compressed Filter Tuple
3715 	 * for this adapter which we need frequently ...
3716 	 */
3717 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3718 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3719 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3720 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3721 							       F_PROTOCOL);
3722 
3723 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3724 	 * represents the presense of an Outer VLAN instead of a VNIC ID.
3725 	 */
3726 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3727 		adap->params.tp.vnic_shift = -1;
3728 
3729 	return 0;
3730 }
3731 
3732 /**
3733  *      t4_filter_field_shift - calculate filter field shift
3734  *      @adap: the adapter
3735  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3736  *
3737  *      Return the shift position of a filter field within the Compressed
3738  *      Filter Tuple.  The filter field is specified via its selection bit
3739  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
3740  */
3741 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3742 {
3743 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3744 	unsigned int sel;
3745 	int field_shift;
3746 
3747 	if ((filter_mode & filter_sel) == 0)
3748 		return -1;
3749 
3750 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3751 		switch (filter_mode & sel) {
3752 		case F_FCOE:
3753 			field_shift += W_FT_FCOE;
3754 			break;
3755 		case F_PORT:
3756 			field_shift += W_FT_PORT;
3757 			break;
3758 		case F_VNIC_ID:
3759 			field_shift += W_FT_VNIC_ID;
3760 			break;
3761 		case F_VLAN:
3762 			field_shift += W_FT_VLAN;
3763 			break;
3764 		case F_TOS:
3765 			field_shift += W_FT_TOS;
3766 			break;
3767 		case F_PROTOCOL:
3768 			field_shift += W_FT_PROTOCOL;
3769 			break;
3770 		case F_ETHERTYPE:
3771 			field_shift += W_FT_ETHERTYPE;
3772 			break;
3773 		case F_MACMATCH:
3774 			field_shift += W_FT_MACMATCH;
3775 			break;
3776 		case F_MPSHITTYPE:
3777 			field_shift += W_FT_MPSHITTYPE;
3778 			break;
3779 		case F_FRAGMENTATION:
3780 			field_shift += W_FT_FRAGMENTATION;
3781 			break;
3782 		}
3783 	}
3784 	return field_shift;
3785 }
3786 
3787 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3788 {
3789 	u8 addr[6];
3790 	int ret, i, j = 0;
3791 	struct fw_port_cmd c;
3792 	struct fw_rss_vi_config_cmd rvc;
3793 
3794 	memset(&c, 0, sizeof(c));
3795 	memset(&rvc, 0, sizeof(rvc));
3796 
3797 	for_each_port(adap, i) {
3798 		unsigned int rss_size;
3799 		struct port_info *p = adap2pinfo(adap, i);
3800 
3801 		while ((adap->params.portvec & (1 << j)) == 0)
3802 			j++;
3803 
3804 		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3805 				       FW_CMD_REQUEST | FW_CMD_READ |
3806 				       FW_PORT_CMD_PORTID(j));
3807 		c.action_to_len16 = htonl(
3808 			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3809 			FW_LEN16(c));
3810 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3811 		if (ret)
3812 			return ret;
3813 
3814 		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3815 		if (ret < 0)
3816 			return ret;
3817 
3818 		p->viid = ret;
3819 		p->tx_chan = j;
3820 		p->lport = j;
3821 		p->rss_size = rss_size;
3822 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3823 
3824 		ret = ntohl(c.u.info.lstatus_to_modtype);
3825 		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3826 			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3827 		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3828 		p->mod_type = FW_PORT_MOD_TYPE_NA;
3829 
3830 		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3831 				       FW_CMD_REQUEST | FW_CMD_READ |
3832 				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3833 		rvc.retval_len16 = htonl(FW_LEN16(rvc));
3834 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3835 		if (ret)
3836 			return ret;
3837 		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3838 
3839 		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3840 		j++;
3841 	}
3842 	return 0;
3843 }
3844