xref: /openbmc/linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c (revision 7f2e85840871f199057e65232ebde846192ed989)
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_values.h"
39 #include "t4fw_api.h"
40 #include "t4fw_version.h"
41 
42 /**
43  *	t4_wait_op_done_val - wait until an operation is completed
44  *	@adapter: the adapter performing the operation
45  *	@reg: the register to check for completion
46  *	@mask: a single-bit field within @reg that indicates completion
47  *	@polarity: the value of the field when the operation is completed
48  *	@attempts: number of check iterations
49  *	@delay: delay in usecs between iterations
50  *	@valp: where to store the value of the register at completion time
51  *
52  *	Wait until an operation is completed by checking a bit in a register
53  *	up to @attempts times.  If @valp is not NULL the value of the register
54  *	at the time it indicated completion is stored there.  Returns 0 if the
55  *	operation completes and	-EAGAIN	otherwise.
56  */
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 			       int polarity, int attempts, int delay, u32 *valp)
59 {
60 	while (1) {
61 		u32 val = t4_read_reg(adapter, reg);
62 
63 		if (!!(val & mask) == polarity) {
64 			if (valp)
65 				*valp = val;
66 			return 0;
67 		}
68 		if (--attempts == 0)
69 			return -EAGAIN;
70 		if (delay)
71 			udelay(delay);
72 	}
73 }
74 
/* Convenience wrapper around t4_wait_op_done_val() for callers that don't
 * need the final register value.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
81 
82 /**
83  *	t4_set_reg_field - set a register field to a value
84  *	@adapter: the adapter to program
85  *	@addr: the register address
86  *	@mask: specifies the portion of the register to modify
87  *	@val: the new value for the register field
88  *
89  *	Sets a register field specified by the supplied mask to the
90  *	given value.
91  */
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93 		      u32 val)
94 {
95 	u32 v = t4_read_reg(adapter, addr) & ~mask;
96 
97 	t4_write_reg(adapter, addr, v | val);
98 	(void) t4_read_reg(adapter, addr);      /* flush */
99 }
100 
101 /**
102  *	t4_read_indirect - read indirectly addressed registers
103  *	@adap: the adapter
104  *	@addr_reg: register holding the indirect address
105  *	@data_reg: register holding the value of the indirect register
106  *	@vals: where the read register values are stored
107  *	@nregs: how many indirect registers to read
108  *	@start_idx: index of first indirect register to read
109  *
110  *	Reads registers that are accessed indirectly through an address/data
111  *	register pair.
112  */
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 			     unsigned int data_reg, u32 *vals,
115 			     unsigned int nregs, unsigned int start_idx)
116 {
117 	while (nregs--) {
118 		t4_write_reg(adap, addr_reg, start_idx);
119 		*vals++ = t4_read_reg(adap, data_reg);
120 		start_idx++;
121 	}
122 }
123 
124 /**
125  *	t4_write_indirect - write indirectly addressed registers
126  *	@adap: the adapter
127  *	@addr_reg: register holding the indirect addresses
128  *	@data_reg: register holding the value for the indirect registers
129  *	@vals: values to write
130  *	@nregs: how many indirect registers to write
131  *	@start_idx: address of first indirect register to write
132  *
133  *	Writes a sequential block of registers that are accessed indirectly
134  *	through an address/data register pair.
135  */
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 		       unsigned int data_reg, const u32 *vals,
138 		       unsigned int nregs, unsigned int start_idx)
139 {
140 	while (nregs--) {
141 		t4_write_reg(adap, addr_reg, start_idx++);
142 		t4_write_reg(adap, data_reg, *vals++);
143 	}
144 }
145 
146 /*
147  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148  * mechanism.  This guarantees that we get the real value even if we're
149  * operating within a Virtual Machine and the Hypervisor is trapping our
150  * Configuration Space accesses.
151  */
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153 {
154 	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
155 
156 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
157 		req |= ENABLE_F;
158 	else
159 		req |= T6_ENABLE_F;
160 
161 	if (is_t4(adap->params.chip))
162 		req |= LOCALCFG_F;
163 
164 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
166 
167 	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 	 * Configuration Space read.  (None of the other fields matter when
169 	 * ENABLE is 0 so a simple register write is easier than a
170 	 * read-modify-write via t4_set_reg_field().)
171 	 */
172 	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
173 }
174 
175 /*
176  * t4_report_fw_error - report firmware error
177  * @adap: the adapter
178  *
179  * The adapter firmware can indicate error conditions to the host.
180  * If the firmware has indicated an error, print out the reason for
181  * the firmware error.
182  */
183 static void t4_report_fw_error(struct adapter *adap)
184 {
185 	static const char *const reason[] = {
186 		"Crash",                        /* PCIE_FW_EVAL_CRASH */
187 		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
188 		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
189 		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
192 		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 		"Reserved",                     /* reserved */
194 	};
195 	u32 pcie_fw;
196 
197 	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 	if (pcie_fw & PCIE_FW_ERR_F) {
199 		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 			reason[PCIE_FW_EVAL_G(pcie_fw)]);
201 		adap->flags &= ~FW_OK;
202 	}
203 }
204 
205 /*
206  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
207  */
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
209 			 u32 mbox_addr)
210 {
211 	for ( ; nflit; nflit--, mbox_addr += 8)
212 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
213 }
214 
/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* Pull the FW_DEBUG_CMD reply out of the mailbox and log the
	 * assertion's source location and the two auxiliary values.
	 */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
228 
229 /**
230  *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231  *	@adapter: the adapter
232  *	@cmd: the Firmware Mailbox Command or Reply
233  *	@size: command length in bytes
234  *	@access: the time (ms) needed to access the Firmware Mailbox
235  *	@execute: the time (ms) the command spent being executed
236  */
237 static void t4_record_mbox(struct adapter *adapter,
238 			   const __be64 *cmd, unsigned int size,
239 			   int access, int execute)
240 {
241 	struct mbox_cmd_log *log = adapter->mbox_log;
242 	struct mbox_cmd *entry;
243 	int i;
244 
245 	entry = mbox_cmd_log_entry(log, log->cursor++);
246 	if (log->cursor == log->size)
247 		log->cursor = 0;
248 
249 	for (i = 0; i < size / 8; i++)
250 		entry->cmd[i] = be64_to_cpu(cmd[i]);
251 	while (i < MBOX_LEN / 8)
252 		entry->cmd[i++] = 0;
253 	entry->timestamp = jiffies;
254 	entry->seqno = log->seqno++;
255 	entry->access = access;
256 	entry->execute = execute;
257 }
258 
259 /**
260  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
261  *	@adap: the adapter
262  *	@mbox: index of the mailbox to use
263  *	@cmd: the command to write
264  *	@size: command length in bytes
265  *	@rpl: where to optionally store the reply
266  *	@sleep_ok: if true we may sleep while awaiting command completion
267  *	@timeout: time to wait for command to finish before timing out
268  *
269  *	Sends the given command to FW through the selected mailbox and waits
270  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
271  *	store the FW's reply to the command.  The command and its optional
272  *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
273  *	to respond.  @sleep_ok determines whether we may sleep while awaiting
274  *	the response.  If sleeping is allowed we use progressive backoff
275  *	otherwise we spin.
276  *
277  *	The return value is 0 on success or a negative errno on failure.  A
278  *	failure can happen either because we are not able to execute the
279  *	command or FW executes it but signals an error.  In the latter case
280  *	the return value is the error code indicated by FW (negated).
281  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule (ms) used between polls when we're
	 * allowed to sleep; the final entry repeats once exhausted.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;
	u16 execute = 0;
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock_bh(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock_bh(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock_bh(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, size, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply, bailing out early if the firmware reports an
	 * error via PCIE_FW or once @timeout ms have elapsed.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID_F)) {
				/* Ownership returned without a valid message;
				 * hand the mailbox back and keep polling.
				 */
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				/* Firmware delivered an assertion instead of
				 * a normal reply; log it and fail with EIO.
				 */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* Timed out or firmware error: record the failure, report it and
	 * declare the adapter dead via t4_fatal_err().
	 */
	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, size, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock_bh(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock_bh(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}
441 
/* Send a mailbox command with the default FW_CMD_MAX_TIMEOUT timeout;
 * see t4_wr_mbox_meat_timeout() for the full semantics.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}
448 
/* Dump the ECC error address and BIST status data for EDC controller @idx
 * (0 or 1) on T5+ adapters.  Diagnostic only; always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 rdata_reg;

	if (is_t4(adap->params.chip)) {
		/* The EDC_H_* register block used below only exists on
		 * T5 and later chips.
		 */
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Dump the nine consecutive 64-bit BIST status words. */
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		rdata_reg,
		(unsigned long long)t4_read_reg64(adap, rdata_reg),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));

	return 0;
}
485 
486 /**
487  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
488  *	@adap: the adapter
489  *	@win: PCI-E Memory Window to use
490  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
491  *	@addr: address within indicated memory type
492  *	@len: amount of memory to transfer
493  *	@hbuf: host memory buffer
494  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
495  *
496  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
497  *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
499  *	a raw byte sequence from/to the firmware's memory.  If this memory
500  *	contains data structures which contain multi-byte integers, it's the
501  *	caller's responsibility to perform appropriate byte order conversions.
502  */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ... adapter address and host buffer must
	 * both be 32-bit aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
	 * MEM_HMA  = 4
	 */
	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype == MEM_HMA) {
		/* HMA lives just past both EDC regions. */
		memoffset = 2 * (edc_size * 1024 * 1024);
	} else if (mtype != MEM_MC1) {
		memoffset = (mtype * (edc_size * 1024 * 1024));
	} else {
		/* MC1 starts after both EDCs and MC0. */
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY0_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			/* Read the full 32-bit word and copy only the bytes
			 * past the residual boundary into the host buffer.
			 */
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			/* Zero-pad the residual bytes before writing the
			 * final 32-bit word to the adapter.
			 */
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
670 
671 /* Return the specified PCI-E Configuration Space register from our Physical
672  * Function.  We try first via a Firmware LDST Command since we prefer to let
673  * the firmware own all of these registers, but if that fails we go for it
674  * directly ourselves.
675  */
676 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
677 {
678 	u32 val, ldst_addrspace;
679 
680 	/* If fw_attach != 0, construct and send the Firmware LDST Command to
681 	 * retrieve the specified PCI-E Configuration Space register.
682 	 */
683 	struct fw_ldst_cmd ldst_cmd;
684 	int ret;
685 
686 	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
687 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
688 	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
689 					       FW_CMD_REQUEST_F |
690 					       FW_CMD_READ_F |
691 					       ldst_addrspace);
692 	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
693 	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
694 	ldst_cmd.u.pcie.ctrl_to_fn =
695 		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
696 	ldst_cmd.u.pcie.r = reg;
697 
698 	/* If the LDST Command succeeds, return the result, otherwise
699 	 * fall through to reading it directly ourselves ...
700 	 */
701 	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
702 			 &ldst_cmd);
703 	if (ret == 0)
704 		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
705 	else
706 		/* Read the desired Configuration Space register via the PCI-E
707 		 * Backdoor mechanism.
708 		 */
709 		t4_hw_pci_read_cfg4(adap, reg, &val);
710 	return val;
711 }
712 
713 /* Get the window based on base passed to it.
714  * Window aperture is currently unhandled, but there is no use case for it
715  * right now
716  */
717 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
718 			 u32 memwin_base)
719 {
720 	u32 ret;
721 
722 	if (is_t4(adap->params.chip)) {
723 		u32 bar0;
724 
725 		/* Truncation intentional: we only read the bottom 32-bits of
726 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
727 		 * mechanism to read BAR0 instead of using
728 		 * pci_resource_start() because we could be operating from
729 		 * within a Virtual Machine which is trapping our accesses to
730 		 * our Configuration Space and we need to set up the PCI-E
731 		 * Memory Window decoders with the actual addresses which will
732 		 * be coming across the PCI-E link.
733 		 */
734 		bar0 = t4_read_pcie_cfg4(adap, pci_base);
735 		bar0 &= pci_mask;
736 		adap->t4_bar0 = bar0;
737 
738 		ret = bar0 + memwin_base;
739 	} else {
740 		/* For T5, only relative offset inside the PCIe BAR is passed */
741 		ret = memwin_base;
742 	}
743 	return ret;
744 }
745 
/* Get the default utility window (win0) used by everyone */
u32 t4_get_util_window(struct adapter *adap)
{
	/* t4_get_window() handles the T4 (absolute BAR0 address) vs T5+
	 * (BAR-relative offset) distinction for us.
	 */
	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
}
752 
/* Set up memory window for accessing adapter memory ranges.  (Read
 * back MA register to ensure that changes propagate before we attempt
 * to use the new values.)
 */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
	/* Program the window's base address and log2 aperture size, then
	 * read the register back so the posted write completes before any
	 * access through the window.
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
		     memwin_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
}
766 
767 /**
768  *	t4_get_regs_len - return the size of the chips register set
769  *	@adapter: the adapter
770  *
771  *	Returns the size of the chip's BAR0 register space.
772  */
773 unsigned int t4_get_regs_len(struct adapter *adapter)
774 {
775 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
776 
777 	switch (chip_version) {
778 	case CHELSIO_T4:
779 		return T4_REGMAP_SIZE;
780 
781 	case CHELSIO_T5:
782 	case CHELSIO_T6:
783 		return T5_REGMAP_SIZE;
784 	}
785 
786 	dev_err(adapter->pdev_dev,
787 		"Unsupported chip version %d\n", chip_version);
788 	return 0;
789 }
790 
791 /**
792  *	t4_get_regs - read chip registers into provided buffer
793  *	@adap: the adapter
794  *	@buf: register buffer
795  *	@buf_size: size (in bytes) of register buffer
796  *
797  *	If the provided register buffer isn't large enough for the chip's
798  *	full register range, the register dump will be truncated to the
799  *	register buffer's size.
800  */
801 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
802 {
803 	static const unsigned int t4_reg_ranges[] = {
804 		0x1008, 0x1108,
805 		0x1180, 0x1184,
806 		0x1190, 0x1194,
807 		0x11a0, 0x11a4,
808 		0x11b0, 0x11b4,
809 		0x11fc, 0x123c,
810 		0x1300, 0x173c,
811 		0x1800, 0x18fc,
812 		0x3000, 0x30d8,
813 		0x30e0, 0x30e4,
814 		0x30ec, 0x5910,
815 		0x5920, 0x5924,
816 		0x5960, 0x5960,
817 		0x5968, 0x5968,
818 		0x5970, 0x5970,
819 		0x5978, 0x5978,
820 		0x5980, 0x5980,
821 		0x5988, 0x5988,
822 		0x5990, 0x5990,
823 		0x5998, 0x5998,
824 		0x59a0, 0x59d4,
825 		0x5a00, 0x5ae0,
826 		0x5ae8, 0x5ae8,
827 		0x5af0, 0x5af0,
828 		0x5af8, 0x5af8,
829 		0x6000, 0x6098,
830 		0x6100, 0x6150,
831 		0x6200, 0x6208,
832 		0x6240, 0x6248,
833 		0x6280, 0x62b0,
834 		0x62c0, 0x6338,
835 		0x6370, 0x638c,
836 		0x6400, 0x643c,
837 		0x6500, 0x6524,
838 		0x6a00, 0x6a04,
839 		0x6a14, 0x6a38,
840 		0x6a60, 0x6a70,
841 		0x6a78, 0x6a78,
842 		0x6b00, 0x6b0c,
843 		0x6b1c, 0x6b84,
844 		0x6bf0, 0x6bf8,
845 		0x6c00, 0x6c0c,
846 		0x6c1c, 0x6c84,
847 		0x6cf0, 0x6cf8,
848 		0x6d00, 0x6d0c,
849 		0x6d1c, 0x6d84,
850 		0x6df0, 0x6df8,
851 		0x6e00, 0x6e0c,
852 		0x6e1c, 0x6e84,
853 		0x6ef0, 0x6ef8,
854 		0x6f00, 0x6f0c,
855 		0x6f1c, 0x6f84,
856 		0x6ff0, 0x6ff8,
857 		0x7000, 0x700c,
858 		0x701c, 0x7084,
859 		0x70f0, 0x70f8,
860 		0x7100, 0x710c,
861 		0x711c, 0x7184,
862 		0x71f0, 0x71f8,
863 		0x7200, 0x720c,
864 		0x721c, 0x7284,
865 		0x72f0, 0x72f8,
866 		0x7300, 0x730c,
867 		0x731c, 0x7384,
868 		0x73f0, 0x73f8,
869 		0x7400, 0x7450,
870 		0x7500, 0x7530,
871 		0x7600, 0x760c,
872 		0x7614, 0x761c,
873 		0x7680, 0x76cc,
874 		0x7700, 0x7798,
875 		0x77c0, 0x77fc,
876 		0x7900, 0x79fc,
877 		0x7b00, 0x7b58,
878 		0x7b60, 0x7b84,
879 		0x7b8c, 0x7c38,
880 		0x7d00, 0x7d38,
881 		0x7d40, 0x7d80,
882 		0x7d8c, 0x7ddc,
883 		0x7de4, 0x7e04,
884 		0x7e10, 0x7e1c,
885 		0x7e24, 0x7e38,
886 		0x7e40, 0x7e44,
887 		0x7e4c, 0x7e78,
888 		0x7e80, 0x7ea4,
889 		0x7eac, 0x7edc,
890 		0x7ee8, 0x7efc,
891 		0x8dc0, 0x8e04,
892 		0x8e10, 0x8e1c,
893 		0x8e30, 0x8e78,
894 		0x8ea0, 0x8eb8,
895 		0x8ec0, 0x8f6c,
896 		0x8fc0, 0x9008,
897 		0x9010, 0x9058,
898 		0x9060, 0x9060,
899 		0x9068, 0x9074,
900 		0x90fc, 0x90fc,
901 		0x9400, 0x9408,
902 		0x9410, 0x9458,
903 		0x9600, 0x9600,
904 		0x9608, 0x9638,
905 		0x9640, 0x96bc,
906 		0x9800, 0x9808,
907 		0x9820, 0x983c,
908 		0x9850, 0x9864,
909 		0x9c00, 0x9c6c,
910 		0x9c80, 0x9cec,
911 		0x9d00, 0x9d6c,
912 		0x9d80, 0x9dec,
913 		0x9e00, 0x9e6c,
914 		0x9e80, 0x9eec,
915 		0x9f00, 0x9f6c,
916 		0x9f80, 0x9fec,
917 		0xd004, 0xd004,
918 		0xd010, 0xd03c,
919 		0xdfc0, 0xdfe0,
920 		0xe000, 0xea7c,
921 		0xf000, 0x11110,
922 		0x11118, 0x11190,
923 		0x19040, 0x1906c,
924 		0x19078, 0x19080,
925 		0x1908c, 0x190e4,
926 		0x190f0, 0x190f8,
927 		0x19100, 0x19110,
928 		0x19120, 0x19124,
929 		0x19150, 0x19194,
930 		0x1919c, 0x191b0,
931 		0x191d0, 0x191e8,
932 		0x19238, 0x1924c,
933 		0x193f8, 0x1943c,
934 		0x1944c, 0x19474,
935 		0x19490, 0x194e0,
936 		0x194f0, 0x194f8,
937 		0x19800, 0x19c08,
938 		0x19c10, 0x19c90,
939 		0x19ca0, 0x19ce4,
940 		0x19cf0, 0x19d40,
941 		0x19d50, 0x19d94,
942 		0x19da0, 0x19de8,
943 		0x19df0, 0x19e40,
944 		0x19e50, 0x19e90,
945 		0x19ea0, 0x19f4c,
946 		0x1a000, 0x1a004,
947 		0x1a010, 0x1a06c,
948 		0x1a0b0, 0x1a0e4,
949 		0x1a0ec, 0x1a0f4,
950 		0x1a100, 0x1a108,
951 		0x1a114, 0x1a120,
952 		0x1a128, 0x1a130,
953 		0x1a138, 0x1a138,
954 		0x1a190, 0x1a1c4,
955 		0x1a1fc, 0x1a1fc,
956 		0x1e040, 0x1e04c,
957 		0x1e284, 0x1e28c,
958 		0x1e2c0, 0x1e2c0,
959 		0x1e2e0, 0x1e2e0,
960 		0x1e300, 0x1e384,
961 		0x1e3c0, 0x1e3c8,
962 		0x1e440, 0x1e44c,
963 		0x1e684, 0x1e68c,
964 		0x1e6c0, 0x1e6c0,
965 		0x1e6e0, 0x1e6e0,
966 		0x1e700, 0x1e784,
967 		0x1e7c0, 0x1e7c8,
968 		0x1e840, 0x1e84c,
969 		0x1ea84, 0x1ea8c,
970 		0x1eac0, 0x1eac0,
971 		0x1eae0, 0x1eae0,
972 		0x1eb00, 0x1eb84,
973 		0x1ebc0, 0x1ebc8,
974 		0x1ec40, 0x1ec4c,
975 		0x1ee84, 0x1ee8c,
976 		0x1eec0, 0x1eec0,
977 		0x1eee0, 0x1eee0,
978 		0x1ef00, 0x1ef84,
979 		0x1efc0, 0x1efc8,
980 		0x1f040, 0x1f04c,
981 		0x1f284, 0x1f28c,
982 		0x1f2c0, 0x1f2c0,
983 		0x1f2e0, 0x1f2e0,
984 		0x1f300, 0x1f384,
985 		0x1f3c0, 0x1f3c8,
986 		0x1f440, 0x1f44c,
987 		0x1f684, 0x1f68c,
988 		0x1f6c0, 0x1f6c0,
989 		0x1f6e0, 0x1f6e0,
990 		0x1f700, 0x1f784,
991 		0x1f7c0, 0x1f7c8,
992 		0x1f840, 0x1f84c,
993 		0x1fa84, 0x1fa8c,
994 		0x1fac0, 0x1fac0,
995 		0x1fae0, 0x1fae0,
996 		0x1fb00, 0x1fb84,
997 		0x1fbc0, 0x1fbc8,
998 		0x1fc40, 0x1fc4c,
999 		0x1fe84, 0x1fe8c,
1000 		0x1fec0, 0x1fec0,
1001 		0x1fee0, 0x1fee0,
1002 		0x1ff00, 0x1ff84,
1003 		0x1ffc0, 0x1ffc8,
1004 		0x20000, 0x2002c,
1005 		0x20100, 0x2013c,
1006 		0x20190, 0x201a0,
1007 		0x201a8, 0x201b8,
1008 		0x201c4, 0x201c8,
1009 		0x20200, 0x20318,
1010 		0x20400, 0x204b4,
1011 		0x204c0, 0x20528,
1012 		0x20540, 0x20614,
1013 		0x21000, 0x21040,
1014 		0x2104c, 0x21060,
1015 		0x210c0, 0x210ec,
1016 		0x21200, 0x21268,
1017 		0x21270, 0x21284,
1018 		0x212fc, 0x21388,
1019 		0x21400, 0x21404,
1020 		0x21500, 0x21500,
1021 		0x21510, 0x21518,
1022 		0x2152c, 0x21530,
1023 		0x2153c, 0x2153c,
1024 		0x21550, 0x21554,
1025 		0x21600, 0x21600,
1026 		0x21608, 0x2161c,
1027 		0x21624, 0x21628,
1028 		0x21630, 0x21634,
1029 		0x2163c, 0x2163c,
1030 		0x21700, 0x2171c,
1031 		0x21780, 0x2178c,
1032 		0x21800, 0x21818,
1033 		0x21820, 0x21828,
1034 		0x21830, 0x21848,
1035 		0x21850, 0x21854,
1036 		0x21860, 0x21868,
1037 		0x21870, 0x21870,
1038 		0x21878, 0x21898,
1039 		0x218a0, 0x218a8,
1040 		0x218b0, 0x218c8,
1041 		0x218d0, 0x218d4,
1042 		0x218e0, 0x218e8,
1043 		0x218f0, 0x218f0,
1044 		0x218f8, 0x21a18,
1045 		0x21a20, 0x21a28,
1046 		0x21a30, 0x21a48,
1047 		0x21a50, 0x21a54,
1048 		0x21a60, 0x21a68,
1049 		0x21a70, 0x21a70,
1050 		0x21a78, 0x21a98,
1051 		0x21aa0, 0x21aa8,
1052 		0x21ab0, 0x21ac8,
1053 		0x21ad0, 0x21ad4,
1054 		0x21ae0, 0x21ae8,
1055 		0x21af0, 0x21af0,
1056 		0x21af8, 0x21c18,
1057 		0x21c20, 0x21c20,
1058 		0x21c28, 0x21c30,
1059 		0x21c38, 0x21c38,
1060 		0x21c80, 0x21c98,
1061 		0x21ca0, 0x21ca8,
1062 		0x21cb0, 0x21cc8,
1063 		0x21cd0, 0x21cd4,
1064 		0x21ce0, 0x21ce8,
1065 		0x21cf0, 0x21cf0,
1066 		0x21cf8, 0x21d7c,
1067 		0x21e00, 0x21e04,
1068 		0x22000, 0x2202c,
1069 		0x22100, 0x2213c,
1070 		0x22190, 0x221a0,
1071 		0x221a8, 0x221b8,
1072 		0x221c4, 0x221c8,
1073 		0x22200, 0x22318,
1074 		0x22400, 0x224b4,
1075 		0x224c0, 0x22528,
1076 		0x22540, 0x22614,
1077 		0x23000, 0x23040,
1078 		0x2304c, 0x23060,
1079 		0x230c0, 0x230ec,
1080 		0x23200, 0x23268,
1081 		0x23270, 0x23284,
1082 		0x232fc, 0x23388,
1083 		0x23400, 0x23404,
1084 		0x23500, 0x23500,
1085 		0x23510, 0x23518,
1086 		0x2352c, 0x23530,
1087 		0x2353c, 0x2353c,
1088 		0x23550, 0x23554,
1089 		0x23600, 0x23600,
1090 		0x23608, 0x2361c,
1091 		0x23624, 0x23628,
1092 		0x23630, 0x23634,
1093 		0x2363c, 0x2363c,
1094 		0x23700, 0x2371c,
1095 		0x23780, 0x2378c,
1096 		0x23800, 0x23818,
1097 		0x23820, 0x23828,
1098 		0x23830, 0x23848,
1099 		0x23850, 0x23854,
1100 		0x23860, 0x23868,
1101 		0x23870, 0x23870,
1102 		0x23878, 0x23898,
1103 		0x238a0, 0x238a8,
1104 		0x238b0, 0x238c8,
1105 		0x238d0, 0x238d4,
1106 		0x238e0, 0x238e8,
1107 		0x238f0, 0x238f0,
1108 		0x238f8, 0x23a18,
1109 		0x23a20, 0x23a28,
1110 		0x23a30, 0x23a48,
1111 		0x23a50, 0x23a54,
1112 		0x23a60, 0x23a68,
1113 		0x23a70, 0x23a70,
1114 		0x23a78, 0x23a98,
1115 		0x23aa0, 0x23aa8,
1116 		0x23ab0, 0x23ac8,
1117 		0x23ad0, 0x23ad4,
1118 		0x23ae0, 0x23ae8,
1119 		0x23af0, 0x23af0,
1120 		0x23af8, 0x23c18,
1121 		0x23c20, 0x23c20,
1122 		0x23c28, 0x23c30,
1123 		0x23c38, 0x23c38,
1124 		0x23c80, 0x23c98,
1125 		0x23ca0, 0x23ca8,
1126 		0x23cb0, 0x23cc8,
1127 		0x23cd0, 0x23cd4,
1128 		0x23ce0, 0x23ce8,
1129 		0x23cf0, 0x23cf0,
1130 		0x23cf8, 0x23d7c,
1131 		0x23e00, 0x23e04,
1132 		0x24000, 0x2402c,
1133 		0x24100, 0x2413c,
1134 		0x24190, 0x241a0,
1135 		0x241a8, 0x241b8,
1136 		0x241c4, 0x241c8,
1137 		0x24200, 0x24318,
1138 		0x24400, 0x244b4,
1139 		0x244c0, 0x24528,
1140 		0x24540, 0x24614,
1141 		0x25000, 0x25040,
1142 		0x2504c, 0x25060,
1143 		0x250c0, 0x250ec,
1144 		0x25200, 0x25268,
1145 		0x25270, 0x25284,
1146 		0x252fc, 0x25388,
1147 		0x25400, 0x25404,
1148 		0x25500, 0x25500,
1149 		0x25510, 0x25518,
1150 		0x2552c, 0x25530,
1151 		0x2553c, 0x2553c,
1152 		0x25550, 0x25554,
1153 		0x25600, 0x25600,
1154 		0x25608, 0x2561c,
1155 		0x25624, 0x25628,
1156 		0x25630, 0x25634,
1157 		0x2563c, 0x2563c,
1158 		0x25700, 0x2571c,
1159 		0x25780, 0x2578c,
1160 		0x25800, 0x25818,
1161 		0x25820, 0x25828,
1162 		0x25830, 0x25848,
1163 		0x25850, 0x25854,
1164 		0x25860, 0x25868,
1165 		0x25870, 0x25870,
1166 		0x25878, 0x25898,
1167 		0x258a0, 0x258a8,
1168 		0x258b0, 0x258c8,
1169 		0x258d0, 0x258d4,
1170 		0x258e0, 0x258e8,
1171 		0x258f0, 0x258f0,
1172 		0x258f8, 0x25a18,
1173 		0x25a20, 0x25a28,
1174 		0x25a30, 0x25a48,
1175 		0x25a50, 0x25a54,
1176 		0x25a60, 0x25a68,
1177 		0x25a70, 0x25a70,
1178 		0x25a78, 0x25a98,
1179 		0x25aa0, 0x25aa8,
1180 		0x25ab0, 0x25ac8,
1181 		0x25ad0, 0x25ad4,
1182 		0x25ae0, 0x25ae8,
1183 		0x25af0, 0x25af0,
1184 		0x25af8, 0x25c18,
1185 		0x25c20, 0x25c20,
1186 		0x25c28, 0x25c30,
1187 		0x25c38, 0x25c38,
1188 		0x25c80, 0x25c98,
1189 		0x25ca0, 0x25ca8,
1190 		0x25cb0, 0x25cc8,
1191 		0x25cd0, 0x25cd4,
1192 		0x25ce0, 0x25ce8,
1193 		0x25cf0, 0x25cf0,
1194 		0x25cf8, 0x25d7c,
1195 		0x25e00, 0x25e04,
1196 		0x26000, 0x2602c,
1197 		0x26100, 0x2613c,
1198 		0x26190, 0x261a0,
1199 		0x261a8, 0x261b8,
1200 		0x261c4, 0x261c8,
1201 		0x26200, 0x26318,
1202 		0x26400, 0x264b4,
1203 		0x264c0, 0x26528,
1204 		0x26540, 0x26614,
1205 		0x27000, 0x27040,
1206 		0x2704c, 0x27060,
1207 		0x270c0, 0x270ec,
1208 		0x27200, 0x27268,
1209 		0x27270, 0x27284,
1210 		0x272fc, 0x27388,
1211 		0x27400, 0x27404,
1212 		0x27500, 0x27500,
1213 		0x27510, 0x27518,
1214 		0x2752c, 0x27530,
1215 		0x2753c, 0x2753c,
1216 		0x27550, 0x27554,
1217 		0x27600, 0x27600,
1218 		0x27608, 0x2761c,
1219 		0x27624, 0x27628,
1220 		0x27630, 0x27634,
1221 		0x2763c, 0x2763c,
1222 		0x27700, 0x2771c,
1223 		0x27780, 0x2778c,
1224 		0x27800, 0x27818,
1225 		0x27820, 0x27828,
1226 		0x27830, 0x27848,
1227 		0x27850, 0x27854,
1228 		0x27860, 0x27868,
1229 		0x27870, 0x27870,
1230 		0x27878, 0x27898,
1231 		0x278a0, 0x278a8,
1232 		0x278b0, 0x278c8,
1233 		0x278d0, 0x278d4,
1234 		0x278e0, 0x278e8,
1235 		0x278f0, 0x278f0,
1236 		0x278f8, 0x27a18,
1237 		0x27a20, 0x27a28,
1238 		0x27a30, 0x27a48,
1239 		0x27a50, 0x27a54,
1240 		0x27a60, 0x27a68,
1241 		0x27a70, 0x27a70,
1242 		0x27a78, 0x27a98,
1243 		0x27aa0, 0x27aa8,
1244 		0x27ab0, 0x27ac8,
1245 		0x27ad0, 0x27ad4,
1246 		0x27ae0, 0x27ae8,
1247 		0x27af0, 0x27af0,
1248 		0x27af8, 0x27c18,
1249 		0x27c20, 0x27c20,
1250 		0x27c28, 0x27c30,
1251 		0x27c38, 0x27c38,
1252 		0x27c80, 0x27c98,
1253 		0x27ca0, 0x27ca8,
1254 		0x27cb0, 0x27cc8,
1255 		0x27cd0, 0x27cd4,
1256 		0x27ce0, 0x27ce8,
1257 		0x27cf0, 0x27cf0,
1258 		0x27cf8, 0x27d7c,
1259 		0x27e00, 0x27e04,
1260 	};
1261 
1262 	static const unsigned int t5_reg_ranges[] = {
1263 		0x1008, 0x10c0,
1264 		0x10cc, 0x10f8,
1265 		0x1100, 0x1100,
1266 		0x110c, 0x1148,
1267 		0x1180, 0x1184,
1268 		0x1190, 0x1194,
1269 		0x11a0, 0x11a4,
1270 		0x11b0, 0x11b4,
1271 		0x11fc, 0x123c,
1272 		0x1280, 0x173c,
1273 		0x1800, 0x18fc,
1274 		0x3000, 0x3028,
1275 		0x3060, 0x30b0,
1276 		0x30b8, 0x30d8,
1277 		0x30e0, 0x30fc,
1278 		0x3140, 0x357c,
1279 		0x35a8, 0x35cc,
1280 		0x35ec, 0x35ec,
1281 		0x3600, 0x5624,
1282 		0x56cc, 0x56ec,
1283 		0x56f4, 0x5720,
1284 		0x5728, 0x575c,
1285 		0x580c, 0x5814,
1286 		0x5890, 0x589c,
1287 		0x58a4, 0x58ac,
1288 		0x58b8, 0x58bc,
1289 		0x5940, 0x59c8,
1290 		0x59d0, 0x59dc,
1291 		0x59fc, 0x5a18,
1292 		0x5a60, 0x5a70,
1293 		0x5a80, 0x5a9c,
1294 		0x5b94, 0x5bfc,
1295 		0x6000, 0x6020,
1296 		0x6028, 0x6040,
1297 		0x6058, 0x609c,
1298 		0x60a8, 0x614c,
1299 		0x7700, 0x7798,
1300 		0x77c0, 0x78fc,
1301 		0x7b00, 0x7b58,
1302 		0x7b60, 0x7b84,
1303 		0x7b8c, 0x7c54,
1304 		0x7d00, 0x7d38,
1305 		0x7d40, 0x7d80,
1306 		0x7d8c, 0x7ddc,
1307 		0x7de4, 0x7e04,
1308 		0x7e10, 0x7e1c,
1309 		0x7e24, 0x7e38,
1310 		0x7e40, 0x7e44,
1311 		0x7e4c, 0x7e78,
1312 		0x7e80, 0x7edc,
1313 		0x7ee8, 0x7efc,
1314 		0x8dc0, 0x8de0,
1315 		0x8df8, 0x8e04,
1316 		0x8e10, 0x8e84,
1317 		0x8ea0, 0x8f84,
1318 		0x8fc0, 0x9058,
1319 		0x9060, 0x9060,
1320 		0x9068, 0x90f8,
1321 		0x9400, 0x9408,
1322 		0x9410, 0x9470,
1323 		0x9600, 0x9600,
1324 		0x9608, 0x9638,
1325 		0x9640, 0x96f4,
1326 		0x9800, 0x9808,
1327 		0x9820, 0x983c,
1328 		0x9850, 0x9864,
1329 		0x9c00, 0x9c6c,
1330 		0x9c80, 0x9cec,
1331 		0x9d00, 0x9d6c,
1332 		0x9d80, 0x9dec,
1333 		0x9e00, 0x9e6c,
1334 		0x9e80, 0x9eec,
1335 		0x9f00, 0x9f6c,
1336 		0x9f80, 0xa020,
1337 		0xd004, 0xd004,
1338 		0xd010, 0xd03c,
1339 		0xdfc0, 0xdfe0,
1340 		0xe000, 0x1106c,
1341 		0x11074, 0x11088,
1342 		0x1109c, 0x1117c,
1343 		0x11190, 0x11204,
1344 		0x19040, 0x1906c,
1345 		0x19078, 0x19080,
1346 		0x1908c, 0x190e8,
1347 		0x190f0, 0x190f8,
1348 		0x19100, 0x19110,
1349 		0x19120, 0x19124,
1350 		0x19150, 0x19194,
1351 		0x1919c, 0x191b0,
1352 		0x191d0, 0x191e8,
1353 		0x19238, 0x19290,
1354 		0x193f8, 0x19428,
1355 		0x19430, 0x19444,
1356 		0x1944c, 0x1946c,
1357 		0x19474, 0x19474,
1358 		0x19490, 0x194cc,
1359 		0x194f0, 0x194f8,
1360 		0x19c00, 0x19c08,
1361 		0x19c10, 0x19c60,
1362 		0x19c94, 0x19ce4,
1363 		0x19cf0, 0x19d40,
1364 		0x19d50, 0x19d94,
1365 		0x19da0, 0x19de8,
1366 		0x19df0, 0x19e10,
1367 		0x19e50, 0x19e90,
1368 		0x19ea0, 0x19f24,
1369 		0x19f34, 0x19f34,
1370 		0x19f40, 0x19f50,
1371 		0x19f90, 0x19fb4,
1372 		0x19fc4, 0x19fe4,
1373 		0x1a000, 0x1a004,
1374 		0x1a010, 0x1a06c,
1375 		0x1a0b0, 0x1a0e4,
1376 		0x1a0ec, 0x1a0f8,
1377 		0x1a100, 0x1a108,
1378 		0x1a114, 0x1a120,
1379 		0x1a128, 0x1a130,
1380 		0x1a138, 0x1a138,
1381 		0x1a190, 0x1a1c4,
1382 		0x1a1fc, 0x1a1fc,
1383 		0x1e008, 0x1e00c,
1384 		0x1e040, 0x1e044,
1385 		0x1e04c, 0x1e04c,
1386 		0x1e284, 0x1e290,
1387 		0x1e2c0, 0x1e2c0,
1388 		0x1e2e0, 0x1e2e0,
1389 		0x1e300, 0x1e384,
1390 		0x1e3c0, 0x1e3c8,
1391 		0x1e408, 0x1e40c,
1392 		0x1e440, 0x1e444,
1393 		0x1e44c, 0x1e44c,
1394 		0x1e684, 0x1e690,
1395 		0x1e6c0, 0x1e6c0,
1396 		0x1e6e0, 0x1e6e0,
1397 		0x1e700, 0x1e784,
1398 		0x1e7c0, 0x1e7c8,
1399 		0x1e808, 0x1e80c,
1400 		0x1e840, 0x1e844,
1401 		0x1e84c, 0x1e84c,
1402 		0x1ea84, 0x1ea90,
1403 		0x1eac0, 0x1eac0,
1404 		0x1eae0, 0x1eae0,
1405 		0x1eb00, 0x1eb84,
1406 		0x1ebc0, 0x1ebc8,
1407 		0x1ec08, 0x1ec0c,
1408 		0x1ec40, 0x1ec44,
1409 		0x1ec4c, 0x1ec4c,
1410 		0x1ee84, 0x1ee90,
1411 		0x1eec0, 0x1eec0,
1412 		0x1eee0, 0x1eee0,
1413 		0x1ef00, 0x1ef84,
1414 		0x1efc0, 0x1efc8,
1415 		0x1f008, 0x1f00c,
1416 		0x1f040, 0x1f044,
1417 		0x1f04c, 0x1f04c,
1418 		0x1f284, 0x1f290,
1419 		0x1f2c0, 0x1f2c0,
1420 		0x1f2e0, 0x1f2e0,
1421 		0x1f300, 0x1f384,
1422 		0x1f3c0, 0x1f3c8,
1423 		0x1f408, 0x1f40c,
1424 		0x1f440, 0x1f444,
1425 		0x1f44c, 0x1f44c,
1426 		0x1f684, 0x1f690,
1427 		0x1f6c0, 0x1f6c0,
1428 		0x1f6e0, 0x1f6e0,
1429 		0x1f700, 0x1f784,
1430 		0x1f7c0, 0x1f7c8,
1431 		0x1f808, 0x1f80c,
1432 		0x1f840, 0x1f844,
1433 		0x1f84c, 0x1f84c,
1434 		0x1fa84, 0x1fa90,
1435 		0x1fac0, 0x1fac0,
1436 		0x1fae0, 0x1fae0,
1437 		0x1fb00, 0x1fb84,
1438 		0x1fbc0, 0x1fbc8,
1439 		0x1fc08, 0x1fc0c,
1440 		0x1fc40, 0x1fc44,
1441 		0x1fc4c, 0x1fc4c,
1442 		0x1fe84, 0x1fe90,
1443 		0x1fec0, 0x1fec0,
1444 		0x1fee0, 0x1fee0,
1445 		0x1ff00, 0x1ff84,
1446 		0x1ffc0, 0x1ffc8,
1447 		0x30000, 0x30030,
1448 		0x30100, 0x30144,
1449 		0x30190, 0x301a0,
1450 		0x301a8, 0x301b8,
1451 		0x301c4, 0x301c8,
1452 		0x301d0, 0x301d0,
1453 		0x30200, 0x30318,
1454 		0x30400, 0x304b4,
1455 		0x304c0, 0x3052c,
1456 		0x30540, 0x3061c,
1457 		0x30800, 0x30828,
1458 		0x30834, 0x30834,
1459 		0x308c0, 0x30908,
1460 		0x30910, 0x309ac,
1461 		0x30a00, 0x30a14,
1462 		0x30a1c, 0x30a2c,
1463 		0x30a44, 0x30a50,
1464 		0x30a74, 0x30a74,
1465 		0x30a7c, 0x30afc,
1466 		0x30b08, 0x30c24,
1467 		0x30d00, 0x30d00,
1468 		0x30d08, 0x30d14,
1469 		0x30d1c, 0x30d20,
1470 		0x30d3c, 0x30d3c,
1471 		0x30d48, 0x30d50,
1472 		0x31200, 0x3120c,
1473 		0x31220, 0x31220,
1474 		0x31240, 0x31240,
1475 		0x31600, 0x3160c,
1476 		0x31a00, 0x31a1c,
1477 		0x31e00, 0x31e20,
1478 		0x31e38, 0x31e3c,
1479 		0x31e80, 0x31e80,
1480 		0x31e88, 0x31ea8,
1481 		0x31eb0, 0x31eb4,
1482 		0x31ec8, 0x31ed4,
1483 		0x31fb8, 0x32004,
1484 		0x32200, 0x32200,
1485 		0x32208, 0x32240,
1486 		0x32248, 0x32280,
1487 		0x32288, 0x322c0,
1488 		0x322c8, 0x322fc,
1489 		0x32600, 0x32630,
1490 		0x32a00, 0x32abc,
1491 		0x32b00, 0x32b10,
1492 		0x32b20, 0x32b30,
1493 		0x32b40, 0x32b50,
1494 		0x32b60, 0x32b70,
1495 		0x33000, 0x33028,
1496 		0x33030, 0x33048,
1497 		0x33060, 0x33068,
1498 		0x33070, 0x3309c,
1499 		0x330f0, 0x33128,
1500 		0x33130, 0x33148,
1501 		0x33160, 0x33168,
1502 		0x33170, 0x3319c,
1503 		0x331f0, 0x33238,
1504 		0x33240, 0x33240,
1505 		0x33248, 0x33250,
1506 		0x3325c, 0x33264,
1507 		0x33270, 0x332b8,
1508 		0x332c0, 0x332e4,
1509 		0x332f8, 0x33338,
1510 		0x33340, 0x33340,
1511 		0x33348, 0x33350,
1512 		0x3335c, 0x33364,
1513 		0x33370, 0x333b8,
1514 		0x333c0, 0x333e4,
1515 		0x333f8, 0x33428,
1516 		0x33430, 0x33448,
1517 		0x33460, 0x33468,
1518 		0x33470, 0x3349c,
1519 		0x334f0, 0x33528,
1520 		0x33530, 0x33548,
1521 		0x33560, 0x33568,
1522 		0x33570, 0x3359c,
1523 		0x335f0, 0x33638,
1524 		0x33640, 0x33640,
1525 		0x33648, 0x33650,
1526 		0x3365c, 0x33664,
1527 		0x33670, 0x336b8,
1528 		0x336c0, 0x336e4,
1529 		0x336f8, 0x33738,
1530 		0x33740, 0x33740,
1531 		0x33748, 0x33750,
1532 		0x3375c, 0x33764,
1533 		0x33770, 0x337b8,
1534 		0x337c0, 0x337e4,
1535 		0x337f8, 0x337fc,
1536 		0x33814, 0x33814,
1537 		0x3382c, 0x3382c,
1538 		0x33880, 0x3388c,
1539 		0x338e8, 0x338ec,
1540 		0x33900, 0x33928,
1541 		0x33930, 0x33948,
1542 		0x33960, 0x33968,
1543 		0x33970, 0x3399c,
1544 		0x339f0, 0x33a38,
1545 		0x33a40, 0x33a40,
1546 		0x33a48, 0x33a50,
1547 		0x33a5c, 0x33a64,
1548 		0x33a70, 0x33ab8,
1549 		0x33ac0, 0x33ae4,
1550 		0x33af8, 0x33b10,
1551 		0x33b28, 0x33b28,
1552 		0x33b3c, 0x33b50,
1553 		0x33bf0, 0x33c10,
1554 		0x33c28, 0x33c28,
1555 		0x33c3c, 0x33c50,
1556 		0x33cf0, 0x33cfc,
1557 		0x34000, 0x34030,
1558 		0x34100, 0x34144,
1559 		0x34190, 0x341a0,
1560 		0x341a8, 0x341b8,
1561 		0x341c4, 0x341c8,
1562 		0x341d0, 0x341d0,
1563 		0x34200, 0x34318,
1564 		0x34400, 0x344b4,
1565 		0x344c0, 0x3452c,
1566 		0x34540, 0x3461c,
1567 		0x34800, 0x34828,
1568 		0x34834, 0x34834,
1569 		0x348c0, 0x34908,
1570 		0x34910, 0x349ac,
1571 		0x34a00, 0x34a14,
1572 		0x34a1c, 0x34a2c,
1573 		0x34a44, 0x34a50,
1574 		0x34a74, 0x34a74,
1575 		0x34a7c, 0x34afc,
1576 		0x34b08, 0x34c24,
1577 		0x34d00, 0x34d00,
1578 		0x34d08, 0x34d14,
1579 		0x34d1c, 0x34d20,
1580 		0x34d3c, 0x34d3c,
1581 		0x34d48, 0x34d50,
1582 		0x35200, 0x3520c,
1583 		0x35220, 0x35220,
1584 		0x35240, 0x35240,
1585 		0x35600, 0x3560c,
1586 		0x35a00, 0x35a1c,
1587 		0x35e00, 0x35e20,
1588 		0x35e38, 0x35e3c,
1589 		0x35e80, 0x35e80,
1590 		0x35e88, 0x35ea8,
1591 		0x35eb0, 0x35eb4,
1592 		0x35ec8, 0x35ed4,
1593 		0x35fb8, 0x36004,
1594 		0x36200, 0x36200,
1595 		0x36208, 0x36240,
1596 		0x36248, 0x36280,
1597 		0x36288, 0x362c0,
1598 		0x362c8, 0x362fc,
1599 		0x36600, 0x36630,
1600 		0x36a00, 0x36abc,
1601 		0x36b00, 0x36b10,
1602 		0x36b20, 0x36b30,
1603 		0x36b40, 0x36b50,
1604 		0x36b60, 0x36b70,
1605 		0x37000, 0x37028,
1606 		0x37030, 0x37048,
1607 		0x37060, 0x37068,
1608 		0x37070, 0x3709c,
1609 		0x370f0, 0x37128,
1610 		0x37130, 0x37148,
1611 		0x37160, 0x37168,
1612 		0x37170, 0x3719c,
1613 		0x371f0, 0x37238,
1614 		0x37240, 0x37240,
1615 		0x37248, 0x37250,
1616 		0x3725c, 0x37264,
1617 		0x37270, 0x372b8,
1618 		0x372c0, 0x372e4,
1619 		0x372f8, 0x37338,
1620 		0x37340, 0x37340,
1621 		0x37348, 0x37350,
1622 		0x3735c, 0x37364,
1623 		0x37370, 0x373b8,
1624 		0x373c0, 0x373e4,
1625 		0x373f8, 0x37428,
1626 		0x37430, 0x37448,
1627 		0x37460, 0x37468,
1628 		0x37470, 0x3749c,
1629 		0x374f0, 0x37528,
1630 		0x37530, 0x37548,
1631 		0x37560, 0x37568,
1632 		0x37570, 0x3759c,
1633 		0x375f0, 0x37638,
1634 		0x37640, 0x37640,
1635 		0x37648, 0x37650,
1636 		0x3765c, 0x37664,
1637 		0x37670, 0x376b8,
1638 		0x376c0, 0x376e4,
1639 		0x376f8, 0x37738,
1640 		0x37740, 0x37740,
1641 		0x37748, 0x37750,
1642 		0x3775c, 0x37764,
1643 		0x37770, 0x377b8,
1644 		0x377c0, 0x377e4,
1645 		0x377f8, 0x377fc,
1646 		0x37814, 0x37814,
1647 		0x3782c, 0x3782c,
1648 		0x37880, 0x3788c,
1649 		0x378e8, 0x378ec,
1650 		0x37900, 0x37928,
1651 		0x37930, 0x37948,
1652 		0x37960, 0x37968,
1653 		0x37970, 0x3799c,
1654 		0x379f0, 0x37a38,
1655 		0x37a40, 0x37a40,
1656 		0x37a48, 0x37a50,
1657 		0x37a5c, 0x37a64,
1658 		0x37a70, 0x37ab8,
1659 		0x37ac0, 0x37ae4,
1660 		0x37af8, 0x37b10,
1661 		0x37b28, 0x37b28,
1662 		0x37b3c, 0x37b50,
1663 		0x37bf0, 0x37c10,
1664 		0x37c28, 0x37c28,
1665 		0x37c3c, 0x37c50,
1666 		0x37cf0, 0x37cfc,
1667 		0x38000, 0x38030,
1668 		0x38100, 0x38144,
1669 		0x38190, 0x381a0,
1670 		0x381a8, 0x381b8,
1671 		0x381c4, 0x381c8,
1672 		0x381d0, 0x381d0,
1673 		0x38200, 0x38318,
1674 		0x38400, 0x384b4,
1675 		0x384c0, 0x3852c,
1676 		0x38540, 0x3861c,
1677 		0x38800, 0x38828,
1678 		0x38834, 0x38834,
1679 		0x388c0, 0x38908,
1680 		0x38910, 0x389ac,
1681 		0x38a00, 0x38a14,
1682 		0x38a1c, 0x38a2c,
1683 		0x38a44, 0x38a50,
1684 		0x38a74, 0x38a74,
1685 		0x38a7c, 0x38afc,
1686 		0x38b08, 0x38c24,
1687 		0x38d00, 0x38d00,
1688 		0x38d08, 0x38d14,
1689 		0x38d1c, 0x38d20,
1690 		0x38d3c, 0x38d3c,
1691 		0x38d48, 0x38d50,
1692 		0x39200, 0x3920c,
1693 		0x39220, 0x39220,
1694 		0x39240, 0x39240,
1695 		0x39600, 0x3960c,
1696 		0x39a00, 0x39a1c,
1697 		0x39e00, 0x39e20,
1698 		0x39e38, 0x39e3c,
1699 		0x39e80, 0x39e80,
1700 		0x39e88, 0x39ea8,
1701 		0x39eb0, 0x39eb4,
1702 		0x39ec8, 0x39ed4,
1703 		0x39fb8, 0x3a004,
1704 		0x3a200, 0x3a200,
1705 		0x3a208, 0x3a240,
1706 		0x3a248, 0x3a280,
1707 		0x3a288, 0x3a2c0,
1708 		0x3a2c8, 0x3a2fc,
1709 		0x3a600, 0x3a630,
1710 		0x3aa00, 0x3aabc,
1711 		0x3ab00, 0x3ab10,
1712 		0x3ab20, 0x3ab30,
1713 		0x3ab40, 0x3ab50,
1714 		0x3ab60, 0x3ab70,
1715 		0x3b000, 0x3b028,
1716 		0x3b030, 0x3b048,
1717 		0x3b060, 0x3b068,
1718 		0x3b070, 0x3b09c,
1719 		0x3b0f0, 0x3b128,
1720 		0x3b130, 0x3b148,
1721 		0x3b160, 0x3b168,
1722 		0x3b170, 0x3b19c,
1723 		0x3b1f0, 0x3b238,
1724 		0x3b240, 0x3b240,
1725 		0x3b248, 0x3b250,
1726 		0x3b25c, 0x3b264,
1727 		0x3b270, 0x3b2b8,
1728 		0x3b2c0, 0x3b2e4,
1729 		0x3b2f8, 0x3b338,
1730 		0x3b340, 0x3b340,
1731 		0x3b348, 0x3b350,
1732 		0x3b35c, 0x3b364,
1733 		0x3b370, 0x3b3b8,
1734 		0x3b3c0, 0x3b3e4,
1735 		0x3b3f8, 0x3b428,
1736 		0x3b430, 0x3b448,
1737 		0x3b460, 0x3b468,
1738 		0x3b470, 0x3b49c,
1739 		0x3b4f0, 0x3b528,
1740 		0x3b530, 0x3b548,
1741 		0x3b560, 0x3b568,
1742 		0x3b570, 0x3b59c,
1743 		0x3b5f0, 0x3b638,
1744 		0x3b640, 0x3b640,
1745 		0x3b648, 0x3b650,
1746 		0x3b65c, 0x3b664,
1747 		0x3b670, 0x3b6b8,
1748 		0x3b6c0, 0x3b6e4,
1749 		0x3b6f8, 0x3b738,
1750 		0x3b740, 0x3b740,
1751 		0x3b748, 0x3b750,
1752 		0x3b75c, 0x3b764,
1753 		0x3b770, 0x3b7b8,
1754 		0x3b7c0, 0x3b7e4,
1755 		0x3b7f8, 0x3b7fc,
1756 		0x3b814, 0x3b814,
1757 		0x3b82c, 0x3b82c,
1758 		0x3b880, 0x3b88c,
1759 		0x3b8e8, 0x3b8ec,
1760 		0x3b900, 0x3b928,
1761 		0x3b930, 0x3b948,
1762 		0x3b960, 0x3b968,
1763 		0x3b970, 0x3b99c,
1764 		0x3b9f0, 0x3ba38,
1765 		0x3ba40, 0x3ba40,
1766 		0x3ba48, 0x3ba50,
1767 		0x3ba5c, 0x3ba64,
1768 		0x3ba70, 0x3bab8,
1769 		0x3bac0, 0x3bae4,
1770 		0x3baf8, 0x3bb10,
1771 		0x3bb28, 0x3bb28,
1772 		0x3bb3c, 0x3bb50,
1773 		0x3bbf0, 0x3bc10,
1774 		0x3bc28, 0x3bc28,
1775 		0x3bc3c, 0x3bc50,
1776 		0x3bcf0, 0x3bcfc,
1777 		0x3c000, 0x3c030,
1778 		0x3c100, 0x3c144,
1779 		0x3c190, 0x3c1a0,
1780 		0x3c1a8, 0x3c1b8,
1781 		0x3c1c4, 0x3c1c8,
1782 		0x3c1d0, 0x3c1d0,
1783 		0x3c200, 0x3c318,
1784 		0x3c400, 0x3c4b4,
1785 		0x3c4c0, 0x3c52c,
1786 		0x3c540, 0x3c61c,
1787 		0x3c800, 0x3c828,
1788 		0x3c834, 0x3c834,
1789 		0x3c8c0, 0x3c908,
1790 		0x3c910, 0x3c9ac,
1791 		0x3ca00, 0x3ca14,
1792 		0x3ca1c, 0x3ca2c,
1793 		0x3ca44, 0x3ca50,
1794 		0x3ca74, 0x3ca74,
1795 		0x3ca7c, 0x3cafc,
1796 		0x3cb08, 0x3cc24,
1797 		0x3cd00, 0x3cd00,
1798 		0x3cd08, 0x3cd14,
1799 		0x3cd1c, 0x3cd20,
1800 		0x3cd3c, 0x3cd3c,
1801 		0x3cd48, 0x3cd50,
1802 		0x3d200, 0x3d20c,
1803 		0x3d220, 0x3d220,
1804 		0x3d240, 0x3d240,
1805 		0x3d600, 0x3d60c,
1806 		0x3da00, 0x3da1c,
1807 		0x3de00, 0x3de20,
1808 		0x3de38, 0x3de3c,
1809 		0x3de80, 0x3de80,
1810 		0x3de88, 0x3dea8,
1811 		0x3deb0, 0x3deb4,
1812 		0x3dec8, 0x3ded4,
1813 		0x3dfb8, 0x3e004,
1814 		0x3e200, 0x3e200,
1815 		0x3e208, 0x3e240,
1816 		0x3e248, 0x3e280,
1817 		0x3e288, 0x3e2c0,
1818 		0x3e2c8, 0x3e2fc,
1819 		0x3e600, 0x3e630,
1820 		0x3ea00, 0x3eabc,
1821 		0x3eb00, 0x3eb10,
1822 		0x3eb20, 0x3eb30,
1823 		0x3eb40, 0x3eb50,
1824 		0x3eb60, 0x3eb70,
1825 		0x3f000, 0x3f028,
1826 		0x3f030, 0x3f048,
1827 		0x3f060, 0x3f068,
1828 		0x3f070, 0x3f09c,
1829 		0x3f0f0, 0x3f128,
1830 		0x3f130, 0x3f148,
1831 		0x3f160, 0x3f168,
1832 		0x3f170, 0x3f19c,
1833 		0x3f1f0, 0x3f238,
1834 		0x3f240, 0x3f240,
1835 		0x3f248, 0x3f250,
1836 		0x3f25c, 0x3f264,
1837 		0x3f270, 0x3f2b8,
1838 		0x3f2c0, 0x3f2e4,
1839 		0x3f2f8, 0x3f338,
1840 		0x3f340, 0x3f340,
1841 		0x3f348, 0x3f350,
1842 		0x3f35c, 0x3f364,
1843 		0x3f370, 0x3f3b8,
1844 		0x3f3c0, 0x3f3e4,
1845 		0x3f3f8, 0x3f428,
1846 		0x3f430, 0x3f448,
1847 		0x3f460, 0x3f468,
1848 		0x3f470, 0x3f49c,
1849 		0x3f4f0, 0x3f528,
1850 		0x3f530, 0x3f548,
1851 		0x3f560, 0x3f568,
1852 		0x3f570, 0x3f59c,
1853 		0x3f5f0, 0x3f638,
1854 		0x3f640, 0x3f640,
1855 		0x3f648, 0x3f650,
1856 		0x3f65c, 0x3f664,
1857 		0x3f670, 0x3f6b8,
1858 		0x3f6c0, 0x3f6e4,
1859 		0x3f6f8, 0x3f738,
1860 		0x3f740, 0x3f740,
1861 		0x3f748, 0x3f750,
1862 		0x3f75c, 0x3f764,
1863 		0x3f770, 0x3f7b8,
1864 		0x3f7c0, 0x3f7e4,
1865 		0x3f7f8, 0x3f7fc,
1866 		0x3f814, 0x3f814,
1867 		0x3f82c, 0x3f82c,
1868 		0x3f880, 0x3f88c,
1869 		0x3f8e8, 0x3f8ec,
1870 		0x3f900, 0x3f928,
1871 		0x3f930, 0x3f948,
1872 		0x3f960, 0x3f968,
1873 		0x3f970, 0x3f99c,
1874 		0x3f9f0, 0x3fa38,
1875 		0x3fa40, 0x3fa40,
1876 		0x3fa48, 0x3fa50,
1877 		0x3fa5c, 0x3fa64,
1878 		0x3fa70, 0x3fab8,
1879 		0x3fac0, 0x3fae4,
1880 		0x3faf8, 0x3fb10,
1881 		0x3fb28, 0x3fb28,
1882 		0x3fb3c, 0x3fb50,
1883 		0x3fbf0, 0x3fc10,
1884 		0x3fc28, 0x3fc28,
1885 		0x3fc3c, 0x3fc50,
1886 		0x3fcf0, 0x3fcfc,
1887 		0x40000, 0x4000c,
1888 		0x40040, 0x40050,
1889 		0x40060, 0x40068,
1890 		0x4007c, 0x4008c,
1891 		0x40094, 0x400b0,
1892 		0x400c0, 0x40144,
1893 		0x40180, 0x4018c,
1894 		0x40200, 0x40254,
1895 		0x40260, 0x40264,
1896 		0x40270, 0x40288,
1897 		0x40290, 0x40298,
1898 		0x402ac, 0x402c8,
1899 		0x402d0, 0x402e0,
1900 		0x402f0, 0x402f0,
1901 		0x40300, 0x4033c,
1902 		0x403f8, 0x403fc,
1903 		0x41304, 0x413c4,
1904 		0x41400, 0x4140c,
1905 		0x41414, 0x4141c,
1906 		0x41480, 0x414d0,
1907 		0x44000, 0x44054,
1908 		0x4405c, 0x44078,
1909 		0x440c0, 0x44174,
1910 		0x44180, 0x441ac,
1911 		0x441b4, 0x441b8,
1912 		0x441c0, 0x44254,
1913 		0x4425c, 0x44278,
1914 		0x442c0, 0x44374,
1915 		0x44380, 0x443ac,
1916 		0x443b4, 0x443b8,
1917 		0x443c0, 0x44454,
1918 		0x4445c, 0x44478,
1919 		0x444c0, 0x44574,
1920 		0x44580, 0x445ac,
1921 		0x445b4, 0x445b8,
1922 		0x445c0, 0x44654,
1923 		0x4465c, 0x44678,
1924 		0x446c0, 0x44774,
1925 		0x44780, 0x447ac,
1926 		0x447b4, 0x447b8,
1927 		0x447c0, 0x44854,
1928 		0x4485c, 0x44878,
1929 		0x448c0, 0x44974,
1930 		0x44980, 0x449ac,
1931 		0x449b4, 0x449b8,
1932 		0x449c0, 0x449fc,
1933 		0x45000, 0x45004,
1934 		0x45010, 0x45030,
1935 		0x45040, 0x45060,
1936 		0x45068, 0x45068,
1937 		0x45080, 0x45084,
1938 		0x450a0, 0x450b0,
1939 		0x45200, 0x45204,
1940 		0x45210, 0x45230,
1941 		0x45240, 0x45260,
1942 		0x45268, 0x45268,
1943 		0x45280, 0x45284,
1944 		0x452a0, 0x452b0,
1945 		0x460c0, 0x460e4,
1946 		0x47000, 0x4703c,
1947 		0x47044, 0x4708c,
1948 		0x47200, 0x47250,
1949 		0x47400, 0x47408,
1950 		0x47414, 0x47420,
1951 		0x47600, 0x47618,
1952 		0x47800, 0x47814,
1953 		0x48000, 0x4800c,
1954 		0x48040, 0x48050,
1955 		0x48060, 0x48068,
1956 		0x4807c, 0x4808c,
1957 		0x48094, 0x480b0,
1958 		0x480c0, 0x48144,
1959 		0x48180, 0x4818c,
1960 		0x48200, 0x48254,
1961 		0x48260, 0x48264,
1962 		0x48270, 0x48288,
1963 		0x48290, 0x48298,
1964 		0x482ac, 0x482c8,
1965 		0x482d0, 0x482e0,
1966 		0x482f0, 0x482f0,
1967 		0x48300, 0x4833c,
1968 		0x483f8, 0x483fc,
1969 		0x49304, 0x493c4,
1970 		0x49400, 0x4940c,
1971 		0x49414, 0x4941c,
1972 		0x49480, 0x494d0,
1973 		0x4c000, 0x4c054,
1974 		0x4c05c, 0x4c078,
1975 		0x4c0c0, 0x4c174,
1976 		0x4c180, 0x4c1ac,
1977 		0x4c1b4, 0x4c1b8,
1978 		0x4c1c0, 0x4c254,
1979 		0x4c25c, 0x4c278,
1980 		0x4c2c0, 0x4c374,
1981 		0x4c380, 0x4c3ac,
1982 		0x4c3b4, 0x4c3b8,
1983 		0x4c3c0, 0x4c454,
1984 		0x4c45c, 0x4c478,
1985 		0x4c4c0, 0x4c574,
1986 		0x4c580, 0x4c5ac,
1987 		0x4c5b4, 0x4c5b8,
1988 		0x4c5c0, 0x4c654,
1989 		0x4c65c, 0x4c678,
1990 		0x4c6c0, 0x4c774,
1991 		0x4c780, 0x4c7ac,
1992 		0x4c7b4, 0x4c7b8,
1993 		0x4c7c0, 0x4c854,
1994 		0x4c85c, 0x4c878,
1995 		0x4c8c0, 0x4c974,
1996 		0x4c980, 0x4c9ac,
1997 		0x4c9b4, 0x4c9b8,
1998 		0x4c9c0, 0x4c9fc,
1999 		0x4d000, 0x4d004,
2000 		0x4d010, 0x4d030,
2001 		0x4d040, 0x4d060,
2002 		0x4d068, 0x4d068,
2003 		0x4d080, 0x4d084,
2004 		0x4d0a0, 0x4d0b0,
2005 		0x4d200, 0x4d204,
2006 		0x4d210, 0x4d230,
2007 		0x4d240, 0x4d260,
2008 		0x4d268, 0x4d268,
2009 		0x4d280, 0x4d284,
2010 		0x4d2a0, 0x4d2b0,
2011 		0x4e0c0, 0x4e0e4,
2012 		0x4f000, 0x4f03c,
2013 		0x4f044, 0x4f08c,
2014 		0x4f200, 0x4f250,
2015 		0x4f400, 0x4f408,
2016 		0x4f414, 0x4f420,
2017 		0x4f600, 0x4f618,
2018 		0x4f800, 0x4f814,
2019 		0x50000, 0x50084,
2020 		0x50090, 0x500cc,
2021 		0x50400, 0x50400,
2022 		0x50800, 0x50884,
2023 		0x50890, 0x508cc,
2024 		0x50c00, 0x50c00,
2025 		0x51000, 0x5101c,
2026 		0x51300, 0x51308,
2027 	};
2028 
2029 	static const unsigned int t6_reg_ranges[] = {
2030 		0x1008, 0x101c,
2031 		0x1024, 0x10a8,
2032 		0x10b4, 0x10f8,
2033 		0x1100, 0x1114,
2034 		0x111c, 0x112c,
2035 		0x1138, 0x113c,
2036 		0x1144, 0x114c,
2037 		0x1180, 0x1184,
2038 		0x1190, 0x1194,
2039 		0x11a0, 0x11a4,
2040 		0x11b0, 0x11b4,
2041 		0x11fc, 0x1274,
2042 		0x1280, 0x133c,
2043 		0x1800, 0x18fc,
2044 		0x3000, 0x302c,
2045 		0x3060, 0x30b0,
2046 		0x30b8, 0x30d8,
2047 		0x30e0, 0x30fc,
2048 		0x3140, 0x357c,
2049 		0x35a8, 0x35cc,
2050 		0x35ec, 0x35ec,
2051 		0x3600, 0x5624,
2052 		0x56cc, 0x56ec,
2053 		0x56f4, 0x5720,
2054 		0x5728, 0x575c,
2055 		0x580c, 0x5814,
2056 		0x5890, 0x589c,
2057 		0x58a4, 0x58ac,
2058 		0x58b8, 0x58bc,
2059 		0x5940, 0x595c,
2060 		0x5980, 0x598c,
2061 		0x59b0, 0x59c8,
2062 		0x59d0, 0x59dc,
2063 		0x59fc, 0x5a18,
2064 		0x5a60, 0x5a6c,
2065 		0x5a80, 0x5a8c,
2066 		0x5a94, 0x5a9c,
2067 		0x5b94, 0x5bfc,
2068 		0x5c10, 0x5e48,
2069 		0x5e50, 0x5e94,
2070 		0x5ea0, 0x5eb0,
2071 		0x5ec0, 0x5ec0,
2072 		0x5ec8, 0x5ed0,
2073 		0x5ee0, 0x5ee0,
2074 		0x5ef0, 0x5ef0,
2075 		0x5f00, 0x5f00,
2076 		0x6000, 0x6020,
2077 		0x6028, 0x6040,
2078 		0x6058, 0x609c,
2079 		0x60a8, 0x619c,
2080 		0x7700, 0x7798,
2081 		0x77c0, 0x7880,
2082 		0x78cc, 0x78fc,
2083 		0x7b00, 0x7b58,
2084 		0x7b60, 0x7b84,
2085 		0x7b8c, 0x7c54,
2086 		0x7d00, 0x7d38,
2087 		0x7d40, 0x7d84,
2088 		0x7d8c, 0x7ddc,
2089 		0x7de4, 0x7e04,
2090 		0x7e10, 0x7e1c,
2091 		0x7e24, 0x7e38,
2092 		0x7e40, 0x7e44,
2093 		0x7e4c, 0x7e78,
2094 		0x7e80, 0x7edc,
2095 		0x7ee8, 0x7efc,
2096 		0x8dc0, 0x8de4,
2097 		0x8df8, 0x8e04,
2098 		0x8e10, 0x8e84,
2099 		0x8ea0, 0x8f88,
2100 		0x8fb8, 0x9058,
2101 		0x9060, 0x9060,
2102 		0x9068, 0x90f8,
2103 		0x9100, 0x9124,
2104 		0x9400, 0x9470,
2105 		0x9600, 0x9600,
2106 		0x9608, 0x9638,
2107 		0x9640, 0x9704,
2108 		0x9710, 0x971c,
2109 		0x9800, 0x9808,
2110 		0x9820, 0x983c,
2111 		0x9850, 0x9864,
2112 		0x9c00, 0x9c6c,
2113 		0x9c80, 0x9cec,
2114 		0x9d00, 0x9d6c,
2115 		0x9d80, 0x9dec,
2116 		0x9e00, 0x9e6c,
2117 		0x9e80, 0x9eec,
2118 		0x9f00, 0x9f6c,
2119 		0x9f80, 0xa020,
2120 		0xd004, 0xd03c,
2121 		0xd100, 0xd118,
2122 		0xd200, 0xd214,
2123 		0xd220, 0xd234,
2124 		0xd240, 0xd254,
2125 		0xd260, 0xd274,
2126 		0xd280, 0xd294,
2127 		0xd2a0, 0xd2b4,
2128 		0xd2c0, 0xd2d4,
2129 		0xd2e0, 0xd2f4,
2130 		0xd300, 0xd31c,
2131 		0xdfc0, 0xdfe0,
2132 		0xe000, 0xf008,
2133 		0xf010, 0xf018,
2134 		0xf020, 0xf028,
2135 		0x11000, 0x11014,
2136 		0x11048, 0x1106c,
2137 		0x11074, 0x11088,
2138 		0x11098, 0x11120,
2139 		0x1112c, 0x1117c,
2140 		0x11190, 0x112e0,
2141 		0x11300, 0x1130c,
2142 		0x12000, 0x1206c,
2143 		0x19040, 0x1906c,
2144 		0x19078, 0x19080,
2145 		0x1908c, 0x190e8,
2146 		0x190f0, 0x190f8,
2147 		0x19100, 0x19110,
2148 		0x19120, 0x19124,
2149 		0x19150, 0x19194,
2150 		0x1919c, 0x191b0,
2151 		0x191d0, 0x191e8,
2152 		0x19238, 0x19290,
2153 		0x192a4, 0x192b0,
2154 		0x192bc, 0x192bc,
2155 		0x19348, 0x1934c,
2156 		0x193f8, 0x19418,
2157 		0x19420, 0x19428,
2158 		0x19430, 0x19444,
2159 		0x1944c, 0x1946c,
2160 		0x19474, 0x19474,
2161 		0x19490, 0x194cc,
2162 		0x194f0, 0x194f8,
2163 		0x19c00, 0x19c48,
2164 		0x19c50, 0x19c80,
2165 		0x19c94, 0x19c98,
2166 		0x19ca0, 0x19cbc,
2167 		0x19ce4, 0x19ce4,
2168 		0x19cf0, 0x19cf8,
2169 		0x19d00, 0x19d28,
2170 		0x19d50, 0x19d78,
2171 		0x19d94, 0x19d98,
2172 		0x19da0, 0x19dc8,
2173 		0x19df0, 0x19e10,
2174 		0x19e50, 0x19e6c,
2175 		0x19ea0, 0x19ebc,
2176 		0x19ec4, 0x19ef4,
2177 		0x19f04, 0x19f2c,
2178 		0x19f34, 0x19f34,
2179 		0x19f40, 0x19f50,
2180 		0x19f90, 0x19fac,
2181 		0x19fc4, 0x19fc8,
2182 		0x19fd0, 0x19fe4,
2183 		0x1a000, 0x1a004,
2184 		0x1a010, 0x1a06c,
2185 		0x1a0b0, 0x1a0e4,
2186 		0x1a0ec, 0x1a0f8,
2187 		0x1a100, 0x1a108,
2188 		0x1a114, 0x1a120,
2189 		0x1a128, 0x1a130,
2190 		0x1a138, 0x1a138,
2191 		0x1a190, 0x1a1c4,
2192 		0x1a1fc, 0x1a1fc,
2193 		0x1e008, 0x1e00c,
2194 		0x1e040, 0x1e044,
2195 		0x1e04c, 0x1e04c,
2196 		0x1e284, 0x1e290,
2197 		0x1e2c0, 0x1e2c0,
2198 		0x1e2e0, 0x1e2e0,
2199 		0x1e300, 0x1e384,
2200 		0x1e3c0, 0x1e3c8,
2201 		0x1e408, 0x1e40c,
2202 		0x1e440, 0x1e444,
2203 		0x1e44c, 0x1e44c,
2204 		0x1e684, 0x1e690,
2205 		0x1e6c0, 0x1e6c0,
2206 		0x1e6e0, 0x1e6e0,
2207 		0x1e700, 0x1e784,
2208 		0x1e7c0, 0x1e7c8,
2209 		0x1e808, 0x1e80c,
2210 		0x1e840, 0x1e844,
2211 		0x1e84c, 0x1e84c,
2212 		0x1ea84, 0x1ea90,
2213 		0x1eac0, 0x1eac0,
2214 		0x1eae0, 0x1eae0,
2215 		0x1eb00, 0x1eb84,
2216 		0x1ebc0, 0x1ebc8,
2217 		0x1ec08, 0x1ec0c,
2218 		0x1ec40, 0x1ec44,
2219 		0x1ec4c, 0x1ec4c,
2220 		0x1ee84, 0x1ee90,
2221 		0x1eec0, 0x1eec0,
2222 		0x1eee0, 0x1eee0,
2223 		0x1ef00, 0x1ef84,
2224 		0x1efc0, 0x1efc8,
2225 		0x1f008, 0x1f00c,
2226 		0x1f040, 0x1f044,
2227 		0x1f04c, 0x1f04c,
2228 		0x1f284, 0x1f290,
2229 		0x1f2c0, 0x1f2c0,
2230 		0x1f2e0, 0x1f2e0,
2231 		0x1f300, 0x1f384,
2232 		0x1f3c0, 0x1f3c8,
2233 		0x1f408, 0x1f40c,
2234 		0x1f440, 0x1f444,
2235 		0x1f44c, 0x1f44c,
2236 		0x1f684, 0x1f690,
2237 		0x1f6c0, 0x1f6c0,
2238 		0x1f6e0, 0x1f6e0,
2239 		0x1f700, 0x1f784,
2240 		0x1f7c0, 0x1f7c8,
2241 		0x1f808, 0x1f80c,
2242 		0x1f840, 0x1f844,
2243 		0x1f84c, 0x1f84c,
2244 		0x1fa84, 0x1fa90,
2245 		0x1fac0, 0x1fac0,
2246 		0x1fae0, 0x1fae0,
2247 		0x1fb00, 0x1fb84,
2248 		0x1fbc0, 0x1fbc8,
2249 		0x1fc08, 0x1fc0c,
2250 		0x1fc40, 0x1fc44,
2251 		0x1fc4c, 0x1fc4c,
2252 		0x1fe84, 0x1fe90,
2253 		0x1fec0, 0x1fec0,
2254 		0x1fee0, 0x1fee0,
2255 		0x1ff00, 0x1ff84,
2256 		0x1ffc0, 0x1ffc8,
2257 		0x30000, 0x30030,
2258 		0x30100, 0x30168,
2259 		0x30190, 0x301a0,
2260 		0x301a8, 0x301b8,
2261 		0x301c4, 0x301c8,
2262 		0x301d0, 0x301d0,
2263 		0x30200, 0x30320,
2264 		0x30400, 0x304b4,
2265 		0x304c0, 0x3052c,
2266 		0x30540, 0x3061c,
2267 		0x30800, 0x308a0,
2268 		0x308c0, 0x30908,
2269 		0x30910, 0x309b8,
2270 		0x30a00, 0x30a04,
2271 		0x30a0c, 0x30a14,
2272 		0x30a1c, 0x30a2c,
2273 		0x30a44, 0x30a50,
2274 		0x30a74, 0x30a74,
2275 		0x30a7c, 0x30afc,
2276 		0x30b08, 0x30c24,
2277 		0x30d00, 0x30d14,
2278 		0x30d1c, 0x30d3c,
2279 		0x30d44, 0x30d4c,
2280 		0x30d54, 0x30d74,
2281 		0x30d7c, 0x30d7c,
2282 		0x30de0, 0x30de0,
2283 		0x30e00, 0x30ed4,
2284 		0x30f00, 0x30fa4,
2285 		0x30fc0, 0x30fc4,
2286 		0x31000, 0x31004,
2287 		0x31080, 0x310fc,
2288 		0x31208, 0x31220,
2289 		0x3123c, 0x31254,
2290 		0x31300, 0x31300,
2291 		0x31308, 0x3131c,
2292 		0x31338, 0x3133c,
2293 		0x31380, 0x31380,
2294 		0x31388, 0x313a8,
2295 		0x313b4, 0x313b4,
2296 		0x31400, 0x31420,
2297 		0x31438, 0x3143c,
2298 		0x31480, 0x31480,
2299 		0x314a8, 0x314a8,
2300 		0x314b0, 0x314b4,
2301 		0x314c8, 0x314d4,
2302 		0x31a40, 0x31a4c,
2303 		0x31af0, 0x31b20,
2304 		0x31b38, 0x31b3c,
2305 		0x31b80, 0x31b80,
2306 		0x31ba8, 0x31ba8,
2307 		0x31bb0, 0x31bb4,
2308 		0x31bc8, 0x31bd4,
2309 		0x32140, 0x3218c,
2310 		0x321f0, 0x321f4,
2311 		0x32200, 0x32200,
2312 		0x32218, 0x32218,
2313 		0x32400, 0x32400,
2314 		0x32408, 0x3241c,
2315 		0x32618, 0x32620,
2316 		0x32664, 0x32664,
2317 		0x326a8, 0x326a8,
2318 		0x326ec, 0x326ec,
2319 		0x32a00, 0x32abc,
2320 		0x32b00, 0x32b18,
2321 		0x32b20, 0x32b38,
2322 		0x32b40, 0x32b58,
2323 		0x32b60, 0x32b78,
2324 		0x32c00, 0x32c00,
2325 		0x32c08, 0x32c3c,
2326 		0x33000, 0x3302c,
2327 		0x33034, 0x33050,
2328 		0x33058, 0x33058,
2329 		0x33060, 0x3308c,
2330 		0x3309c, 0x330ac,
2331 		0x330c0, 0x330c0,
2332 		0x330c8, 0x330d0,
2333 		0x330d8, 0x330e0,
2334 		0x330ec, 0x3312c,
2335 		0x33134, 0x33150,
2336 		0x33158, 0x33158,
2337 		0x33160, 0x3318c,
2338 		0x3319c, 0x331ac,
2339 		0x331c0, 0x331c0,
2340 		0x331c8, 0x331d0,
2341 		0x331d8, 0x331e0,
2342 		0x331ec, 0x33290,
2343 		0x33298, 0x332c4,
2344 		0x332e4, 0x33390,
2345 		0x33398, 0x333c4,
2346 		0x333e4, 0x3342c,
2347 		0x33434, 0x33450,
2348 		0x33458, 0x33458,
2349 		0x33460, 0x3348c,
2350 		0x3349c, 0x334ac,
2351 		0x334c0, 0x334c0,
2352 		0x334c8, 0x334d0,
2353 		0x334d8, 0x334e0,
2354 		0x334ec, 0x3352c,
2355 		0x33534, 0x33550,
2356 		0x33558, 0x33558,
2357 		0x33560, 0x3358c,
2358 		0x3359c, 0x335ac,
2359 		0x335c0, 0x335c0,
2360 		0x335c8, 0x335d0,
2361 		0x335d8, 0x335e0,
2362 		0x335ec, 0x33690,
2363 		0x33698, 0x336c4,
2364 		0x336e4, 0x33790,
2365 		0x33798, 0x337c4,
2366 		0x337e4, 0x337fc,
2367 		0x33814, 0x33814,
2368 		0x33854, 0x33868,
2369 		0x33880, 0x3388c,
2370 		0x338c0, 0x338d0,
2371 		0x338e8, 0x338ec,
2372 		0x33900, 0x3392c,
2373 		0x33934, 0x33950,
2374 		0x33958, 0x33958,
2375 		0x33960, 0x3398c,
2376 		0x3399c, 0x339ac,
2377 		0x339c0, 0x339c0,
2378 		0x339c8, 0x339d0,
2379 		0x339d8, 0x339e0,
2380 		0x339ec, 0x33a90,
2381 		0x33a98, 0x33ac4,
2382 		0x33ae4, 0x33b10,
2383 		0x33b24, 0x33b28,
2384 		0x33b38, 0x33b50,
2385 		0x33bf0, 0x33c10,
2386 		0x33c24, 0x33c28,
2387 		0x33c38, 0x33c50,
2388 		0x33cf0, 0x33cfc,
2389 		0x34000, 0x34030,
2390 		0x34100, 0x34168,
2391 		0x34190, 0x341a0,
2392 		0x341a8, 0x341b8,
2393 		0x341c4, 0x341c8,
2394 		0x341d0, 0x341d0,
2395 		0x34200, 0x34320,
2396 		0x34400, 0x344b4,
2397 		0x344c0, 0x3452c,
2398 		0x34540, 0x3461c,
2399 		0x34800, 0x348a0,
2400 		0x348c0, 0x34908,
2401 		0x34910, 0x349b8,
2402 		0x34a00, 0x34a04,
2403 		0x34a0c, 0x34a14,
2404 		0x34a1c, 0x34a2c,
2405 		0x34a44, 0x34a50,
2406 		0x34a74, 0x34a74,
2407 		0x34a7c, 0x34afc,
2408 		0x34b08, 0x34c24,
2409 		0x34d00, 0x34d14,
2410 		0x34d1c, 0x34d3c,
2411 		0x34d44, 0x34d4c,
2412 		0x34d54, 0x34d74,
2413 		0x34d7c, 0x34d7c,
2414 		0x34de0, 0x34de0,
2415 		0x34e00, 0x34ed4,
2416 		0x34f00, 0x34fa4,
2417 		0x34fc0, 0x34fc4,
2418 		0x35000, 0x35004,
2419 		0x35080, 0x350fc,
2420 		0x35208, 0x35220,
2421 		0x3523c, 0x35254,
2422 		0x35300, 0x35300,
2423 		0x35308, 0x3531c,
2424 		0x35338, 0x3533c,
2425 		0x35380, 0x35380,
2426 		0x35388, 0x353a8,
2427 		0x353b4, 0x353b4,
2428 		0x35400, 0x35420,
2429 		0x35438, 0x3543c,
2430 		0x35480, 0x35480,
2431 		0x354a8, 0x354a8,
2432 		0x354b0, 0x354b4,
2433 		0x354c8, 0x354d4,
2434 		0x35a40, 0x35a4c,
2435 		0x35af0, 0x35b20,
2436 		0x35b38, 0x35b3c,
2437 		0x35b80, 0x35b80,
2438 		0x35ba8, 0x35ba8,
2439 		0x35bb0, 0x35bb4,
2440 		0x35bc8, 0x35bd4,
2441 		0x36140, 0x3618c,
2442 		0x361f0, 0x361f4,
2443 		0x36200, 0x36200,
2444 		0x36218, 0x36218,
2445 		0x36400, 0x36400,
2446 		0x36408, 0x3641c,
2447 		0x36618, 0x36620,
2448 		0x36664, 0x36664,
2449 		0x366a8, 0x366a8,
2450 		0x366ec, 0x366ec,
2451 		0x36a00, 0x36abc,
2452 		0x36b00, 0x36b18,
2453 		0x36b20, 0x36b38,
2454 		0x36b40, 0x36b58,
2455 		0x36b60, 0x36b78,
2456 		0x36c00, 0x36c00,
2457 		0x36c08, 0x36c3c,
2458 		0x37000, 0x3702c,
2459 		0x37034, 0x37050,
2460 		0x37058, 0x37058,
2461 		0x37060, 0x3708c,
2462 		0x3709c, 0x370ac,
2463 		0x370c0, 0x370c0,
2464 		0x370c8, 0x370d0,
2465 		0x370d8, 0x370e0,
2466 		0x370ec, 0x3712c,
2467 		0x37134, 0x37150,
2468 		0x37158, 0x37158,
2469 		0x37160, 0x3718c,
2470 		0x3719c, 0x371ac,
2471 		0x371c0, 0x371c0,
2472 		0x371c8, 0x371d0,
2473 		0x371d8, 0x371e0,
2474 		0x371ec, 0x37290,
2475 		0x37298, 0x372c4,
2476 		0x372e4, 0x37390,
2477 		0x37398, 0x373c4,
2478 		0x373e4, 0x3742c,
2479 		0x37434, 0x37450,
2480 		0x37458, 0x37458,
2481 		0x37460, 0x3748c,
2482 		0x3749c, 0x374ac,
2483 		0x374c0, 0x374c0,
2484 		0x374c8, 0x374d0,
2485 		0x374d8, 0x374e0,
2486 		0x374ec, 0x3752c,
2487 		0x37534, 0x37550,
2488 		0x37558, 0x37558,
2489 		0x37560, 0x3758c,
2490 		0x3759c, 0x375ac,
2491 		0x375c0, 0x375c0,
2492 		0x375c8, 0x375d0,
2493 		0x375d8, 0x375e0,
2494 		0x375ec, 0x37690,
2495 		0x37698, 0x376c4,
2496 		0x376e4, 0x37790,
2497 		0x37798, 0x377c4,
2498 		0x377e4, 0x377fc,
2499 		0x37814, 0x37814,
2500 		0x37854, 0x37868,
2501 		0x37880, 0x3788c,
2502 		0x378c0, 0x378d0,
2503 		0x378e8, 0x378ec,
2504 		0x37900, 0x3792c,
2505 		0x37934, 0x37950,
2506 		0x37958, 0x37958,
2507 		0x37960, 0x3798c,
2508 		0x3799c, 0x379ac,
2509 		0x379c0, 0x379c0,
2510 		0x379c8, 0x379d0,
2511 		0x379d8, 0x379e0,
2512 		0x379ec, 0x37a90,
2513 		0x37a98, 0x37ac4,
2514 		0x37ae4, 0x37b10,
2515 		0x37b24, 0x37b28,
2516 		0x37b38, 0x37b50,
2517 		0x37bf0, 0x37c10,
2518 		0x37c24, 0x37c28,
2519 		0x37c38, 0x37c50,
2520 		0x37cf0, 0x37cfc,
2521 		0x40040, 0x40040,
2522 		0x40080, 0x40084,
2523 		0x40100, 0x40100,
2524 		0x40140, 0x401bc,
2525 		0x40200, 0x40214,
2526 		0x40228, 0x40228,
2527 		0x40240, 0x40258,
2528 		0x40280, 0x40280,
2529 		0x40304, 0x40304,
2530 		0x40330, 0x4033c,
2531 		0x41304, 0x413c8,
2532 		0x413d0, 0x413dc,
2533 		0x413f0, 0x413f0,
2534 		0x41400, 0x4140c,
2535 		0x41414, 0x4141c,
2536 		0x41480, 0x414d0,
2537 		0x44000, 0x4407c,
2538 		0x440c0, 0x441ac,
2539 		0x441b4, 0x4427c,
2540 		0x442c0, 0x443ac,
2541 		0x443b4, 0x4447c,
2542 		0x444c0, 0x445ac,
2543 		0x445b4, 0x4467c,
2544 		0x446c0, 0x447ac,
2545 		0x447b4, 0x4487c,
2546 		0x448c0, 0x449ac,
2547 		0x449b4, 0x44a7c,
2548 		0x44ac0, 0x44bac,
2549 		0x44bb4, 0x44c7c,
2550 		0x44cc0, 0x44dac,
2551 		0x44db4, 0x44e7c,
2552 		0x44ec0, 0x44fac,
2553 		0x44fb4, 0x4507c,
2554 		0x450c0, 0x451ac,
2555 		0x451b4, 0x451fc,
2556 		0x45800, 0x45804,
2557 		0x45810, 0x45830,
2558 		0x45840, 0x45860,
2559 		0x45868, 0x45868,
2560 		0x45880, 0x45884,
2561 		0x458a0, 0x458b0,
2562 		0x45a00, 0x45a04,
2563 		0x45a10, 0x45a30,
2564 		0x45a40, 0x45a60,
2565 		0x45a68, 0x45a68,
2566 		0x45a80, 0x45a84,
2567 		0x45aa0, 0x45ab0,
2568 		0x460c0, 0x460e4,
2569 		0x47000, 0x4703c,
2570 		0x47044, 0x4708c,
2571 		0x47200, 0x47250,
2572 		0x47400, 0x47408,
2573 		0x47414, 0x47420,
2574 		0x47600, 0x47618,
2575 		0x47800, 0x47814,
2576 		0x47820, 0x4782c,
2577 		0x50000, 0x50084,
2578 		0x50090, 0x500cc,
2579 		0x50300, 0x50384,
2580 		0x50400, 0x50400,
2581 		0x50800, 0x50884,
2582 		0x50890, 0x508cc,
2583 		0x50b00, 0x50b84,
2584 		0x50c00, 0x50c00,
2585 		0x51000, 0x51020,
2586 		0x51028, 0x510b0,
2587 		0x51300, 0x51324,
2588 	};
2589 
2590 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2591 	const unsigned int *reg_ranges;
2592 	int reg_ranges_size, range;
2593 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2594 
2595 	/* Select the right set of register ranges to dump depending on the
2596 	 * adapter chip type.
2597 	 */
2598 	switch (chip_version) {
2599 	case CHELSIO_T4:
2600 		reg_ranges = t4_reg_ranges;
2601 		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2602 		break;
2603 
2604 	case CHELSIO_T5:
2605 		reg_ranges = t5_reg_ranges;
2606 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2607 		break;
2608 
2609 	case CHELSIO_T6:
2610 		reg_ranges = t6_reg_ranges;
2611 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2612 		break;
2613 
2614 	default:
2615 		dev_err(adap->pdev_dev,
2616 			"Unsupported chip version %d\n", chip_version);
2617 		return;
2618 	}
2619 
2620 	/* Clear the register buffer and insert the appropriate register
2621 	 * values selected by the above register ranges.
2622 	 */
2623 	memset(buf, 0, buf_size);
2624 	for (range = 0; range < reg_ranges_size; range += 2) {
2625 		unsigned int reg = reg_ranges[range];
2626 		unsigned int last_reg = reg_ranges[range + 1];
2627 		u32 *bufp = (u32 *)((char *)buf + reg);
2628 
2629 		/* Iterate across the register range filling in the register
2630 		 * buffer but don't write past the end of the register buffer.
2631 		 */
2632 		while (reg <= last_reg && bufp < buf_end) {
2633 			*bufp++ = t4_read_reg(adap, reg);
2634 			reg += sizeof(u32);
2635 		}
2636 	}
2637 }
2638 
#define EEPROM_STAT_ADDR   0x7bfc	/* VPD address of write-protect word (written by t4_seeprom_wp) */
#define VPD_SIZE           0x800	/* real VPD area size reported via pci_set_vpd_size() */
#define VPD_BASE           0x400	/* normal offset of card info within the VPD */
#define VPD_BASE_OLD       0		/* offset used by early cards */
#define VPD_LEN            1024		/* number of VPD bytes read and parsed */
#define CHELSIO_VPD_UNIQUE_ID 0x82	/* expected first byte of a Chelsio VPD */
2645 
2646 /**
2647  * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2648  * @phys_addr: the physical EEPROM address
2649  * @fn: the PCI function number
2650  * @sz: size of function-specific area
2651  *
2652  * Translate a physical EEPROM address to virtual.  The first 1K is
2653  * accessed through virtual addresses starting at 31K, the rest is
2654  * accessed through virtual addresses starting at 0.
2655  *
2656  * The mapping is as follows:
2657  * [0..1K) -> [31K..32K)
2658  * [1K..1K+A) -> [31K-A..31K)
2659  * [1K+A..ES) -> [0..ES-A-1K)
2660  *
2661  * where A = @fn * @sz, and ES = EEPROM size.
2662  */
2663 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2664 {
2665 	fn *= sz;
2666 	if (phys_addr < 1024)
2667 		return phys_addr + (31 << 10);
2668 	if (phys_addr < 1024 + fn)
2669 		return 31744 - fn + phys_addr - 1024;
2670 	if (phys_addr < EEPROMSIZE)
2671 		return phys_addr - 1024 - fn;
2672 	return -EINVAL;
2673 }
2674 
2675 /**
2676  *	t4_seeprom_wp - enable/disable EEPROM write protection
2677  *	@adapter: the adapter
2678  *	@enable: whether to enable or disable write protection
2679  *
2680  *	Enables or disables write protection on the serial EEPROM.
2681  */
2682 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2683 {
2684 	unsigned int v = enable ? 0xc : 0;
2685 	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2686 	return ret < 0 ? ret : 0;
2687 }
2688 
2689 /**
2690  *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2691  *	@adapter: adapter to read
2692  *	@p: where to store the parameters
2693  *
2694  *	Reads card parameters stored in VPD EEPROM.
2695  */
2696 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2697 {
2698 	int i, ret = 0, addr;
2699 	int ec, sn, pn, na;
2700 	u8 *vpd, csum;
2701 	unsigned int vpdr_len, kw_offset, id_len;
2702 
2703 	vpd = vmalloc(VPD_LEN);
2704 	if (!vpd)
2705 		return -ENOMEM;
2706 
2707 	/* We have two VPD data structures stored in the adapter VPD area.
2708 	 * By default, Linux calculates the size of the VPD area by traversing
2709 	 * the first VPD area at offset 0x0, so we need to tell the OS what
2710 	 * our real VPD size is.
2711 	 */
2712 	ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2713 	if (ret < 0)
2714 		goto out;
2715 
2716 	/* Card information normally starts at VPD_BASE but early cards had
2717 	 * it at 0.
2718 	 */
2719 	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2720 	if (ret < 0)
2721 		goto out;
2722 
2723 	/* The VPD shall have a unique identifier specified by the PCI SIG.
2724 	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2725 	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2726 	 * is expected to automatically put this entry at the
2727 	 * beginning of the VPD.
2728 	 */
2729 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2730 
2731 	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2732 	if (ret < 0)
2733 		goto out;
2734 
2735 	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2736 		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2737 		ret = -EINVAL;
2738 		goto out;
2739 	}
2740 
2741 	id_len = pci_vpd_lrdt_size(vpd);
2742 	if (id_len > ID_LEN)
2743 		id_len = ID_LEN;
2744 
2745 	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2746 	if (i < 0) {
2747 		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2748 		ret = -EINVAL;
2749 		goto out;
2750 	}
2751 
2752 	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2753 	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2754 	if (vpdr_len + kw_offset > VPD_LEN) {
2755 		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2756 		ret = -EINVAL;
2757 		goto out;
2758 	}
2759 
2760 #define FIND_VPD_KW(var, name) do { \
2761 	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2762 	if (var < 0) { \
2763 		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2764 		ret = -EINVAL; \
2765 		goto out; \
2766 	} \
2767 	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2768 } while (0)
2769 
2770 	FIND_VPD_KW(i, "RV");
2771 	for (csum = 0; i >= 0; i--)
2772 		csum += vpd[i];
2773 
2774 	if (csum) {
2775 		dev_err(adapter->pdev_dev,
2776 			"corrupted VPD EEPROM, actual csum %u\n", csum);
2777 		ret = -EINVAL;
2778 		goto out;
2779 	}
2780 
2781 	FIND_VPD_KW(ec, "EC");
2782 	FIND_VPD_KW(sn, "SN");
2783 	FIND_VPD_KW(pn, "PN");
2784 	FIND_VPD_KW(na, "NA");
2785 #undef FIND_VPD_KW
2786 
2787 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2788 	strim(p->id);
2789 	memcpy(p->ec, vpd + ec, EC_LEN);
2790 	strim(p->ec);
2791 	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2792 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2793 	strim(p->sn);
2794 	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2795 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2796 	strim(p->pn);
2797 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2798 	strim((char *)p->na);
2799 
2800 out:
2801 	vfree(vpd);
2802 	return ret < 0 ? ret : 0;
2803 }
2804 
2805 /**
2806  *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2807  *	@adapter: adapter to read
2808  *	@p: where to store the parameters
2809  *
2810  *	Reads card parameters stored in VPD EEPROM and retrieves the Core
2811  *	Clock.  This can only be called after a connection to the firmware
2812  *	is established.
2813  */
2814 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2815 {
2816 	u32 cclk_param, cclk_val;
2817 	int ret;
2818 
2819 	/* Grab the raw VPD parameters.
2820 	 */
2821 	ret = t4_get_raw_vpd_params(adapter, p);
2822 	if (ret)
2823 		return ret;
2824 
2825 	/* Ask firmware for the Core Clock since it knows how to translate the
2826 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
2827 	 */
2828 	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2829 		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2830 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2831 			      1, &cclk_param, &cclk_val);
2832 
2833 	if (ret)
2834 		return ret;
2835 	p->cclk = cclk_val;
2836 
2837 	return 0;
2838 }
2839 
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes, issued to the flash via sf1_write() */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
2853 
2854 /**
2855  *	sf1_read - read data from the serial flash
2856  *	@adapter: the adapter
2857  *	@byte_cnt: number of bytes to read
2858  *	@cont: whether another operation will be chained
2859  *	@lock: whether to lock SF for PL access only
2860  *	@valp: where to store the read data
2861  *
2862  *	Reads up to 4 bytes of data from the serial flash.  The location of
2863  *	the read needs to be specified prior to calling this by issuing the
2864  *	appropriate commands to the serial flash.
2865  */
2866 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2867 		    int lock, u32 *valp)
2868 {
2869 	int ret;
2870 
2871 	if (!byte_cnt || byte_cnt > 4)
2872 		return -EINVAL;
2873 	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2874 		return -EBUSY;
2875 	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2876 		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2877 	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2878 	if (!ret)
2879 		*valp = t4_read_reg(adapter, SF_DATA_A);
2880 	return ret;
2881 }
2882 
2883 /**
2884  *	sf1_write - write data to the serial flash
2885  *	@adapter: the adapter
2886  *	@byte_cnt: number of bytes to write
2887  *	@cont: whether another operation will be chained
2888  *	@lock: whether to lock SF for PL access only
2889  *	@val: value to write
2890  *
2891  *	Writes up to 4 bytes of data to the serial flash.  The location of
2892  *	the write needs to be specified prior to calling this by issuing the
2893  *	appropriate commands to the serial flash.
2894  */
2895 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2896 		     int lock, u32 val)
2897 {
2898 	if (!byte_cnt || byte_cnt > 4)
2899 		return -EINVAL;
2900 	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2901 		return -EBUSY;
2902 	t4_write_reg(adapter, SF_DATA_A, val);
2903 	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2904 		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2905 	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2906 }
2907 
2908 /**
2909  *	flash_wait_op - wait for a flash operation to complete
2910  *	@adapter: the adapter
2911  *	@attempts: max number of polls of the status register
2912  *	@delay: delay between polls in ms
2913  *
2914  *	Wait for a flash operation to complete by polling the status register.
2915  */
2916 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2917 {
2918 	int ret;
2919 	u32 status;
2920 
2921 	while (1) {
2922 		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2923 		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2924 			return ret;
2925 		if (!(status & 1))
2926 			return 0;
2927 		if (--attempts == 0)
2928 			return -EAGAIN;
2929 		if (delay)
2930 			msleep(delay);
2931 	}
2932 }
2933 
2934 /**
2935  *	t4_read_flash - read words from serial flash
2936  *	@adapter: the adapter
2937  *	@addr: the start address for the read
2938  *	@nwords: how many 32-bit words to read
2939  *	@data: where to store the read data
2940  *	@byte_oriented: whether to store data as bytes or as words
2941  *
2942  *	Read the specified number of 32-bit words from the serial flash.
2943  *	If @byte_oriented is set the read data is stored as a byte array
2944  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
2945  *	natural endianness.
2946  */
2947 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2948 		  unsigned int nwords, u32 *data, int byte_oriented)
2949 {
2950 	int ret;
2951 
2952 	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
2953 		return -EINVAL;
2954 
2955 	addr = swab32(addr) | SF_RD_DATA_FAST;
2956 
2957 	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2958 	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
2959 		return ret;
2960 
2961 	for ( ; nwords; nwords--, data++) {
2962 		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2963 		if (nwords == 1)
2964 			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2965 		if (ret)
2966 			return ret;
2967 		if (byte_oriented)
2968 			*data = (__force __u32)(cpu_to_be32(*data));
2969 	}
2970 	return 0;
2971 }
2972 
2973 /**
2974  *	t4_write_flash - write up to a page of data to the serial flash
2975  *	@adapter: the adapter
2976  *	@addr: the start address to write
2977  *	@n: length of data to write in bytes
2978  *	@data: the data to write
2979  *
2980  *	Writes up to a page of data (256 bytes) to the serial flash starting
2981  *	at the given address.  All the data must be written to the same page.
2982  */
2983 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2984 			  unsigned int n, const u8 *data)
2985 {
2986 	int ret;
2987 	u32 buf[64];
2988 	unsigned int i, c, left, val, offset = addr & 0xff;
2989 
2990 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
2991 		return -EINVAL;
2992 
2993 	val = swab32(addr) | SF_PROG_PAGE;
2994 
2995 	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2996 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2997 		goto unlock;
2998 
2999 	for (left = n; left; left -= c) {
3000 		c = min(left, 4U);
3001 		for (val = 0, i = 0; i < c; ++i)
3002 			val = (val << 8) + *data++;
3003 
3004 		ret = sf1_write(adapter, c, c != left, 1, val);
3005 		if (ret)
3006 			goto unlock;
3007 	}
3008 	ret = flash_wait_op(adapter, 8, 1);
3009 	if (ret)
3010 		goto unlock;
3011 
3012 	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3013 
3014 	/* Read the page to verify the write succeeded */
3015 	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
3016 	if (ret)
3017 		return ret;
3018 
3019 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
3020 		dev_err(adapter->pdev_dev,
3021 			"failed to correctly write the flash page at %#x\n",
3022 			addr);
3023 		return -EIO;
3024 	}
3025 	return 0;
3026 
3027 unlock:
3028 	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3029 	return ret;
3030 }
3031 
3032 /**
3033  *	t4_get_fw_version - read the firmware version
3034  *	@adapter: the adapter
3035  *	@vers: where to place the version
3036  *
3037  *	Reads the FW version from flash.
3038  */
3039 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3040 {
3041 	return t4_read_flash(adapter, FLASH_FW_START +
3042 			     offsetof(struct fw_hdr, fw_ver), 1,
3043 			     vers, 0);
3044 }
3045 
3046 /**
3047  *	t4_get_bs_version - read the firmware bootstrap version
3048  *	@adapter: the adapter
3049  *	@vers: where to place the version
3050  *
3051  *	Reads the FW Bootstrap version from flash.
3052  */
3053 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3054 {
3055 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3056 			     offsetof(struct fw_hdr, fw_ver), 1,
3057 			     vers, 0);
3058 }
3059 
3060 /**
3061  *	t4_get_tp_version - read the TP microcode version
3062  *	@adapter: the adapter
3063  *	@vers: where to place the version
3064  *
3065  *	Reads the TP microcode version from flash.
3066  */
3067 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3068 {
3069 	return t4_read_flash(adapter, FLASH_FW_START +
3070 			     offsetof(struct fw_hdr, tp_microcode_ver),
3071 			     1, vers, 0);
3072 }
3073 
3074 /**
3075  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3076  *	@adapter: the adapter
3077  *	@vers: where to place the version
3078  *
3079  *	Reads the Expansion ROM header from FLASH and returns the version
3080  *	number (if present) through the @vers return value pointer.  We return
3081  *	this in the Firmware Version Format since it's convenient.  Return
3082  *	0 on success, -ENOENT if no Expansion ROM is present.
3083  */
3084 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3085 {
3086 	struct exprom_header {
3087 		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3088 		unsigned char hdr_ver[4];	/* Expansion ROM version */
3089 	} *hdr;
3090 	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3091 					   sizeof(u32))];
3092 	int ret;
3093 
3094 	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3095 			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3096 			    0);
3097 	if (ret)
3098 		return ret;
3099 
3100 	hdr = (struct exprom_header *)exprom_header_buf;
3101 	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3102 		return -ENOENT;
3103 
3104 	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3105 		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3106 		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3107 		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3108 	return 0;
3109 }
3110 
3111 /**
3112  *      t4_get_vpd_version - return the VPD version
3113  *      @adapter: the adapter
3114  *      @vers: where to place the version
3115  *
3116  *      Reads the VPD via the Firmware interface (thus this can only be called
3117  *      once we're ready to issue Firmware commands).  The format of the
3118  *      VPD version is adapter specific.  Returns 0 on success, an error on
3119  *      failure.
3120  *
3121  *      Note that early versions of the Firmware didn't include the ability
3122  *      to retrieve the VPD version, so we zero-out the return-value parameter
3123  *      in that case to avoid leaving it with garbage in it.
3124  *
3125  *      Also note that the Firmware will return its cached copy of the VPD
3126  *      Revision ID, not the actual Revision ID as written in the Serial
3127  *      EEPROM.  This is only an issue if a new VPD has been written and the
3128  *      Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3129  *      to defer calling this routine till after a FW_RESET_CMD has been issued
3130  *      if the Host Driver will be performing a full adapter initialization.
3131  */
3132 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3133 {
3134 	u32 vpdrev_param;
3135 	int ret;
3136 
3137 	vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3138 			FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3139 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3140 			      1, &vpdrev_param, vers);
3141 	if (ret)
3142 		*vers = 0;
3143 	return ret;
3144 }
3145 
3146 /**
3147  *      t4_get_scfg_version - return the Serial Configuration version
3148  *      @adapter: the adapter
3149  *      @vers: where to place the version
3150  *
3151  *      Reads the Serial Configuration Version via the Firmware interface
3152  *      (thus this can only be called once we're ready to issue Firmware
3153  *      commands).  The format of the Serial Configuration version is
3154  *      adapter specific.  Returns 0 on success, an error on failure.
3155  *
3156  *      Note that early versions of the Firmware didn't include the ability
3157  *      to retrieve the Serial Configuration version, so we zero-out the
3158  *      return-value parameter in that case to avoid leaving it with
3159  *      garbage in it.
3160  *
3161  *      Also note that the Firmware will return its cached copy of the Serial
3162  *      Initialization Revision ID, not the actual Revision ID as written in
3163  *      the Serial EEPROM.  This is only an issue if a new VPD has been written
3164  *      and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3165  *      it's best to defer calling this routine till after a FW_RESET_CMD has
3166  *      been issued if the Host Driver will be performing a full adapter
3167  *      initialization.
3168  */
3169 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3170 {
3171 	u32 scfgrev_param;
3172 	int ret;
3173 
3174 	scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3175 			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3176 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3177 			      1, &scfgrev_param, vers);
3178 	if (ret)
3179 		*vers = 0;
3180 	return ret;
3181 }
3182 
3183 /**
3184  *      t4_get_version_info - extract various chip/firmware version information
3185  *      @adapter: the adapter
3186  *
3187  *      Reads various chip/firmware version numbers and stores them into the
3188  *      adapter Adapter Parameters structure.  If any of the efforts fails
3189  *      the first failure will be returned, but all of the version numbers
3190  *      will be read.
3191  */
3192 int t4_get_version_info(struct adapter *adapter)
3193 {
3194 	int ret = 0;
3195 
3196 	#define FIRST_RET(__getvinfo) \
3197 	do { \
3198 		int __ret = __getvinfo; \
3199 		if (__ret && !ret) \
3200 			ret = __ret; \
3201 	} while (0)
3202 
3203 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3204 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3205 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3206 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3207 	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3208 	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3209 
3210 	#undef FIRST_RET
3211 	return ret;
3212 }
3213 
3214 /**
3215  *      t4_dump_version_info - dump all of the adapter configuration IDs
3216  *      @adapter: the adapter
3217  *
3218  *      Dumps all of the various bits of adapter configuration version/revision
3219  *      IDs information.  This is typically called at some point after
3220  *      t4_get_version_info() has been called.
3221  */
3222 void t4_dump_version_info(struct adapter *adapter)
3223 {
3224 	/* Device information */
3225 	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3226 		 adapter->params.vpd.id,
3227 		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3228 	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3229 		 adapter->params.vpd.sn, adapter->params.vpd.pn);
3230 
3231 	/* Firmware Version */
3232 	if (!adapter->params.fw_vers)
3233 		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3234 	else
3235 		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3236 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3237 			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3238 			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3239 			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3240 
3241 	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3242 	 * Firmware, so dev_info() is more appropriate here.)
3243 	 */
3244 	if (!adapter->params.bs_vers)
3245 		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3246 	else
3247 		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3248 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3249 			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3250 			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3251 			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3252 
3253 	/* TP Microcode Version */
3254 	if (!adapter->params.tp_vers)
3255 		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3256 	else
3257 		dev_info(adapter->pdev_dev,
3258 			 "TP Microcode version: %u.%u.%u.%u\n",
3259 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3260 			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3261 			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3262 			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3263 
3264 	/* Expansion ROM version */
3265 	if (!adapter->params.er_vers)
3266 		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3267 	else
3268 		dev_info(adapter->pdev_dev,
3269 			 "Expansion ROM version: %u.%u.%u.%u\n",
3270 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3271 			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3272 			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3273 			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3274 
3275 	/* Serial Configuration version */
3276 	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3277 		 adapter->params.scfg_vers);
3278 
3279 	/* VPD Version */
3280 	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3281 		 adapter->params.vpd_vers);
3282 }
3283 
3284 /**
3285  *	t4_check_fw_version - check if the FW is supported with this driver
3286  *	@adap: the adapter
3287  *
3288  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
3289  *	if there's exact match, a negative error if the version could not be
3290  *	read or there's a major version mismatch
3291  */
3292 int t4_check_fw_version(struct adapter *adap)
3293 {
3294 	int i, ret, major, minor, micro;
3295 	int exp_major, exp_minor, exp_micro;
3296 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3297 
3298 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3299 	/* Try multiple times before returning error */
3300 	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3301 		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3302 
3303 	if (ret)
3304 		return ret;
3305 
3306 	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3307 	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3308 	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3309 
3310 	switch (chip_version) {
3311 	case CHELSIO_T4:
3312 		exp_major = T4FW_MIN_VERSION_MAJOR;
3313 		exp_minor = T4FW_MIN_VERSION_MINOR;
3314 		exp_micro = T4FW_MIN_VERSION_MICRO;
3315 		break;
3316 	case CHELSIO_T5:
3317 		exp_major = T5FW_MIN_VERSION_MAJOR;
3318 		exp_minor = T5FW_MIN_VERSION_MINOR;
3319 		exp_micro = T5FW_MIN_VERSION_MICRO;
3320 		break;
3321 	case CHELSIO_T6:
3322 		exp_major = T6FW_MIN_VERSION_MAJOR;
3323 		exp_minor = T6FW_MIN_VERSION_MINOR;
3324 		exp_micro = T6FW_MIN_VERSION_MICRO;
3325 		break;
3326 	default:
3327 		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3328 			adap->chip);
3329 		return -EINVAL;
3330 	}
3331 
3332 	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3333 	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3334 		dev_err(adap->pdev_dev,
3335 			"Card has firmware version %u.%u.%u, minimum "
3336 			"supported firmware is %u.%u.%u.\n", major, minor,
3337 			micro, exp_major, exp_minor, exp_micro);
3338 		return -EFAULT;
3339 	}
3340 	return 0;
3341 }
3342 
3343 /* Is the given firmware API compatible with the one the driver was compiled
3344  * with?
3345  */
3346 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3347 {
3348 
3349 	/* short circuit if it's the exact same firmware version */
3350 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3351 		return 1;
3352 
3353 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3354 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3355 	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3356 		return 1;
3357 #undef SAME_INTF
3358 
3359 	return 0;
3360 }
3361 
3362 /* The firmware in the filesystem is usable, but should it be installed?
3363  * This routine explains itself in detail if it indicates the filesystem
3364  * firmware should be installed.
3365  */
3366 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3367 				int k, int c)
3368 {
3369 	const char *reason;
3370 
3371 	if (!card_fw_usable) {
3372 		reason = "incompatible or unusable";
3373 		goto install;
3374 	}
3375 
3376 	if (k > c) {
3377 		reason = "older than the version supported with this driver";
3378 		goto install;
3379 	}
3380 
3381 	return 0;
3382 
3383 install:
3384 	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3385 		"installing firmware %u.%u.%u.%u on card.\n",
3386 		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3387 		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3388 		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3389 		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3390 
3391 	return 1;
3392 }
3393 
/**
 *	t4_prep_fw - decide which firmware to use and install it if needed
 *	@adap: the adapter
 *	@fw_info: firmware version/interface info the driver was compiled with
 *	@fw_data: firmware image obtained from the filesystem (may be NULL)
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: buffer that receives the firmware header read from flash
 *	@state: current device state; flashing only happens in DEV_STATE_UNINIT
 *	@reset: cleared when a firmware install has already reset the chip
 *
 *	Compares the firmware on the card against the filesystem image and
 *	the version the driver was compiled with, optionally upgrading the
 *	card, and caches the chosen firmware/TP versions in adap->params.
 *
 *	NOTE(review): unlike most t4_* routines, this one returns 0 or a
 *	POSITIVE errno value -- the helpers' negative returns are negated
 *	below.  Keep callers' error handling in mind when changing it.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* The filesystem image is only a candidate if the caller supplied
	 * one and its interface versions are compatible with the driver.
	 */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		/* positive errno, consistent with the negated returns above */
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
3478 
3479 /**
3480  *	t4_flash_erase_sectors - erase a range of flash sectors
3481  *	@adapter: the adapter
3482  *	@start: the first sector to erase
3483  *	@end: the last sector to erase
3484  *
3485  *	Erases the sectors in the given inclusive range.
3486  */
3487 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3488 {
3489 	int ret = 0;
3490 
3491 	if (end >= adapter->params.sf_nsec)
3492 		return -EINVAL;
3493 
3494 	while (start <= end) {
3495 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3496 		    (ret = sf1_write(adapter, 4, 0, 1,
3497 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3498 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3499 			dev_err(adapter->pdev_dev,
3500 				"erase of flash sector %d failed, error %d\n",
3501 				start, ret);
3502 			break;
3503 		}
3504 		start++;
3505 	}
3506 	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3507 	return ret;
3508 }
3509 
3510 /**
3511  *	t4_flash_cfg_addr - return the address of the flash configuration file
3512  *	@adapter: the adapter
3513  *
3514  *	Return the address within the flash where the Firmware Configuration
3515  *	File is stored.
3516  */
3517 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3518 {
3519 	if (adapter->params.sf_size == 0x100000)
3520 		return FLASH_FPGA_CFG_START;
3521 	else
3522 		return FLASH_CFG_START;
3523 }
3524 
3525 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
3526  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3527  * and emit an error message for mismatched firmware to save our caller the
3528  * effort ...
3529  */
3530 static bool t4_fw_matches_chip(const struct adapter *adap,
3531 			       const struct fw_hdr *hdr)
3532 {
3533 	/* The expression below will return FALSE for any unsupported adapter
3534 	 * which will keep us "honest" in the future ...
3535 	 */
3536 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3537 	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3538 	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3539 		return true;
3540 
3541 	dev_err(adap->pdev_dev,
3542 		"FW image (%d) is not suitable for this adapter (%d)\n",
3543 		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3544 	return false;
3545 }
3546 
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash after
 *	validating its size, header, checksum and target chip.  On success,
 *	the cached firmware version is refreshed from the flash.  Returns 0
 *	on success or a negative errno.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec = FLASH_FW_START_SEC;
	unsigned int fw_size = FLASH_FW_MAX_SIZE;
	unsigned int fw_start = FLASH_FW_START;

	/* Basic image sanity checks before touching the flash. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* A valid image's big-endian 32-bit words sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* Write the remaining pages (the first page was written above). */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally, patch the real firmware version into the header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
3635 
3636 /**
3637  *	t4_phy_fw_ver - return current PHY firmware version
3638  *	@adap: the adapter
3639  *	@phy_fw_ver: return value buffer for PHY firmware version
3640  *
3641  *	Returns the current version of external PHY firmware on the
3642  *	adapter.
3643  */
3644 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3645 {
3646 	u32 param, val;
3647 	int ret;
3648 
3649 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3650 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3651 		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3652 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3653 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3654 			      &param, &val);
3655 	if (ret < 0)
3656 		return ret;
3657 	*phy_fw_ver = val;
3658 	return 0;
3659 }
3660 
3661 /**
3662  *	t4_load_phy_fw - download port PHY firmware
3663  *	@adap: the adapter
3664  *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
3665  *	@win_lock: the lock to use to guard the memory copy
3666  *	@phy_fw_version: function to check PHY firmware versions
3667  *	@phy_fw_data: the PHY firmware image to write
3668  *	@phy_fw_size: image size
3669  *
3670  *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
3671  *	@phy_fw_version is supplied, then it will be used to determine if
3672  *	it's necessary to perform the transfer by comparing the version
3673  *	of any existing adapter PHY firmware with that of the passed in
3674  *	PHY firmware image.  If @win_lock is non-NULL then it will be used
3675  *	around the call to t4_memory_rw() which transfers the PHY firmware
3676  *	to the adapter.
3677  *
3678  *	A negative error number will be returned if an error occurs.  If
3679  *	version number support is available and there's no need to upgrade
3680  *	the firmware, 0 will be returned.  If firmware is successfully
3681  *	transferred to the adapter, 1 will be retured.
3682  *
3683  *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
3684  *	a result, a RESET of the adapter would cause that RAM to lose its
3685  *	contents.  Thus, loading PHY firmware on such adapters must happen
3686  *	after any FW_RESET_CMDs ...
3687  */
3688 int t4_load_phy_fw(struct adapter *adap,
3689 		   int win, spinlock_t *win_lock,
3690 		   int (*phy_fw_version)(const u8 *, size_t),
3691 		   const u8 *phy_fw_data, size_t phy_fw_size)
3692 {
3693 	unsigned long mtype = 0, maddr = 0;
3694 	u32 param, val;
3695 	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3696 	int ret;
3697 
3698 	/* If we have version number support, then check to see if the adapter
3699 	 * already has up-to-date PHY firmware loaded.
3700 	 */
3701 	 if (phy_fw_version) {
3702 		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3703 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3704 		if (ret < 0)
3705 			return ret;
3706 
3707 		if (cur_phy_fw_ver >= new_phy_fw_vers) {
3708 			CH_WARN(adap, "PHY Firmware already up-to-date, "
3709 				"version %#x\n", cur_phy_fw_ver);
3710 			return 0;
3711 		}
3712 	}
3713 
3714 	/* Ask the firmware where it wants us to copy the PHY firmware image.
3715 	 * The size of the file requires a special version of the READ coommand
3716 	 * which will pass the file size via the values field in PARAMS_CMD and
3717 	 * retrieve the return value from firmware and place it in the same
3718 	 * buffer values
3719 	 */
3720 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3721 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3722 		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3723 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3724 	val = phy_fw_size;
3725 	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3726 				 &param, &val, 1, true);
3727 	if (ret < 0)
3728 		return ret;
3729 	mtype = val >> 8;
3730 	maddr = (val & 0xff) << 16;
3731 
3732 	/* Copy the supplied PHY Firmware image to the adapter memory location
3733 	 * allocated by the adapter firmware.
3734 	 */
3735 	if (win_lock)
3736 		spin_lock_bh(win_lock);
3737 	ret = t4_memory_rw(adap, win, mtype, maddr,
3738 			   phy_fw_size, (__be32 *)phy_fw_data,
3739 			   T4_MEMORY_WRITE);
3740 	if (win_lock)
3741 		spin_unlock_bh(win_lock);
3742 	if (ret)
3743 		return ret;
3744 
3745 	/* Tell the firmware that the PHY firmware image has been written to
3746 	 * RAM and it can now start copying it over to the PHYs.  The chip
3747 	 * firmware will RESET the affected PHYs as part of this operation
3748 	 * leaving them running the new PHY firmware image.
3749 	 */
3750 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3751 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3752 		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3753 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3754 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3755 				    &param, &val, 30000);
3756 
3757 	/* If we have version number support, then check to see that the new
3758 	 * firmware got loaded properly.
3759 	 */
3760 	if (phy_fw_version) {
3761 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3762 		if (ret < 0)
3763 			return ret;
3764 
3765 		if (cur_phy_fw_ver != new_phy_fw_vers) {
3766 			CH_WARN(adap, "PHY Firmware did not update: "
3767 				"version on adapter %#x, "
3768 				"version flashed %#x\n",
3769 				cur_phy_fw_ver, new_phy_fw_vers);
3770 			return -ENXIO;
3771 		}
3772 	}
3773 
3774 	return 1;
3775 }
3776 
3777 /**
3778  *	t4_fwcache - firmware cache operation
3779  *	@adap: the adapter
3780  *	@op  : the operation (flush or flush and invalidate)
3781  */
3782 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3783 {
3784 	struct fw_params_cmd c;
3785 
3786 	memset(&c, 0, sizeof(c));
3787 	c.op_to_vfn =
3788 		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3789 			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3790 			    FW_PARAMS_CMD_PFN_V(adap->pf) |
3791 			    FW_PARAMS_CMD_VFN_V(0));
3792 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3793 	c.param[0].mnem =
3794 		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3795 			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3796 	c.param[0].val = (__force __be32)op;
3797 
3798 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3799 }
3800 
/**
 *	t4_cim_read_pif_la - read the CIM PIF logic analyzer buffers
 *	@adap: the adapter
 *	@pif_req: buffer receiving the PIF outbound (request) LA data
 *	@pif_rsp: buffer receiving the PIF inbound (response) LA data
 *	@pif_req_wrptr: if non-NULL, receives the request LA write pointer
 *	@pif_rsp_wrptr: if non-NULL, receives the response LA write pointer
 *
 *	Dumps the CIM PIF logic analyzer contents via the CIM debug
 *	registers.  NOTE(review): each output buffer is assumed to hold
 *	CIM_PIFLA_SIZE * 6 words -- confirm against callers.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Temporarily clear LADBGEN while we read the LA; the original
	 * debug configuration is restored at the end.
	 */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	/* Snapshot the current request/response write pointers */
	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	/* Read 6 data words per LA entry, then skip the read pointers
	 * ahead by 2 extra positions between entries.
	 */
	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}
	/* Restore the original debug configuration */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3834 
/**
 *	t4_cim_read_ma_la - read the CIM MA logic analyzer buffers
 *	@adap: the adapter
 *	@ma_req: buffer receiving the MA request LA data
 *	@ma_rsp: buffer receiving the MA response LA data
 *
 *	Dumps the CIM MA logic analyzer contents via the CIM debug
 *	registers.  NOTE(review): each output buffer is assumed to hold
 *	CIM_MALA_SIZE * 5 words -- confirm against callers.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Temporarily clear LADBGEN while reading; restored at the end */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Entries are spaced 8 apart; only the first 5
			 * words of each entry are read out.
			 */
			idx = 8 * i + j;
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
				     PILADBGRDPTR_V(idx));
			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
		}
	}
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3855 
3856 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3857 {
3858 	unsigned int i, j;
3859 
3860 	for (i = 0; i < 8; i++) {
3861 		u32 *p = la_buf + i;
3862 
3863 		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3864 		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3865 		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3866 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3867 			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3868 	}
3869 }
3870 
3871 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3872 		     FW_PORT_CAP32_ANEG)
3873 
3874 /**
3875  *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3876  *	@caps16: a 16-bit Port Capabilities value
3877  *
3878  *	Returns the equivalent 32-bit Port Capabilities value.
3879  */
3880 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3881 {
3882 	fw_port_cap32_t caps32 = 0;
3883 
3884 	#define CAP16_TO_CAP32(__cap) \
3885 		do { \
3886 			if (caps16 & FW_PORT_CAP_##__cap) \
3887 				caps32 |= FW_PORT_CAP32_##__cap; \
3888 		} while (0)
3889 
3890 	CAP16_TO_CAP32(SPEED_100M);
3891 	CAP16_TO_CAP32(SPEED_1G);
3892 	CAP16_TO_CAP32(SPEED_25G);
3893 	CAP16_TO_CAP32(SPEED_10G);
3894 	CAP16_TO_CAP32(SPEED_40G);
3895 	CAP16_TO_CAP32(SPEED_100G);
3896 	CAP16_TO_CAP32(FC_RX);
3897 	CAP16_TO_CAP32(FC_TX);
3898 	CAP16_TO_CAP32(ANEG);
3899 	CAP16_TO_CAP32(MDIX);
3900 	CAP16_TO_CAP32(MDIAUTO);
3901 	CAP16_TO_CAP32(FEC_RS);
3902 	CAP16_TO_CAP32(FEC_BASER_RS);
3903 	CAP16_TO_CAP32(802_3_PAUSE);
3904 	CAP16_TO_CAP32(802_3_ASM_DIR);
3905 
3906 	#undef CAP16_TO_CAP32
3907 
3908 	return caps32;
3909 }
3910 
3911 /**
3912  *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3913  *	@caps32: a 32-bit Port Capabilities value
3914  *
3915  *	Returns the equivalent 16-bit Port Capabilities value.  Note that
3916  *	not all 32-bit Port Capabilities can be represented in the 16-bit
3917  *	Port Capabilities and some fields/values may not make it.
3918  */
3919 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
3920 {
3921 	fw_port_cap16_t caps16 = 0;
3922 
3923 	#define CAP32_TO_CAP16(__cap) \
3924 		do { \
3925 			if (caps32 & FW_PORT_CAP32_##__cap) \
3926 				caps16 |= FW_PORT_CAP_##__cap; \
3927 		} while (0)
3928 
3929 	CAP32_TO_CAP16(SPEED_100M);
3930 	CAP32_TO_CAP16(SPEED_1G);
3931 	CAP32_TO_CAP16(SPEED_10G);
3932 	CAP32_TO_CAP16(SPEED_25G);
3933 	CAP32_TO_CAP16(SPEED_40G);
3934 	CAP32_TO_CAP16(SPEED_100G);
3935 	CAP32_TO_CAP16(FC_RX);
3936 	CAP32_TO_CAP16(FC_TX);
3937 	CAP32_TO_CAP16(802_3_PAUSE);
3938 	CAP32_TO_CAP16(802_3_ASM_DIR);
3939 	CAP32_TO_CAP16(ANEG);
3940 	CAP32_TO_CAP16(MDIX);
3941 	CAP32_TO_CAP16(MDIAUTO);
3942 	CAP32_TO_CAP16(FEC_RS);
3943 	CAP32_TO_CAP16(FEC_BASER_RS);
3944 
3945 	#undef CAP32_TO_CAP16
3946 
3947 	return caps16;
3948 }
3949 
3950 /* Translate Firmware Port Capabilities Pause specification to Common Code */
3951 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
3952 {
3953 	enum cc_pause cc_pause = 0;
3954 
3955 	if (fw_pause & FW_PORT_CAP32_FC_RX)
3956 		cc_pause |= PAUSE_RX;
3957 	if (fw_pause & FW_PORT_CAP32_FC_TX)
3958 		cc_pause |= PAUSE_TX;
3959 
3960 	return cc_pause;
3961 }
3962 
3963 /* Translate Common Code Pause specification into Firmware Port Capabilities */
3964 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
3965 {
3966 	fw_port_cap32_t fw_pause = 0;
3967 
3968 	if (cc_pause & PAUSE_RX)
3969 		fw_pause |= FW_PORT_CAP32_FC_RX;
3970 	if (cc_pause & PAUSE_TX)
3971 		fw_pause |= FW_PORT_CAP32_FC_TX;
3972 
3973 	return fw_pause;
3974 }
3975 
3976 /* Translate Firmware Forward Error Correction specification to Common Code */
3977 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
3978 {
3979 	enum cc_fec cc_fec = 0;
3980 
3981 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
3982 		cc_fec |= FEC_RS;
3983 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
3984 		cc_fec |= FEC_BASER_RS;
3985 
3986 	return cc_fec;
3987 }
3988 
3989 /* Translate Common Code Forward Error Correction specification to Firmware */
3990 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
3991 {
3992 	fw_port_cap32_t fw_fec = 0;
3993 
3994 	if (cc_fec & FEC_RS)
3995 		fw_fec |= FW_PORT_CAP32_FEC_RS;
3996 	if (cc_fec & FEC_BASER_RS)
3997 		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
3998 
3999 	return fw_fec;
4000 }
4001 
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adapter: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the Port's Link Configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 *
 *	Returns the result of issuing the FW_PORT command via t4_wr_mbox().
 */
int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
		  unsigned int port, struct link_config *lc)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
	fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;

	lc->link_ok = 0;

	/* Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/* Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/* Figure out what our Requested Port Capabilities are going to be.
	 * Three cases: no autonegotiation capability, autonegotiation
	 * explicitly disabled, and autonegotiation enabled (advertise acaps).
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	/* And send that on to the Firmware, using the 16- or 32-bit Port
	 * Capabilities layout according to what the firmware supports ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				       FW_PORT_CMD_PORTID_V(port));
	cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
}
4075 
4076 /**
4077  *	t4_restart_aneg - restart autonegotiation
4078  *	@adap: the adapter
4079  *	@mbox: mbox to use for the FW command
4080  *	@port: the port id
4081  *
4082  *	Restarts autonegotiation for the selected port.
4083  */
4084 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4085 {
4086 	struct fw_port_cmd c;
4087 
4088 	memset(&c, 0, sizeof(c));
4089 	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4090 				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4091 				     FW_PORT_CMD_PORTID_V(port));
4092 	c.action_to_len16 =
4093 		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
4094 			    FW_LEN16(c));
4095 	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
4096 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4097 }
4098 
/* Signature of a platform-specific interrupt handler hook. */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of an interrupt-cause table consumed by
 * t4_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4108 
4109 /**
4110  *	t4_handle_intr_status - table driven interrupt handler
4111  *	@adapter: the adapter that generated the interrupt
4112  *	@reg: the interrupt status register to process
4113  *	@acts: table of interrupt actions
4114  *
4115  *	A table driven interrupt handler that applies a set of masks to an
4116  *	interrupt status word and performs the corresponding actions if the
4117  *	interrupts described by the mask have occurred.  The actions include
4118  *	optionally emitting a warning or alert message.  The table is terminated
4119  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4120  *	conditions.
4121  */
4122 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4123 				 const struct intr_info *acts)
4124 {
4125 	int fatal = 0;
4126 	unsigned int mask = 0;
4127 	unsigned int status = t4_read_reg(adapter, reg);
4128 
4129 	for ( ; acts->mask; ++acts) {
4130 		if (!(status & acts->mask))
4131 			continue;
4132 		if (acts->fatal) {
4133 			fatal++;
4134 			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4135 				  status & acts->mask);
4136 		} else if (acts->msg && printk_ratelimit())
4137 			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4138 				 status & acts->mask);
4139 		if (acts->int_handler)
4140 			acts->int_handler(adapter);
4141 		mask |= acts->mask;
4142 	}
4143 	status &= mask;
4144 	if (status)                           /* clear processed interrupts */
4145 		t4_write_reg(adapter, reg, status);
4146 	return fatal;
4147 }
4148 
4149 /*
4150  * Interrupt handler for the PCIE module.
4151  */
4152 static void pcie_intr_handler(struct adapter *adapter)
4153 {
4154 	static const struct intr_info sysbus_intr_info[] = {
4155 		{ RNPP_F, "RXNP array parity error", -1, 1 },
4156 		{ RPCP_F, "RXPC array parity error", -1, 1 },
4157 		{ RCIP_F, "RXCIF array parity error", -1, 1 },
4158 		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
4159 		{ RFTP_F, "RXFT array parity error", -1, 1 },
4160 		{ 0 }
4161 	};
4162 	static const struct intr_info pcie_port_intr_info[] = {
4163 		{ TPCP_F, "TXPC array parity error", -1, 1 },
4164 		{ TNPP_F, "TXNP array parity error", -1, 1 },
4165 		{ TFTP_F, "TXFT array parity error", -1, 1 },
4166 		{ TCAP_F, "TXCA array parity error", -1, 1 },
4167 		{ TCIP_F, "TXCIF array parity error", -1, 1 },
4168 		{ RCAP_F, "RXCA array parity error", -1, 1 },
4169 		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
4170 		{ RDPE_F, "Rx data parity error", -1, 1 },
4171 		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
4172 		{ 0 }
4173 	};
4174 	static const struct intr_info pcie_intr_info[] = {
4175 		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4176 		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4177 		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4178 		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4179 		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4180 		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4181 		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4182 		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4183 		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4184 		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4185 		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4186 		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4187 		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4188 		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4189 		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4190 		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4191 		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4192 		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4193 		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4194 		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4195 		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
4196 		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4197 		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4198 		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4199 		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4200 		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4201 		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4202 		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
4203 		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
4204 		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
4205 		  -1, 0 },
4206 		{ 0 }
4207 	};
4208 
4209 	static struct intr_info t5_pcie_intr_info[] = {
4210 		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
4211 		  -1, 1 },
4212 		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4213 		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4214 		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4215 		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4216 		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4217 		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4218 		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4219 		  -1, 1 },
4220 		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4221 		  -1, 1 },
4222 		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4223 		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4224 		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4225 		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4226 		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
4227 		  -1, 1 },
4228 		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4229 		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4230 		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4231 		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4232 		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4233 		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4234 		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
4235 		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4236 		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4237 		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4238 		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4239 		  -1, 1 },
4240 		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4241 		  -1, 1 },
4242 		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4243 		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4244 		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4245 		{ READRSPERR_F, "Outbound read error", -1, 0 },
4246 		{ 0 }
4247 	};
4248 
4249 	int fat;
4250 
4251 	if (is_t4(adapter->params.chip))
4252 		fat = t4_handle_intr_status(adapter,
4253 				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4254 				sysbus_intr_info) +
4255 			t4_handle_intr_status(adapter,
4256 					PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4257 					pcie_port_intr_info) +
4258 			t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4259 					      pcie_intr_info);
4260 	else
4261 		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4262 					    t5_pcie_intr_info);
4263 
4264 	if (fat)
4265 		t4_fatal_err(adapter);
4266 }
4267 
4268 /*
4269  * TP interrupt handler.
4270  */
4271 static void tp_intr_handler(struct adapter *adapter)
4272 {
4273 	static const struct intr_info tp_intr_info[] = {
4274 		{ 0x3fffffff, "TP parity error", -1, 1 },
4275 		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4276 		{ 0 }
4277 	};
4278 
4279 	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4280 		t4_fatal_err(adapter);
4281 }
4282 
4283 /*
4284  * SGE interrupt handler.
4285  */
4286 static void sge_intr_handler(struct adapter *adapter)
4287 {
4288 	u64 v;
4289 	u32 err;
4290 
4291 	static const struct intr_info sge_intr_info[] = {
4292 		{ ERR_CPL_EXCEED_IQE_SIZE_F,
4293 		  "SGE received CPL exceeding IQE size", -1, 1 },
4294 		{ ERR_INVALID_CIDX_INC_F,
4295 		  "SGE GTS CIDX increment too large", -1, 0 },
4296 		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4297 		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4298 		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4299 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
4300 		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4301 		  0 },
4302 		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4303 		  0 },
4304 		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4305 		  0 },
4306 		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4307 		  0 },
4308 		{ ERR_ING_CTXT_PRIO_F,
4309 		  "SGE too many priority ingress contexts", -1, 0 },
4310 		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4311 		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4312 		{ 0 }
4313 	};
4314 
4315 	static struct intr_info t4t5_sge_intr_info[] = {
4316 		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4317 		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4318 		{ ERR_EGR_CTXT_PRIO_F,
4319 		  "SGE too many priority egress contexts", -1, 0 },
4320 		{ 0 }
4321 	};
4322 
4323 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4324 		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
4325 	if (v) {
4326 		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4327 				(unsigned long long)v);
4328 		t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4329 		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4330 	}
4331 
4332 	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4333 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4334 		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4335 					   t4t5_sge_intr_info);
4336 
4337 	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4338 	if (err & ERROR_QID_VALID_F) {
4339 		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4340 			ERROR_QID_G(err));
4341 		if (err & UNCAPTURED_ERROR_F)
4342 			dev_err(adapter->pdev_dev,
4343 				"SGE UNCAPTURED_ERROR set (clearing)\n");
4344 		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4345 			     UNCAPTURED_ERROR_F);
4346 	}
4347 
4348 	if (v != 0)
4349 		t4_fatal_err(adapter);
4350 }
4351 
4352 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4353 		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4354 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4355 		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4356 
/*
 * CIM interrupt handler.
 *
 * Reports CIM host and uP-accelerator interrupt causes.  Also checks
 * PCIE_FW for a firmware-reported error first, since the firmware uses a
 * forced CIM Timer0 interrupt to signal a crash (see below).
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	u32 val, fw_err;
	int fat;

	/* Must be read before the TIMER0 check below. */
	fw_err = t4_read_reg(adapter, PCIE_FW_A);
	if (fw_err & PCIE_FW_ERR_F)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally
	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
	 * in order to make sure the Host sees the Firmware Crash.  So
	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
	 * ignore the Timer0 interrupt.
	 */

	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
	if (val & TIMER0INT_F)
		/* Not a firmware crash: clear TIMER0INT now so that the
		 * table pass below doesn't treat it as a fatal cause.
		 */
		if (!(fw_err & PCIE_FW_ERR_F) ||
		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
				     TIMER0INT_F);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4433 
4434 /*
4435  * ULP RX interrupt handler.
4436  */
4437 static void ulprx_intr_handler(struct adapter *adapter)
4438 {
4439 	static const struct intr_info ulprx_intr_info[] = {
4440 		{ 0x1800000, "ULPRX context error", -1, 1 },
4441 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4442 		{ 0 }
4443 	};
4444 
4445 	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4446 		t4_fatal_err(adapter);
4447 }
4448 
4449 /*
4450  * ULP TX interrupt handler.
4451  */
4452 static void ulptx_intr_handler(struct adapter *adapter)
4453 {
4454 	static const struct intr_info ulptx_intr_info[] = {
4455 		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4456 		  0 },
4457 		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4458 		  0 },
4459 		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4460 		  0 },
4461 		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4462 		  0 },
4463 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4464 		{ 0 }
4465 	};
4466 
4467 	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4468 		t4_fatal_err(adapter);
4469 }
4470 
4471 /*
4472  * PM TX interrupt handler.
4473  */
4474 static void pmtx_intr_handler(struct adapter *adapter)
4475 {
4476 	static const struct intr_info pmtx_intr_info[] = {
4477 		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4478 		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4479 		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4480 		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4481 		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4482 		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4483 		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4484 		  -1, 1 },
4485 		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4486 		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4487 		{ 0 }
4488 	};
4489 
4490 	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4491 		t4_fatal_err(adapter);
4492 }
4493 
4494 /*
4495  * PM RX interrupt handler.
4496  */
4497 static void pmrx_intr_handler(struct adapter *adapter)
4498 {
4499 	static const struct intr_info pmrx_intr_info[] = {
4500 		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4501 		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4502 		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4503 		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4504 		  -1, 1 },
4505 		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4506 		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4507 		{ 0 }
4508 	};
4509 
4510 	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4511 		t4_fatal_err(adapter);
4512 }
4513 
4514 /*
4515  * CPL switch interrupt handler.
4516  */
4517 static void cplsw_intr_handler(struct adapter *adapter)
4518 {
4519 	static const struct intr_info cplsw_intr_info[] = {
4520 		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4521 		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4522 		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4523 		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4524 		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4525 		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4526 		{ 0 }
4527 	};
4528 
4529 	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4530 		t4_fatal_err(adapter);
4531 }
4532 
4533 /*
4534  * LE interrupt handler.
4535  */
4536 static void le_intr_handler(struct adapter *adap)
4537 {
4538 	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4539 	static const struct intr_info le_intr_info[] = {
4540 		{ LIPMISS_F, "LE LIP miss", -1, 0 },
4541 		{ LIP0_F, "LE 0 LIP error", -1, 0 },
4542 		{ PARITYERR_F, "LE parity error", -1, 1 },
4543 		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4544 		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
4545 		{ 0 }
4546 	};
4547 
4548 	static struct intr_info t6_le_intr_info[] = {
4549 		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4550 		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4551 		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
4552 		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4553 		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4554 		{ 0 }
4555 	};
4556 
4557 	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4558 				  (chip <= CHELSIO_T5) ?
4559 				  le_intr_info : t6_le_intr_info))
4560 		t4_fatal_err(adap);
4561 }
4562 
/*
 * MPS interrupt handler.
 *
 * Walks the per-sub-block MPS cause registers (Rx/Tx/TRC/statistics/
 * classifier), accumulating the fatal-interrupt counts, then clears and
 * flushes the top-level MPS cause register.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Same as mps_tx_intr_info but without the BUBBLE_F entry. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
				    mps_cls_intr_info);

	/* Clear the top-level cause after the sub-causes have been handled */
	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
4646 
4647 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4648 		      ECC_UE_INT_CAUSE_F)
4649 
4650 /*
4651  * EDC/MC interrupt handler.
4652  */
4653 static void mem_intr_handler(struct adapter *adapter, int idx)
4654 {
4655 	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4656 
4657 	unsigned int addr, cnt_addr, v;
4658 
4659 	if (idx <= MEM_EDC1) {
4660 		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4661 		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4662 	} else if (idx == MEM_MC) {
4663 		if (is_t4(adapter->params.chip)) {
4664 			addr = MC_INT_CAUSE_A;
4665 			cnt_addr = MC_ECC_STATUS_A;
4666 		} else {
4667 			addr = MC_P_INT_CAUSE_A;
4668 			cnt_addr = MC_P_ECC_STATUS_A;
4669 		}
4670 	} else {
4671 		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4672 		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4673 	}
4674 
4675 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4676 	if (v & PERR_INT_CAUSE_F)
4677 		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4678 			  name[idx]);
4679 	if (v & ECC_CE_INT_CAUSE_F) {
4680 		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4681 
4682 		t4_edc_err_read(adapter, idx);
4683 
4684 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4685 		if (printk_ratelimit())
4686 			dev_warn(adapter->pdev_dev,
4687 				 "%u %s correctable ECC data error%s\n",
4688 				 cnt, name[idx], cnt > 1 ? "s" : "");
4689 	}
4690 	if (v & ECC_UE_INT_CAUSE_F)
4691 		dev_alert(adapter->pdev_dev,
4692 			  "%s uncorrectable ECC data error\n", name[idx]);
4693 
4694 	t4_write_reg(adapter, addr, v);
4695 	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4696 		t4_fatal_err(adapter);
4697 }
4698 
4699 /*
4700  * MA interrupt handler.
4701  */
4702 static void ma_intr_handler(struct adapter *adap)
4703 {
4704 	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4705 
4706 	if (status & MEM_PERR_INT_CAUSE_F) {
4707 		dev_alert(adap->pdev_dev,
4708 			  "MA parity error, parity status %#x\n",
4709 			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4710 		if (is_t5(adap->params.chip))
4711 			dev_alert(adap->pdev_dev,
4712 				  "MA parity error, parity status %#x\n",
4713 				  t4_read_reg(adap,
4714 					      MA_PARITY_ERROR_STATUS2_A));
4715 	}
4716 	if (status & MEM_WRAP_INT_CAUSE_F) {
4717 		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4718 		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4719 			  "client %u to address %#x\n",
4720 			  MEM_WRAP_CLIENT_NUM_G(v),
4721 			  MEM_WRAP_ADDRESS_G(v) << 4);
4722 	}
4723 	t4_write_reg(adap, MA_INT_CAUSE_A, status);
4724 	t4_fatal_err(adap);
4725 }
4726 
4727 /*
4728  * SMB interrupt handler.
4729  */
4730 static void smb_intr_handler(struct adapter *adap)
4731 {
4732 	static const struct intr_info smb_intr_info[] = {
4733 		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4734 		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4735 		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4736 		{ 0 }
4737 	};
4738 
4739 	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4740 		t4_fatal_err(adap);
4741 }
4742 
4743 /*
4744  * NC-SI interrupt handler.
4745  */
4746 static void ncsi_intr_handler(struct adapter *adap)
4747 {
4748 	static const struct intr_info ncsi_intr_info[] = {
4749 		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4750 		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4751 		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4752 		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4753 		{ 0 }
4754 	};
4755 
4756 	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4757 		t4_fatal_err(adap);
4758 }
4759 
4760 /*
4761  * XGMAC interrupt handler.
4762  */
4763 static void xgmac_intr_handler(struct adapter *adap, int port)
4764 {
4765 	u32 v, int_cause_reg;
4766 
4767 	if (is_t4(adap->params.chip))
4768 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4769 	else
4770 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4771 
4772 	v = t4_read_reg(adap, int_cause_reg);
4773 
4774 	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4775 	if (!v)
4776 		return;
4777 
4778 	if (v & TXFIFO_PRTY_ERR_F)
4779 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4780 			  port);
4781 	if (v & RXFIFO_PRTY_ERR_F)
4782 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4783 			  port);
4784 	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4785 	t4_fatal_err(adap);
4786 }
4787 
4788 /*
4789  * PL interrupt handler.
4790  */
4791 static void pl_intr_handler(struct adapter *adap)
4792 {
4793 	static const struct intr_info pl_intr_info[] = {
4794 		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
4795 		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4796 		{ 0 }
4797 	};
4798 
4799 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4800 		t4_fatal_err(adap);
4801 }
4802 
4803 #define PF_INTR_MASK (PFSW_F)
4804 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4805 		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4806 		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4807 
/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no interrupt in GLBL_INTR_MASK was pending, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);

	/* Only proceed if one of the interrupts we own is pending; other
	 * cause bits (e.g. NCSI/SMB/XGMAC, not in GLBL_INTR_MASK) are still
	 * serviced below once we get here.
	 */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter->params.chip) && (cause & MC1_F))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
4874 
4875 /**
4876  *	t4_intr_enable - enable interrupts
4877  *	@adapter: the adapter whose interrupts should be enabled
4878  *
4879  *	Enable PF-specific interrupts for the calling function and the top-level
4880  *	interrupt concentrator for global interrupts.  Interrupts are already
4881  *	enabled at each module,	here we just enable the roots of the interrupt
4882  *	hierarchies.
4883  *
4884  *	Note: this function should be called only when the driver manages
4885  *	non PF-specific interrupts from the various HW modules.  Only one PCI
4886  *	function at a time should be doing this.
4887  */
4888 void t4_intr_enable(struct adapter *adapter)
4889 {
4890 	u32 val = 0;
4891 	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4892 	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4893 			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4894 
4895 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4896 		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4897 	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4898 		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
4899 		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4900 		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4901 		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4902 		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
4903 		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4904 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4905 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4906 }
4907 
4908 /**
4909  *	t4_intr_disable - disable interrupts
4910  *	@adapter: the adapter whose interrupts should be disabled
4911  *
4912  *	Disable interrupts.  We only disable the top-level interrupt
4913  *	concentrators.  The caller must be a PCI function managing global
4914  *	interrupts.
4915  */
4916 void t4_intr_disable(struct adapter *adapter)
4917 {
4918 	u32 whoami, pf;
4919 
4920 	if (pci_channel_offline(adapter->pdev))
4921 		return;
4922 
4923 	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4924 	pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4925 			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4926 
4927 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4928 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4929 }
4930 
4931 unsigned int t4_chip_rss_size(struct adapter *adap)
4932 {
4933 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
4934 		return RSS_NENTRIES;
4935 	else
4936 		return T6_RSS_NENTRIES;
4937 }
4938 
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		/* qp walks the consecutive __be32 IQ fields of the command,
		 * starting at iq0_to_iq2; each word packs three entries.
		 */
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		start += nq;
		n -= nq;

		while (nq > 0) {
			unsigned int v;

			/* Pack three table entries per 32-bit word, wrapping
			 * around to the start of @rspq when it is exhausted.
			 */
			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = cpu_to_be32(v);
			/* nq may go negative here; the last word simply
			 * carries fewer than three meaningful entries.
			 */
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
5004 
5005 /**
5006  *	t4_config_glbl_rss - configure the global RSS mode
5007  *	@adapter: the adapter
5008  *	@mbox: mbox to use for the FW command
5009  *	@mode: global RSS mode
5010  *	@flags: mode-specific flags
5011  *
5012  *	Sets the global RSS mode.
5013  */
5014 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5015 		       unsigned int flags)
5016 {
5017 	struct fw_rss_glb_config_cmd c;
5018 
5019 	memset(&c, 0, sizeof(c));
5020 	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5021 				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5022 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5023 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5024 		c.u.manual.mode_pkd =
5025 			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5026 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5027 		c.u.basicvirtual.mode_pkd =
5028 			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5029 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5030 	} else
5031 		return -EINVAL;
5032 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5033 }
5034 
5035 /**
5036  *	t4_config_vi_rss - configure per VI RSS settings
5037  *	@adapter: the adapter
5038  *	@mbox: mbox to use for the FW command
5039  *	@viid: the VI id
5040  *	@flags: RSS flags
5041  *	@defq: id of the default RSS queue for the VI.
5042  *
5043  *	Configures VI-specific RSS properties.
5044  */
5045 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5046 		     unsigned int flags, unsigned int defq)
5047 {
5048 	struct fw_rss_vi_config_cmd c;
5049 
5050 	memset(&c, 0, sizeof(c));
5051 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5052 				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5053 				   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5054 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5055 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5056 					FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5057 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5058 }
5059 
/* Read an RSS table row.  Selects @row in the lookup table (the high bits
 * of the write appear to request a read-back — NOTE(review): confirm
 * against the TP_RSS_LKP_TABLE register spec), then polls for the
 * row-valid bit and returns the latched contents through *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
				   5, 0, val);
}
5067 
5068 /**
5069  *	t4_read_rss - read the contents of the RSS mapping table
5070  *	@adapter: the adapter
5071  *	@map: holds the contents of the RSS mapping table
5072  *
5073  *	Reads the contents of the RSS hash->queue mapping table.
5074  */
5075 int t4_read_rss(struct adapter *adapter, u16 *map)
5076 {
5077 	int i, ret, nentries;
5078 	u32 val;
5079 
5080 	nentries = t4_chip_rss_size(adapter);
5081 	for (i = 0; i < nentries / 2; ++i) {
5082 		ret = rd_rss_row(adapter, i, &val);
5083 		if (ret)
5084 			return ret;
5085 		*map++ = LKPTBLQUEUE0_G(val);
5086 		*map++ = LKPTBLQUEUE1_G(val);
5087 	}
5088 	return 0;
5089 }
5090 
/* Use the firmware LDST mailbox path only when the firmware is up (FW_OK)
 * and backdoor register access hasn't been requested via adap->use_bd.
 */
static unsigned int t4_use_ldst(struct adapter *adap)
{
	return (adap->flags & FW_OK) && !adap->use_bd;
}
5095 
5096 /**
5097  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5098  * @adap: the adapter
5099  * @cmd: TP fw ldst address space type
5100  * @vals: where the indirect register values are stored/written
5101  * @nregs: how many indirect registers to read/write
5102  * @start_idx: index of first indirect register to read/write
5103  * @rw: Read (1) or Write (0)
5104  * @sleep_ok: if true we may sleep while awaiting command completion
5105  *
5106  * Access TP indirect registers through LDST
5107  */
5108 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5109 			    unsigned int nregs, unsigned int start_index,
5110 			    unsigned int rw, bool sleep_ok)
5111 {
5112 	int ret = 0;
5113 	unsigned int i;
5114 	struct fw_ldst_cmd c;
5115 
5116 	for (i = 0; i < nregs; i++) {
5117 		memset(&c, 0, sizeof(c));
5118 		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5119 						FW_CMD_REQUEST_F |
5120 						(rw ? FW_CMD_READ_F :
5121 						      FW_CMD_WRITE_F) |
5122 						FW_LDST_CMD_ADDRSPACE_V(cmd));
5123 		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5124 
5125 		c.u.addrval.addr = cpu_to_be32(start_index + i);
5126 		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5127 		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5128 				      sleep_ok);
5129 		if (ret)
5130 			return ret;
5131 
5132 		if (rw)
5133 			vals[i] = be32_to_cpu(c.u.addrval.val);
5134 	}
5135 	return 0;
5136 }
5137 
5138 /**
5139  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5140  * @adap: the adapter
5141  * @reg_addr: Address Register
5142  * @reg_data: Data register
5143  * @buff: where the indirect register values are stored/written
5144  * @nregs: how many indirect registers to read/write
5145  * @start_index: index of first indirect register to read/write
5146  * @rw: READ(1) or WRITE(0)
5147  * @sleep_ok: if true we may sleep while awaiting command completion
5148  *
5149  * Read/Write TP indirect registers through LDST if possible.
5150  * Else, use backdoor access
5151  **/
5152 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5153 			      u32 *buff, u32 nregs, u32 start_index, int rw,
5154 			      bool sleep_ok)
5155 {
5156 	int rc = -EINVAL;
5157 	int cmd;
5158 
5159 	switch (reg_addr) {
5160 	case TP_PIO_ADDR_A:
5161 		cmd = FW_LDST_ADDRSPC_TP_PIO;
5162 		break;
5163 	case TP_TM_PIO_ADDR_A:
5164 		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5165 		break;
5166 	case TP_MIB_INDEX_A:
5167 		cmd = FW_LDST_ADDRSPC_TP_MIB;
5168 		break;
5169 	default:
5170 		goto indirect_access;
5171 	}
5172 
5173 	if (t4_use_ldst(adap))
5174 		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5175 				      sleep_ok);
5176 
5177 indirect_access:
5178 
5179 	if (rc) {
5180 		if (rw)
5181 			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5182 					 start_index);
5183 		else
5184 			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5185 					  start_index);
5186 	}
5187 }
5188 
5189 /**
5190  * t4_tp_pio_read - Read TP PIO registers
5191  * @adap: the adapter
5192  * @buff: where the indirect register values are written
5193  * @nregs: how many indirect registers to read
5194  * @start_index: index of first indirect register to read
5195  * @sleep_ok: if true we may sleep while awaiting command completion
5196  *
5197  * Read TP PIO Registers
5198  **/
5199 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5200 		    u32 start_index, bool sleep_ok)
5201 {
5202 	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5203 			  start_index, 1, sleep_ok);
5204 }
5205 
5206 /**
5207  * t4_tp_pio_write - Write TP PIO registers
5208  * @adap: the adapter
5209  * @buff: where the indirect register values are stored
5210  * @nregs: how many indirect registers to write
5211  * @start_index: index of first indirect register to write
5212  * @sleep_ok: if true we may sleep while awaiting command completion
5213  *
5214  * Write TP PIO Registers
5215  **/
5216 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5217 			    u32 start_index, bool sleep_ok)
5218 {
5219 	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5220 			  start_index, 0, sleep_ok);
5221 }
5222 
5223 /**
5224  * t4_tp_tm_pio_read - Read TP TM PIO registers
5225  * @adap: the adapter
5226  * @buff: where the indirect register values are written
5227  * @nregs: how many indirect registers to read
5228  * @start_index: index of first indirect register to read
5229  * @sleep_ok: if true we may sleep while awaiting command completion
5230  *
5231  * Read TP TM PIO Registers
5232  **/
5233 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5234 		       u32 start_index, bool sleep_ok)
5235 {
5236 	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5237 			  nregs, start_index, 1, sleep_ok);
5238 }
5239 
5240 /**
5241  * t4_tp_mib_read - Read TP MIB registers
5242  * @adap: the adapter
5243  * @buff: where the indirect register values are written
5244  * @nregs: how many indirect registers to read
5245  * @start_index: index of first indirect register to read
5246  * @sleep_ok: if true we may sleep while awaiting command completion
5247  *
5248  * Read TP MIB Registers
5249  **/
5250 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5251 		    bool sleep_ok)
5252 {
5253 	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5254 			  start_index, 1, sleep_ok);
5255 }
5256 
5257 /**
5258  *	t4_read_rss_key - read the global RSS key
5259  *	@adap: the adapter
5260  *	@key: 10-entry array holding the 320-bit RSS key
5261  *      @sleep_ok: if true we may sleep while awaiting command completion
5262  *
5263  *	Reads the global 320-bit RSS key.
5264  */
5265 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5266 {
5267 	t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5268 }
5269 
5270 /**
5271  *	t4_write_rss_key - program one of the RSS keys
5272  *	@adap: the adapter
5273  *	@key: 10-entry array holding the 320-bit RSS key
5274  *	@idx: which RSS key to write
5275  *      @sleep_ok: if true we may sleep while awaiting command completion
5276  *
5277  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5278  *	0..15 the corresponding entry in the RSS key table is written,
5279  *	otherwise the global RSS key is written.
5280  */
5281 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5282 		      bool sleep_ok)
5283 {
5284 	u8 rss_key_addr_cnt = 16;
5285 	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5286 
5287 	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5288 	 * allows access to key addresses 16-63 by using KeyWrAddrX
5289 	 * as index[5:4](upper 2) into key table
5290 	 */
5291 	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5292 	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5293 		rss_key_addr_cnt = 32;
5294 
5295 	t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5296 
5297 	if (idx >= 0 && idx < rss_key_addr_cnt) {
5298 		if (rss_key_addr_cnt > 16)
5299 			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5300 				     KEYWRADDRX_V(idx >> 4) |
5301 				     T6_VFWRADDR_V(idx) | KEYWREN_F);
5302 		else
5303 			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5304 				     KEYWRADDR_V(idx) | KEYWREN_F);
5305 	}
5306 }
5307 
5308 /**
5309  *	t4_read_rss_pf_config - read PF RSS Configuration Table
5310  *	@adapter: the adapter
5311  *	@index: the entry in the PF RSS table to read
5312  *	@valp: where to store the returned value
5313  *      @sleep_ok: if true we may sleep while awaiting command completion
5314  *
5315  *	Reads the PF RSS Configuration Table at the specified index and returns
5316  *	the value found there.
5317  */
5318 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5319 			   u32 *valp, bool sleep_ok)
5320 {
5321 	t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5322 }
5323 
5324 /**
5325  *	t4_read_rss_vf_config - read VF RSS Configuration Table
5326  *	@adapter: the adapter
5327  *	@index: the entry in the VF RSS table to read
5328  *	@vfl: where to store the returned VFL
5329  *	@vfh: where to store the returned VFH
5330  *      @sleep_ok: if true we may sleep while awaiting command completion
5331  *
5332  *	Reads the VF RSS Configuration Table at the specified index and returns
5333  *	the (VFL, VFH) values found there.
5334  */
5335 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5336 			   u32 *vfl, u32 *vfh, bool sleep_ok)
5337 {
5338 	u32 vrt, mask, data;
5339 
5340 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5341 		mask = VFWRADDR_V(VFWRADDR_M);
5342 		data = VFWRADDR_V(index);
5343 	} else {
5344 		 mask =  T6_VFWRADDR_V(T6_VFWRADDR_M);
5345 		 data = T6_VFWRADDR_V(index);
5346 	}
5347 
5348 	/* Request that the index'th VF Table values be read into VFL/VFH.
5349 	 */
5350 	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5351 	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5352 	vrt |= data | VFRDEN_F;
5353 	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5354 
5355 	/* Grab the VFL/VFH values ...
5356 	 */
5357 	t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5358 	t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5359 }
5360 
5361 /**
5362  *	t4_read_rss_pf_map - read PF RSS Map
5363  *	@adapter: the adapter
5364  *      @sleep_ok: if true we may sleep while awaiting command completion
5365  *
5366  *	Reads the PF RSS Map register and returns its value.
5367  */
5368 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5369 {
5370 	u32 pfmap;
5371 
5372 	t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5373 	return pfmap;
5374 }
5375 
5376 /**
5377  *	t4_read_rss_pf_mask - read PF RSS Mask
5378  *	@adapter: the adapter
5379  *      @sleep_ok: if true we may sleep while awaiting command completion
5380  *
5381  *	Reads the PF RSS Mask register and returns its value.
5382  */
5383 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5384 {
5385 	u32 pfmask;
5386 
5387 	t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5388 	return pfmask;
5389 }
5390 
5391 /**
5392  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5393  *	@adap: the adapter
5394  *	@v4: holds the TCP/IP counter values
5395  *	@v6: holds the TCP/IPv6 counter values
5396  *      @sleep_ok: if true we may sleep while awaiting command completion
5397  *
5398  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5399  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5400  */
5401 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5402 			 struct tp_tcp_stats *v6, bool sleep_ok)
5403 {
5404 	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5405 
5406 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5407 #define STAT(x)     val[STAT_IDX(x)]
5408 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5409 
5410 	if (v4) {
5411 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5412 			       TP_MIB_TCP_OUT_RST_A, sleep_ok);
5413 		v4->tcp_out_rsts = STAT(OUT_RST);
5414 		v4->tcp_in_segs  = STAT64(IN_SEG);
5415 		v4->tcp_out_segs = STAT64(OUT_SEG);
5416 		v4->tcp_retrans_segs = STAT64(RXT_SEG);
5417 	}
5418 	if (v6) {
5419 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5420 			       TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5421 		v6->tcp_out_rsts = STAT(OUT_RST);
5422 		v6->tcp_in_segs  = STAT64(IN_SEG);
5423 		v6->tcp_out_segs = STAT64(OUT_SEG);
5424 		v6->tcp_retrans_segs = STAT64(RXT_SEG);
5425 	}
5426 #undef STAT64
5427 #undef STAT
5428 #undef STAT_IDX
5429 }
5430 
5431 /**
5432  *	t4_tp_get_err_stats - read TP's error MIB counters
5433  *	@adap: the adapter
5434  *	@st: holds the counter values
5435  *      @sleep_ok: if true we may sleep while awaiting command completion
5436  *
5437  *	Returns the values of TP's error counters.
5438  */
5439 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5440 			 bool sleep_ok)
5441 {
5442 	int nchan = adap->params.arch.nchan;
5443 
5444 	t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5445 		       sleep_ok);
5446 	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5447 		       sleep_ok);
5448 	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5449 		       sleep_ok);
5450 	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5451 		       TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5452 	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5453 		       TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5454 	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5455 		       sleep_ok);
5456 	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5457 		       TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5458 	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5459 		       TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5460 	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5461 		       sleep_ok);
5462 }
5463 
5464 /**
5465  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
5466  *	@adap: the adapter
5467  *	@st: holds the counter values
5468  *      @sleep_ok: if true we may sleep while awaiting command completion
5469  *
5470  *	Returns the values of TP's CPL counters.
5471  */
5472 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5473 			 bool sleep_ok)
5474 {
5475 	int nchan = adap->params.arch.nchan;
5476 
5477 	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5478 
5479 	t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5480 }
5481 
5482 /**
5483  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5484  *	@adap: the adapter
5485  *	@st: holds the counter values
5486  *      @sleep_ok: if true we may sleep while awaiting command completion
5487  *
5488  *	Returns the values of TP's RDMA counters.
5489  */
5490 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5491 			  bool sleep_ok)
5492 {
5493 	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5494 		       sleep_ok);
5495 }
5496 
5497 /**
5498  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5499  *	@adap: the adapter
5500  *	@idx: the port index
5501  *	@st: holds the counter values
5502  *      @sleep_ok: if true we may sleep while awaiting command completion
5503  *
5504  *	Returns the values of TP's FCoE counters for the selected port.
5505  */
5506 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5507 		       struct tp_fcoe_stats *st, bool sleep_ok)
5508 {
5509 	u32 val[2];
5510 
5511 	t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5512 		       sleep_ok);
5513 
5514 	t4_tp_mib_read(adap, &st->frames_drop, 1,
5515 		       TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5516 
5517 	t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5518 		       sleep_ok);
5519 
5520 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
5521 }
5522 
5523 /**
5524  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5525  *	@adap: the adapter
5526  *	@st: holds the counter values
5527  *      @sleep_ok: if true we may sleep while awaiting command completion
5528  *
5529  *	Returns the values of TP's counters for non-TCP directly-placed packets.
5530  */
5531 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5532 		      bool sleep_ok)
5533 {
5534 	u32 val[4];
5535 
5536 	t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5537 	st->frames = val[0];
5538 	st->drops = val[1];
5539 	st->octets = ((u64)val[2] << 32) | val[3];
5540 }
5541 
5542 /**
5543  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
5544  *	@adap: the adapter
5545  *	@mtus: where to store the MTU values
5546  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
5547  *
5548  *	Reads the HW path MTU table.
5549  */
5550 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5551 {
5552 	u32 v;
5553 	int i;
5554 
5555 	for (i = 0; i < NMTUS; ++i) {
5556 		t4_write_reg(adap, TP_MTU_TABLE_A,
5557 			     MTUINDEX_V(0xff) | MTUVALUE_V(i));
5558 		v = t4_read_reg(adap, TP_MTU_TABLE_A);
5559 		mtus[i] = MTUVALUE_G(v);
5560 		if (mtu_log)
5561 			mtu_log[i] = MTUWIDTH_G(v);
5562 	}
5563 }
5564 
5565 /**
5566  *	t4_read_cong_tbl - reads the congestion control table
5567  *	@adap: the adapter
5568  *	@incr: where to store the alpha values
5569  *
5570  *	Reads the additive increments programmed into the HW congestion
5571  *	control table.
5572  */
5573 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5574 {
5575 	unsigned int mtu, w;
5576 
5577 	for (mtu = 0; mtu < NMTUS; ++mtu)
5578 		for (w = 0; w < NCCTRL_WIN; ++w) {
5579 			t4_write_reg(adap, TP_CCTRL_TABLE_A,
5580 				     ROWINDEX_V(0xffff) | (mtu << 5) | w);
5581 			incr[mtu][w] = (u16)t4_read_reg(adap,
5582 						TP_CCTRL_TABLE_A) & 0x1fff;
5583 		}
5584 }
5585 
5586 /**
5587  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5588  *	@adap: the adapter
5589  *	@addr: the indirect TP register address
5590  *	@mask: specifies the field within the register to modify
5591  *	@val: new value for the field
5592  *
5593  *	Sets a field of an indirect TP register to the given value.
5594  */
5595 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5596 			    unsigned int mask, unsigned int val)
5597 {
5598 	t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5599 	val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5600 	t4_write_reg(adap, TP_PIO_DATA_A, val);
5601 }
5602 
5603 /**
5604  *	init_cong_ctrl - initialize congestion control parameters
5605  *	@a: the alpha values for congestion control
5606  *	@b: the beta values for congestion control
5607  *
5608  *	Initialize the congestion control parameters.
5609  */
5610 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5611 {
5612 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5613 	a[9] = 2;
5614 	a[10] = 3;
5615 	a[11] = 4;
5616 	a[12] = 5;
5617 	a[13] = 6;
5618 	a[14] = 7;
5619 	a[15] = 8;
5620 	a[16] = 9;
5621 	a[17] = 10;
5622 	a[18] = 14;
5623 	a[19] = 17;
5624 	a[20] = 21;
5625 	a[21] = 25;
5626 	a[22] = 30;
5627 	a[23] = 35;
5628 	a[24] = 45;
5629 	a[25] = 60;
5630 	a[26] = 80;
5631 	a[27] = 100;
5632 	a[28] = 200;
5633 	a[29] = 300;
5634 	a[30] = 400;
5635 	a[31] = 500;
5636 
5637 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5638 	b[9] = b[10] = 1;
5639 	b[11] = b[12] = 2;
5640 	b[13] = b[14] = b[15] = b[16] = 3;
5641 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5642 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5643 	b[28] = b[29] = 6;
5644 	b[30] = b[31] = 7;
5645 }
5646 
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet counts per congestion window, used to scale the
	 * additive increment for each window.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment: payload bytes (MTU minus 40
			 * bytes of headers) scaled by alpha over the
			 * window's average packet count, clamped below at
			 * CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
5693 
5694 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5695  * clocks.  The formula is
5696  *
5697  * bytes/s = bytes256 * 256 * ClkFreq / 4096
5698  *
5699  * which is equivalent to
5700  *
5701  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5702  */
5703 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5704 {
5705 	u64 v = bytes256 * adap->params.vpd.cclk;
5706 
5707 	return v * 62 + v / 2;
5708 }
5709 
5710 /**
5711  *	t4_get_chan_txrate - get the current per channel Tx rates
5712  *	@adap: the adapter
5713  *	@nic_rate: rates for NIC traffic
5714  *	@ofld_rate: rates for offloaded traffic
5715  *
5716  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
5717  *	for each channel.
5718  */
5719 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5720 {
5721 	u32 v;
5722 
5723 	v = t4_read_reg(adap, TP_TX_TRATE_A);
5724 	nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5725 	nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5726 	if (adap->params.arch.nchan == NCHAN) {
5727 		nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5728 		nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5729 	}
5730 
5731 	v = t4_read_reg(adap, TP_TX_ORATE_A);
5732 	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5733 	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5734 	if (adap->params.arch.nchan == NCHAN) {
5735 		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5736 		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5737 	}
5738 }
5739 
5740 /**
5741  *	t4_set_trace_filter - configure one of the tracing filters
5742  *	@adap: the adapter
5743  *	@tp: the desired trace filter parameters
5744  *	@idx: which filter to configure
5745  *	@enable: whether to enable or disable the filter
5746  *
5747  *	Configures one of the tracing filters available in HW.  If @enable is
5748  *	%0 @tp is not examined and may be %NULL. The user is responsible to
5749  *	set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5750  */
5751 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5752 			int idx, int enable)
5753 {
5754 	int i, ofst = idx * 4;
5755 	u32 data_reg, mask_reg, cfg;
5756 	u32 multitrc = TRCMULTIFILTER_F;
5757 
5758 	if (!enable) {
5759 		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5760 		return 0;
5761 	}
5762 
5763 	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5764 	if (cfg & TRCMULTIFILTER_F) {
5765 		/* If multiple tracers are enabled, then maximum
5766 		 * capture size is 2.5KB (FIFO size of a single channel)
5767 		 * minus 2 flits for CPL_TRACE_PKT header.
5768 		 */
5769 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5770 			return -EINVAL;
5771 	} else {
5772 		/* If multiple tracers are disabled, to avoid deadlocks
5773 		 * maximum packet capture size of 9600 bytes is recommended.
5774 		 * Also in this mode, only trace0 can be enabled and running.
5775 		 */
5776 		multitrc = 0;
5777 		if (tp->snap_len > 9600 || idx)
5778 			return -EINVAL;
5779 	}
5780 
5781 	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5782 	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5783 	    tp->min_len > TFMINPKTSIZE_M)
5784 		return -EINVAL;
5785 
5786 	/* stop the tracer we'll be changing */
5787 	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5788 
5789 	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5790 	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5791 	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5792 
5793 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5794 		t4_write_reg(adap, data_reg, tp->data[i]);
5795 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5796 	}
5797 	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5798 		     TFCAPTUREMAX_V(tp->snap_len) |
5799 		     TFMINPKTSIZE_V(tp->min_len));
5800 	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5801 		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5802 		     (is_t4(adap->params.chip) ?
5803 		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5804 		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
5805 		     T5_TFINVERTMATCH_V(tp->invert)));
5806 
5807 	return 0;
5808 }
5809 
5810 /**
5811  *	t4_get_trace_filter - query one of the tracing filters
5812  *	@adap: the adapter
5813  *	@tp: the current trace filter parameters
5814  *	@idx: which trace filter to query
5815  *	@enabled: non-zero if the filter is enabled
5816  *
5817  *	Returns the current settings of one of the HW tracing filters.
5818  */
5819 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5820 			 int *enabled)
5821 {
5822 	u32 ctla, ctlb;
5823 	int i, ofst = idx * 4;
5824 	u32 data_reg, mask_reg;
5825 
5826 	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5827 	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5828 
5829 	if (is_t4(adap->params.chip)) {
5830 		*enabled = !!(ctla & TFEN_F);
5831 		tp->port =  TFPORT_G(ctla);
5832 		tp->invert = !!(ctla & TFINVERTMATCH_F);
5833 	} else {
5834 		*enabled = !!(ctla & T5_TFEN_F);
5835 		tp->port = T5_TFPORT_G(ctla);
5836 		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5837 	}
5838 	tp->snap_len = TFCAPTUREMAX_G(ctlb);
5839 	tp->min_len = TFMINPKTSIZE_G(ctlb);
5840 	tp->skip_ofst = TFOFFSET_G(ctla);
5841 	tp->skip_len = TFLENGTH_G(ctla);
5842 
5843 	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5844 	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5845 	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5846 
5847 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5848 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5849 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5850 	}
5851 }
5852 
5853 /**
5854  *	t4_pmtx_get_stats - returns the HW stats from PMTX
5855  *	@adap: the adapter
5856  *	@cnt: where to store the count statistics
5857  *	@cycles: where to store the cycle statistics
5858  *
5859  *	Returns performance statistics from PMTX.
5860  */
5861 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5862 {
5863 	int i;
5864 	u32 data[2];
5865 
5866 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5867 		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5868 		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5869 		if (is_t4(adap->params.chip)) {
5870 			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5871 		} else {
5872 			t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5873 					 PM_TX_DBG_DATA_A, data, 2,
5874 					 PM_TX_DBG_STAT_MSB_A);
5875 			cycles[i] = (((u64)data[0] << 32) | data[1]);
5876 		}
5877 	}
5878 }
5879 
5880 /**
5881  *	t4_pmrx_get_stats - returns the HW stats from PMRX
5882  *	@adap: the adapter
5883  *	@cnt: where to store the count statistics
5884  *	@cycles: where to store the cycle statistics
5885  *
5886  *	Returns performance statistics from PMRX.
5887  */
5888 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5889 {
5890 	int i;
5891 	u32 data[2];
5892 
5893 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5894 		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5895 		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5896 		if (is_t4(adap->params.chip)) {
5897 			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5898 		} else {
5899 			t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5900 					 PM_RX_DBG_DATA_A, data, 2,
5901 					 PM_RX_DBG_STAT_MSB_A);
5902 			cycles[i] = (((u64)data[0] << 32) | data[1]);
5903 		}
5904 	}
5905 }
5906 
5907 /**
5908  *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
5909  *	@adap: the adapter
5910  *	@pidx: the port index
5911  *
5912  *	Computes and returns a bitmap indicating which MPS buffer groups are
5913  *	associated with the given Port.  Bit i is set if buffer group i is
5914  *	used by the Port.
5915  */
5916 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
5917 					      int pidx)
5918 {
5919 	unsigned int chip_version, nports;
5920 
5921 	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5922 	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5923 
5924 	switch (chip_version) {
5925 	case CHELSIO_T4:
5926 	case CHELSIO_T5:
5927 		switch (nports) {
5928 		case 1: return 0xf;
5929 		case 2: return 3 << (2 * pidx);
5930 		case 4: return 1 << pidx;
5931 		}
5932 		break;
5933 
5934 	case CHELSIO_T6:
5935 		switch (nports) {
5936 		case 2: return 1 << (2 * pidx);
5937 		}
5938 		break;
5939 	}
5940 
5941 	dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
5942 		chip_version, nports);
5943 
5944 	return 0;
5945 }
5946 
5947 /**
5948  *	t4_get_mps_bg_map - return the buffer groups associated with a port
5949  *	@adapter: the adapter
5950  *	@pidx: the port index
5951  *
5952  *	Returns a bitmap indicating which MPS buffer groups are associated
5953  *	with the given Port.  Bit i is set if buffer group i is used by the
5954  *	Port.
5955  */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	/* MPS_CMN_CTL.NUMPORTS encodes log2 of the number of ports. */
	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
			pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * (An entry of 0 is treated as "not yet cached".)
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us.  If the new
	 * API isn't supported, revert back to old hardcoded way.  The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		u32 param, val;
		int ret;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		/* Non-sleeping query variant; a failure here simply means we
		 * fall through to the hardcoded computation below.
		 */
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map.  Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
6012 
6013 /**
6014  *	t4_get_tp_ch_map - return TP ingress channels associated with a port
6015  *	@adapter: the adapter
6016  *	@pidx: the port index
6017  *
6018  *	Returns a bitmap indicating which TP Ingress Channels are associated
6019  *	with a given Port.  Bit i is set if TP Ingress Channel i is used by
6020  *	the Port.
6021  */
6022 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6023 {
6024 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6025 	unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6026 
6027 	if (pidx >= nports) {
6028 		dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6029 			 pidx, nports);
6030 		return 0;
6031 	}
6032 
6033 	switch (chip_version) {
6034 	case CHELSIO_T4:
6035 	case CHELSIO_T5:
6036 		/* Note that this happens to be the same values as the MPS
6037 		 * Buffer Group Map for these Chips.  But we replicate the code
6038 		 * here because they're really separate concepts.
6039 		 */
6040 		switch (nports) {
6041 		case 1: return 0xf;
6042 		case 2: return 3 << (2 * pidx);
6043 		case 4: return 1 << pidx;
6044 		}
6045 		break;
6046 
6047 	case CHELSIO_T6:
6048 		switch (nports) {
6049 		case 2: return 1 << pidx;
6050 		}
6051 		break;
6052 	}
6053 
6054 	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6055 		chip_version, nports);
6056 	return 0;
6057 }
6058 
6059 /**
6060  *      t4_get_port_type_description - return Port Type string description
6061  *      @port_type: firmware Port Type enumeration
6062  */
6063 const char *t4_get_port_type_description(enum fw_port_type port_type)
6064 {
6065 	static const char *const port_type_description[] = {
6066 		"Fiber_XFI",
6067 		"Fiber_XAUI",
6068 		"BT_SGMII",
6069 		"BT_XFI",
6070 		"BT_XAUI",
6071 		"KX4",
6072 		"CX4",
6073 		"KX",
6074 		"KR",
6075 		"SFP",
6076 		"BP_AP",
6077 		"BP4_AP",
6078 		"QSFP_10G",
6079 		"QSA",
6080 		"QSFP",
6081 		"BP40_BA",
6082 		"KR4_100G",
6083 		"CR4_QSFP",
6084 		"CR_QSFP",
6085 		"CR2_QSFP",
6086 		"SFP28",
6087 		"KR_SFP28",
6088 		"KR_XLAUI"
6089 	};
6090 
6091 	if (port_type < ARRAY_SIZE(port_type_description))
6092 		return port_type_description[port_type];
6093 	return "UNKNOWN";
6094 }
6095 
6096 /**
6097  *      t4_get_port_stats_offset - collect port stats relative to a previous
6098  *                                 snapshot
6099  *      @adap: The adapter
6100  *      @idx: The port
6101  *      @stats: Current stats to fill
6102  *      @offset: Previous stats snapshot
6103  */
6104 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6105 			      struct port_stats *stats,
6106 			      struct port_stats *offset)
6107 {
6108 	u64 *s, *o;
6109 	int i;
6110 
6111 	t4_get_port_stats(adap, idx, stats);
6112 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6113 			i < (sizeof(struct port_stats) / sizeof(u64));
6114 			i++, s++, o++)
6115 		*s -= *o;
6116 }
6117 
6118 /**
6119  *	t4_get_port_stats - collect port statistics
6120  *	@adap: the adapter
6121  *	@idx: the port index
6122  *	@p: the stats structure to fill
6123  *
6124  *	Collect statistics related to the given port from HW.
6125  */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* Buffer groups used by this port; gates the per-BG counters below. */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);

/* Read a 64-bit per-port statistic; the per-port register layout moved
 * between T4 and T5+, hence the chip check.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
/* Read a 64-bit statistic that is common to all ports. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* On T5+, when MPS_STAT_CTL says TX pause frames are folded into the
	 * 64B / multicast counters, subtract them back out so the counters
	 * reflect data frames only.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATTX_F)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & COUNTPAUSEMCTX_F)
			p->tx_mcast_frames -= p->tx_pause;
	}
	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as above, for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATRX_F)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & COUNTPAUSEMCRX_F)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters are per buffer group; only report the
	 * groups this port actually uses, zero the rest.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6214 
6215 /**
6216  *	t4_get_lb_stats - collect loopback port statistics
6217  *	@adap: the adapter
6218  *	@idx: the loopback port index
6219  *	@p: the stats structure to fill
6220  *
6221  *	Return HW statistics for the given loopback port.
6222  */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	/* Buffer groups used by this port; gates the per-BG counters below. */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Read a 64-bit loopback-port statistic; the per-port register layout
 * moved between T4 and T5+, hence the chip check.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Read a 64-bit statistic that is common to all ports. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets           = GET_STAT(BYTES);
	p->frames           = GET_STAT(FRAMES);
	p->bcast_frames     = GET_STAT(BCAST);
	p->mcast_frames     = GET_STAT(MCAST);
	p->ucast_frames     = GET_STAT(UCAST);
	p->error_frames     = GET_STAT(ERROR);

	p->frames_64        = GET_STAT(64B);
	p->frames_65_127    = GET_STAT(65B_127B);
	p->frames_128_255   = GET_STAT(128B_255B);
	p->frames_256_511   = GET_STAT(256B_511B);
	p->frames_512_1023  = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max  = GET_STAT(1519B_MAX);
	p->drop             = GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters are per buffer group; only report the
	 * groups this port actually uses, zero the rest.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6262 
/**
 *	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid is
 *	negative the delete notification is suppressed.
 */
6271 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6272 {
6273 	memset(wr, 0, sizeof(*wr));
6274 	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6275 	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6276 	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6277 				    FW_FILTER_WR_NOREPLY_V(qid < 0));
6278 	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6279 	if (qid >= 0)
6280 		wr->rx_chan_rx_rpl_iq =
6281 			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
6282 }
6283 
/* Initialize the common header of firmware command @var: set the opcode
 * for FW_<cmd>_CMD, the request flag and the READ/WRITE direction, and
 * fill in the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6290 
6291 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6292 			  u32 addr, u32 val)
6293 {
6294 	u32 ldst_addrspace;
6295 	struct fw_ldst_cmd c;
6296 
6297 	memset(&c, 0, sizeof(c));
6298 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6299 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6300 					FW_CMD_REQUEST_F |
6301 					FW_CMD_WRITE_F |
6302 					ldst_addrspace);
6303 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6304 	c.u.addrval.addr = cpu_to_be32(addr);
6305 	c.u.addrval.val = cpu_to_be32(val);
6306 
6307 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6308 }
6309 
6310 /**
6311  *	t4_mdio_rd - read a PHY register through MDIO
6312  *	@adap: the adapter
6313  *	@mbox: mailbox to use for the FW command
6314  *	@phy_addr: the PHY address
6315  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6316  *	@reg: the register to read
6317  *	@valp: where to store the value
6318  *
6319  *	Issues a FW command through the given mailbox to read a PHY register.
6320  */
6321 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6322 	       unsigned int mmd, unsigned int reg, u16 *valp)
6323 {
6324 	int ret;
6325 	u32 ldst_addrspace;
6326 	struct fw_ldst_cmd c;
6327 
6328 	memset(&c, 0, sizeof(c));
6329 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6330 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6331 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6332 					ldst_addrspace);
6333 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6334 	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6335 					 FW_LDST_CMD_MMD_V(mmd));
6336 	c.u.mdio.raddr = cpu_to_be16(reg);
6337 
6338 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6339 	if (ret == 0)
6340 		*valp = be16_to_cpu(c.u.mdio.rval);
6341 	return ret;
6342 }
6343 
6344 /**
6345  *	t4_mdio_wr - write a PHY register through MDIO
6346  *	@adap: the adapter
6347  *	@mbox: mailbox to use for the FW command
6348  *	@phy_addr: the PHY address
6349  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6350  *	@reg: the register to write
6351  *	@valp: value to write
6352  *
6353  *	Issues a FW command through the given mailbox to write a PHY register.
6354  */
6355 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6356 	       unsigned int mmd, unsigned int reg, u16 val)
6357 {
6358 	u32 ldst_addrspace;
6359 	struct fw_ldst_cmd c;
6360 
6361 	memset(&c, 0, sizeof(c));
6362 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6363 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6364 					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6365 					ldst_addrspace);
6366 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6367 	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6368 					 FW_LDST_CMD_MMD_V(mmd));
6369 	c.u.mdio.raddr = cpu_to_be16(reg);
6370 	c.u.mdio.rval = cpu_to_be16(val);
6371 
6372 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6373 }
6374 
6375 /**
6376  *	t4_sge_decode_idma_state - decode the idma state
6377  *	@adap: the adapter
6378  *	@state: the state idma is stuck in
6379  */
6380 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6381 {
6382 	static const char * const t4_decode[] = {
6383 		"IDMA_IDLE",
6384 		"IDMA_PUSH_MORE_CPL_FIFO",
6385 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6386 		"Not used",
6387 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6388 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6389 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6390 		"IDMA_SEND_FIFO_TO_IMSG",
6391 		"IDMA_FL_REQ_DATA_FL_PREP",
6392 		"IDMA_FL_REQ_DATA_FL",
6393 		"IDMA_FL_DROP",
6394 		"IDMA_FL_H_REQ_HEADER_FL",
6395 		"IDMA_FL_H_SEND_PCIEHDR",
6396 		"IDMA_FL_H_PUSH_CPL_FIFO",
6397 		"IDMA_FL_H_SEND_CPL",
6398 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6399 		"IDMA_FL_H_SEND_IP_HDR",
6400 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6401 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6402 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6403 		"IDMA_FL_D_SEND_PCIEHDR",
6404 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6405 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6406 		"IDMA_FL_SEND_PCIEHDR",
6407 		"IDMA_FL_PUSH_CPL_FIFO",
6408 		"IDMA_FL_SEND_CPL",
6409 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6410 		"IDMA_FL_SEND_PAYLOAD",
6411 		"IDMA_FL_REQ_NEXT_DATA_FL",
6412 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6413 		"IDMA_FL_SEND_PADDING",
6414 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6415 		"IDMA_FL_SEND_FIFO_TO_IMSG",
6416 		"IDMA_FL_REQ_DATAFL_DONE",
6417 		"IDMA_FL_REQ_HEADERFL_DONE",
6418 	};
6419 	static const char * const t5_decode[] = {
6420 		"IDMA_IDLE",
6421 		"IDMA_ALMOST_IDLE",
6422 		"IDMA_PUSH_MORE_CPL_FIFO",
6423 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6424 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6425 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6426 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6427 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6428 		"IDMA_SEND_FIFO_TO_IMSG",
6429 		"IDMA_FL_REQ_DATA_FL",
6430 		"IDMA_FL_DROP",
6431 		"IDMA_FL_DROP_SEND_INC",
6432 		"IDMA_FL_H_REQ_HEADER_FL",
6433 		"IDMA_FL_H_SEND_PCIEHDR",
6434 		"IDMA_FL_H_PUSH_CPL_FIFO",
6435 		"IDMA_FL_H_SEND_CPL",
6436 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6437 		"IDMA_FL_H_SEND_IP_HDR",
6438 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6439 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6440 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6441 		"IDMA_FL_D_SEND_PCIEHDR",
6442 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6443 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6444 		"IDMA_FL_SEND_PCIEHDR",
6445 		"IDMA_FL_PUSH_CPL_FIFO",
6446 		"IDMA_FL_SEND_CPL",
6447 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6448 		"IDMA_FL_SEND_PAYLOAD",
6449 		"IDMA_FL_REQ_NEXT_DATA_FL",
6450 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6451 		"IDMA_FL_SEND_PADDING",
6452 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6453 	};
6454 	static const char * const t6_decode[] = {
6455 		"IDMA_IDLE",
6456 		"IDMA_PUSH_MORE_CPL_FIFO",
6457 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6458 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6459 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6460 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6461 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6462 		"IDMA_FL_REQ_DATA_FL",
6463 		"IDMA_FL_DROP",
6464 		"IDMA_FL_DROP_SEND_INC",
6465 		"IDMA_FL_H_REQ_HEADER_FL",
6466 		"IDMA_FL_H_SEND_PCIEHDR",
6467 		"IDMA_FL_H_PUSH_CPL_FIFO",
6468 		"IDMA_FL_H_SEND_CPL",
6469 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6470 		"IDMA_FL_H_SEND_IP_HDR",
6471 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6472 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6473 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6474 		"IDMA_FL_D_SEND_PCIEHDR",
6475 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6476 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6477 		"IDMA_FL_SEND_PCIEHDR",
6478 		"IDMA_FL_PUSH_CPL_FIFO",
6479 		"IDMA_FL_SEND_CPL",
6480 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6481 		"IDMA_FL_SEND_PAYLOAD",
6482 		"IDMA_FL_REQ_NEXT_DATA_FL",
6483 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6484 		"IDMA_FL_SEND_PADDING",
6485 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6486 	};
6487 	static const u32 sge_regs[] = {
6488 		SGE_DEBUG_DATA_LOW_INDEX_2_A,
6489 		SGE_DEBUG_DATA_LOW_INDEX_3_A,
6490 		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6491 	};
6492 	const char **sge_idma_decode;
6493 	int sge_idma_decode_nstates;
6494 	int i;
6495 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6496 
6497 	/* Select the right set of decode strings to dump depending on the
6498 	 * adapter chip type.
6499 	 */
6500 	switch (chip_version) {
6501 	case CHELSIO_T4:
6502 		sge_idma_decode = (const char **)t4_decode;
6503 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6504 		break;
6505 
6506 	case CHELSIO_T5:
6507 		sge_idma_decode = (const char **)t5_decode;
6508 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6509 		break;
6510 
6511 	case CHELSIO_T6:
6512 		sge_idma_decode = (const char **)t6_decode;
6513 		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6514 		break;
6515 
6516 	default:
6517 		dev_err(adapter->pdev_dev,
6518 			"Unsupported chip version %d\n", chip_version);
6519 		return;
6520 	}
6521 
6522 	if (is_t4(adapter->params.chip)) {
6523 		sge_idma_decode = (const char **)t4_decode;
6524 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6525 	} else {
6526 		sge_idma_decode = (const char **)t5_decode;
6527 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6528 	}
6529 
6530 	if (state < sge_idma_decode_nstates)
6531 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6532 	else
6533 		CH_WARN(adapter, "idma state %d unknown\n", state);
6534 
6535 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6536 		CH_WARN(adapter, "SGE register %#x value %#x\n",
6537 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6538 }
6539 
6540 /**
6541  *      t4_sge_ctxt_flush - flush the SGE context cache
6542  *      @adap: the adapter
6543  *      @mbox: mailbox to use for the FW command
6544  *      @ctx_type: Egress or Ingress
6545  *
6546  *      Issues a FW command through the given mailbox to flush the
6547  *      SGE context cache.
6548  */
6549 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6550 {
6551 	int ret;
6552 	u32 ldst_addrspace;
6553 	struct fw_ldst_cmd c;
6554 
6555 	memset(&c, 0, sizeof(c));
6556 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6557 						 FW_LDST_ADDRSPC_SGE_EGRC :
6558 						 FW_LDST_ADDRSPC_SGE_INGC);
6559 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6560 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6561 					ldst_addrspace);
6562 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6563 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6564 
6565 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6566 	return ret;
6567 }
6568 
6569 /**
6570  *      t4_fw_hello - establish communication with FW
6571  *      @adap: the adapter
6572  *      @mbox: mailbox to use for the FW command
6573  *      @evt_mbox: mailbox to receive async FW events
6574  *      @master: specifies the caller's willingness to be the device master
6575  *	@state: returns the current device state (if non-NULL)
6576  *
6577  *	Issues a command to establish communication with FW.  Returns either
6578  *	an error (negative integer) or the mailbox of the Master PF.
6579  */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	/* Total attempts before we give up on a busy/slow firmware. */
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Extract the Master PF's mailbox and the device state from the
	 * firmware's reply.
	 */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll PCIE_FW in 50ms steps. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	/* Success: return the Master PF's mailbox (a non-negative value). */
	return master_mbox;
}
6698 
6699 /**
6700  *	t4_fw_bye - end communication with FW
6701  *	@adap: the adapter
6702  *	@mbox: mailbox to use for the FW command
6703  *
6704  *	Issues a command to terminate communication with FW.
6705  */
6706 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6707 {
6708 	struct fw_bye_cmd c;
6709 
6710 	memset(&c, 0, sizeof(c));
6711 	INIT_CMD(c, BYE, WRITE);
6712 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6713 }
6714 
6715 /**
 *	t4_early_init - ask FW to initialize the device
6717  *	@adap: the adapter
6718  *	@mbox: mailbox to use for the FW command
6719  *
6720  *	Issues a command to FW to partially initialize the device.  This
6721  *	performs initialization that generally doesn't depend on user input.
6722  */
6723 int t4_early_init(struct adapter *adap, unsigned int mbox)
6724 {
6725 	struct fw_initialize_cmd c;
6726 
6727 	memset(&c, 0, sizeof(c));
6728 	INIT_CMD(c, INITIALIZE, WRITE);
6729 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6730 }
6731 
6732 /**
6733  *	t4_fw_reset - issue a reset to FW
6734  *	@adap: the adapter
6735  *	@mbox: mailbox to use for the FW command
6736  *	@reset: specifies the type of reset to perform
6737  *
6738  *	Issues a reset command of the specified type to FW.
6739  */
6740 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6741 {
6742 	struct fw_reset_cmd c;
6743 
6744 	memset(&c, 0, sizeof(c));
6745 	INIT_CMD(c, RESET, WRITE);
6746 	c.val = cpu_to_be32(reset);
6747 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6748 }
6749 
6750 /**
6751  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6752  *	@adap: the adapter
6753  *	@mbox: mailbox to use for the FW RESET command (if desired)
6754  *	@force: force uP into RESET even if FW RESET command fails
6755  *
6756  *	Issues a RESET command to firmware (if desired) with a HALT indication
6757  *	and then puts the microprocessor into RESET state.  The RESET command
6758  *	will only be issued if a legitimate mailbox is provided (mbox <=
6759  *	PCIE_FW_MASTER_M).
6760  *
6761  *	This is generally used in order for the host to safely manipulate the
6762  *	adapter without fear of conflicting with whatever the firmware might
6763  *	be doing.  The only way out of this state is to RESTART the firmware
6764  *	...
6765  */
6766 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6767 {
6768 	int ret = 0;
6769 
6770 	/*
6771 	 * If a legitimate mailbox is provided, issue a RESET command
6772 	 * with a HALT indication.
6773 	 */
6774 	if (mbox <= PCIE_FW_MASTER_M) {
6775 		struct fw_reset_cmd c;
6776 
6777 		memset(&c, 0, sizeof(c));
6778 		INIT_CMD(c, RESET, WRITE);
6779 		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6780 		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
6781 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6782 	}
6783 
6784 	/*
6785 	 * Normally we won't complete the operation if the firmware RESET
6786 	 * command fails but if our caller insists we'll go ahead and put the
6787 	 * uP into RESET.  This can be useful if the firmware is hung or even
6788 	 * missing ...  We'll have to take the risk of putting the uP into
6789 	 * RESET without the cooperation of firmware in that case.
6790 	 *
6791 	 * We also force the firmware's HALT flag to be on in case we bypassed
6792 	 * the firmware RESET command above or we're dealing with old firmware
6793 	 * which doesn't have the HALT capability.  This will serve as a flag
6794 	 * for the incoming firmware to know that it's coming out of a HALT
6795 	 * rather than a RESET ... if it's new enough to understand that ...
6796 	 */
6797 	if (ret == 0 || force) {
6798 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
6799 		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
6800 				 PCIE_FW_HALT_F);
6801 	}
6802 
6803 	/*
6804 	 * And we always return the result of the firmware RESET command
6805 	 * even when we force the uP into RESET ...
6806 	 */
6807 	return ret;
6808 }
6809 
6810 /**
6811  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
6812  *	@adap: the adapter
6813  *	@reset: if we want to do a RESET to restart things
6814  *
6815  *	Restart firmware previously halted by t4_fw_halt().  On successful
6816  *	return the previous PF Master remains as the new PF Master and there
6817  *	is no need to issue a new HELLO command, etc.
6818  *
6819  *	We do this in two ways:
6820  *
6821  *	 1. If we're dealing with newer firmware we'll simply want to take
6822  *	    the chip's microprocessor out of RESET.  This will cause the
6823  *	    firmware to start up from its start vector.  And then we'll loop
6824  *	    until the firmware indicates it's started again (PCIE_FW.HALT
6825  *	    reset to 0) or we timeout.
6826  *
6827  *	 2. If we're dealing with older firmware then we'll need to RESET
6828  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
6829  *	    flag and automatically RESET itself on startup.
6830  */
6831 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6832 {
6833 	if (reset) {
6834 		/*
6835 		 * Since we're directing the RESET instead of the firmware
6836 		 * doing it automatically, we need to clear the PCIE_FW.HALT
6837 		 * bit.
6838 		 */
6839 		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
6840 
6841 		/*
6842 		 * If we've been given a valid mailbox, first try to get the
6843 		 * firmware to do the RESET.  If that works, great and we can
6844 		 * return success.  Otherwise, if we haven't been given a
6845 		 * valid mailbox or the RESET command failed, fall back to
6846 		 * hitting the chip with a hammer.
6847 		 */
6848 		if (mbox <= PCIE_FW_MASTER_M) {
6849 			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6850 			msleep(100);
6851 			if (t4_fw_reset(adap, mbox,
6852 					PIORST_F | PIORSTMODE_F) == 0)
6853 				return 0;
6854 		}
6855 
6856 		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
6857 		msleep(2000);
6858 	} else {
6859 		int ms;
6860 
6861 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6862 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6863 			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
6864 				return 0;
6865 			msleep(100);
6866 			ms += 100;
6867 		}
6868 		return -ETIMEDOUT;
6869 	}
6870 	return 0;
6871 }
6872 
/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	/* Refuse to flash an image built for a different chip. */
	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
	 * won't be sent when we are flashing FW.
	 */
	adap->flags &= ~FW_OK;

	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		goto out;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH .  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap);
out:
	/* Re-enable FW_OK (cleared above).  Note this happens
	 * unconditionally, even on error paths, so that normal mailbox
	 * operation resumes after the flash attempt.
	 */
	adap->flags |= FW_OK;
	return ret;
}
6948 
/**
 *	t4_fl_pkt_align - return the fl packet alignment
 *	@adap: the adapter
 *
 *	T4 has a single field to specify the packing and padding boundary.
 *	T5 onwards has separate fields for this and hence the alignment for
 *	next packet offset is maximum of these two.
 *
 */
int t4_fl_pkt_align(struct adapter *adap)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, SGE_CONTROL_A);

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)
	 * Padding Boundary values in T6 start from 8B,
	 * whereas it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
	else
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	/* The register holds log2(boundary) offset by the chip's base shift. */
	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip)) {
		/* T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}
7001 
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 *
 *	Always returns 0.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	/* HOSTPAGESIZE fields presumably encode log2(page size) - 10,
	 * i.e. a 1KB minimum Base Page Size -- TODO confirm against the
	 * T4 register specification.
	 */
	unsigned int sge_hps = page_shift - 10;
	/* Egress Status Page is either 64 or 128 bytes depending on the
	 * host cache line size (see EGRSTATUSPAGESIZE below).
	 */
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same Host Page Size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: a single field controls both Padding and Packing. */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			pci_read_config_word(adap->pdev,
					     pcie_cap + PCI_EXP_DEVCTL,
					     &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = INGPACKBOUNDARY_16B_X;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = INGPACKBOUNDARY_64B_X;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;

			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = INGPADBOUNDARY_32B_X;
		else
			ingpad = T6_INGPADBOUNDARY_8B_X;

		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(ingpad) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round buffer sizes 2 and 3 up to the free-list alignment. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
		     & ~(fl_align-1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
7154 
7155 /**
7156  *	t4_fw_initialize - ask FW to initialize the device
7157  *	@adap: the adapter
7158  *	@mbox: mailbox to use for the FW command
7159  *
7160  *	Issues a command to FW to partially initialize the device.  This
7161  *	performs initialization that generally doesn't depend on user input.
7162  */
7163 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7164 {
7165 	struct fw_initialize_cmd c;
7166 
7167 	memset(&c, 0, sizeof(c));
7168 	INIT_CMD(c, INITIALIZE, WRITE);
7169 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7170 }
7171 
/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;	/* cursor over the (mnem, value) pairs */

	/* The command payload holds at most 7 (mnem, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
				  FW_PARAMS_CMD_PFN_V(pf) |
				  FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Lay down each parameter mnemonic; when @rw is set, also send the
	 * caller-supplied value in the value slot.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	/* On success copy back the returned values; stepping p by 2 skips
	 * over each pair's mnemonic word.
	 */
	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}
7218 
/**
 *	t4_query_params - query FW or device parameters (sleeping)
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters (up to 7)
 *	@params: the parameter names
 *	@val: where to store the parameter values
 *
 *	Read-only wrapper around t4_query_params_rw() which may sleep while
 *	awaiting mailbox command completion.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  true);
}
7226 
/**
 *	t4_query_params_ns - query FW or device parameters (non-sleeping)
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters (up to 7)
 *	@params: the parameter names
 *	@val: where to store the parameter values
 *
 *	Read-only wrapper around t4_query_params_rw() which passes
 *	sleep_ok = false, for callers in atomic context.
 */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  false);
}
7234 
7235 /**
7236  *      t4_set_params_timeout - sets FW or device parameters
7237  *      @adap: the adapter
7238  *      @mbox: mailbox to use for the FW command
7239  *      @pf: the PF
7240  *      @vf: the VF
7241  *      @nparams: the number of parameters
7242  *      @params: the parameter names
7243  *      @val: the parameter values
7244  *      @timeout: the timeout time
7245  *
7246  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7247  *      specified at once.
7248  */
7249 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7250 			  unsigned int pf, unsigned int vf,
7251 			  unsigned int nparams, const u32 *params,
7252 			  const u32 *val, int timeout)
7253 {
7254 	struct fw_params_cmd c;
7255 	__be32 *p = &c.param[0].mnem;
7256 
7257 	if (nparams > 7)
7258 		return -EINVAL;
7259 
7260 	memset(&c, 0, sizeof(c));
7261 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7262 				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7263 				  FW_PARAMS_CMD_PFN_V(pf) |
7264 				  FW_PARAMS_CMD_VFN_V(vf));
7265 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7266 
7267 	while (nparams--) {
7268 		*p++ = cpu_to_be32(*params++);
7269 		*p++ = cpu_to_be32(*val++);
7270 	}
7271 
7272 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7273 }
7274 
7275 /**
7276  *	t4_set_params - sets FW or device parameters
7277  *	@adap: the adapter
7278  *	@mbox: mailbox to use for the FW command
7279  *	@pf: the PF
7280  *	@vf: the VF
7281  *	@nparams: the number of parameters
7282  *	@params: the parameter names
7283  *	@val: the parameter values
7284  *
7285  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7286  *	specified at once.
7287  */
7288 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7289 		  unsigned int vf, unsigned int nparams, const u32 *params,
7290 		  const u32 *val)
7291 {
7292 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7293 				     FW_CMD_MAX_TIMEOUT);
7294 }
7295 
7296 /**
7297  *	t4_cfg_pfvf - configure PF/VF resource limits
7298  *	@adap: the adapter
7299  *	@mbox: mailbox to use for the FW command
7300  *	@pf: the PF being configured
7301  *	@vf: the VF being configured
7302  *	@txq: the max number of egress queues
7303  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7304  *	@rxqi: the max number of interrupt-capable ingress queues
7305  *	@rxq: the max number of interruptless ingress queues
7306  *	@tc: the PCI traffic class
7307  *	@vi: the max number of virtual interfaces
7308  *	@cmask: the channel access rights mask for the PF/VF
7309  *	@pmask: the port access rights mask for the PF/VF
7310  *	@nexact: the maximum number of exact MPS filters
7311  *	@rcaps: read capabilities
7312  *	@wxcaps: write/execute capabilities
7313  *
7314  *	Configures resource limits and capabilities for a physical or virtual
7315  *	function.
7316  */
7317 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7318 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7319 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7320 		unsigned int vi, unsigned int cmask, unsigned int pmask,
7321 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7322 {
7323 	struct fw_pfvf_cmd c;
7324 
7325 	memset(&c, 0, sizeof(c));
7326 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7327 				  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7328 				  FW_PFVF_CMD_VFN_V(vf));
7329 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7330 	c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7331 				     FW_PFVF_CMD_NIQ_V(rxq));
7332 	c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7333 				    FW_PFVF_CMD_PMASK_V(pmask) |
7334 				    FW_PFVF_CMD_NEQ_V(txq));
7335 	c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7336 				      FW_PFVF_CMD_NVI_V(vi) |
7337 				      FW_PFVF_CMD_NEXACTF_V(nexact));
7338 	c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7339 					FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7340 					FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7341 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7342 }
7343 
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	c.nmac = nmac - 1;	/* field holds the address count minus 1 */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy the additional addresses; the cases deliberately
		 * cascade so that requesting N addresses copies nmac0..
		 * nmac(N-2) as well.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
7397 
7398 /**
7399  *	t4_free_vi - free a virtual interface
7400  *	@adap: the adapter
7401  *	@mbox: mailbox to use for the FW command
7402  *	@pf: the PF owning the VI
7403  *	@vf: the VF owning the VI
7404  *	@viid: virtual interface identifiler
7405  *
7406  *	Free a previously allocated virtual interface.
7407  */
7408 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7409 	       unsigned int vf, unsigned int viid)
7410 {
7411 	struct fw_vi_cmd c;
7412 
7413 	memset(&c, 0, sizeof(c));
7414 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7415 				  FW_CMD_REQUEST_F |
7416 				  FW_CMD_EXEC_F |
7417 				  FW_VI_CMD_PFN_V(pf) |
7418 				  FW_VI_CMD_VFN_V(vf));
7419 	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7420 	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7421 
7422 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7423 }
7424 
7425 /**
7426  *	t4_set_rxmode - set Rx properties of a virtual interface
7427  *	@adap: the adapter
7428  *	@mbox: mailbox to use for the FW command
7429  *	@viid: the VI id
7430  *	@mtu: the new MTU or -1
7431  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7432  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7433  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7434  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7435  *	@sleep_ok: if true we may sleep while awaiting command completion
7436  *
7437  *	Sets Rx properties of a virtual interface.
7438  */
7439 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7440 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
7441 		  bool sleep_ok)
7442 {
7443 	struct fw_vi_rxmode_cmd c;
7444 
7445 	/* convert to FW values */
7446 	if (mtu < 0)
7447 		mtu = FW_RXMODE_MTU_NO_CHG;
7448 	if (promisc < 0)
7449 		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7450 	if (all_multi < 0)
7451 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7452 	if (bcast < 0)
7453 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7454 	if (vlanex < 0)
7455 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7456 
7457 	memset(&c, 0, sizeof(c));
7458 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7459 				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7460 				   FW_VI_RXMODE_CMD_VIID_V(viid));
7461 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7462 	c.mtu_to_vlanexen =
7463 		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7464 			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7465 			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7466 			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7467 			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7468 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7469 }
7470 
/**
 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index of the entry in mps tcam
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Removes the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_CMD_EXEC_V(0) |
				   FW_VI_MAC_CMD_VIID_V(viid));
	/* Mark the payload as a RAW-type MAC entry. */
	val = FW_CMD_LEN16_V(1) |
	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
					  FW_CMD_LEN16_V(val));

	/* Request an index-based free of the entry at @idx. */
	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
				   DATAPORTNUM_V(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
				    DATAPORTNUM_V(DATAPORTNUM_M));

	/* Copy the address and the mask.  The +2 offset appears to place
	 * the 6-byte MAC in the low-order 6 bytes of the 8-byte data1/data1m
	 * fields -- confirm against the FW_VI_MAC_CMD raw layout.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
}
7520 
/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@port_id: the port index
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	/* Mark the payload as a RAW-type MAC entry. */
	val = FW_CMD_LEN16_V(1) |
	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Ask for the entry to be placed at the requested TCAM index.
	 * NOTE(review): the original "inner mac address" comment looks
	 * stale -- inner vs. outer is selected by @lookup_type below.
	 */
	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
				   DATAPORTNUM_V(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
				    DATAPORTNUM_V(DATAPORTNUM_M));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	/* The firmware echoes back the index actually used; treat any
	 * placement other than the requested @idx as a failure.
	 */
	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}
7576 
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;	/* running count of exact filters stored */
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;	/* addresses still to be submitted */

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * per mailbox command.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(free) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* FW_VI_MAC_ADD_MAC asks the firmware to pick the index. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
					    FW_VI_MAC_CMD_IDX_V(
						    FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset + i],
			       sizeof(p->macaddr));
		}

		/* It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* A returned index >= max_naddr means the address did not
		 * get an exact-match filter; fall back to the hash bitmap.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset + i] = (index >= max_naddr ?
						   0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL <<
					  hash_mac_addr(addr[offset + i]));
		}

		/* Only the first chunk may carry the "free existing" flag. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Running out of filter space is not a hard error: report how many
	 * exact-match filters were actually allocated.
	 */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
7670 
7671 /**
7672  *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
7673  *	@adap: the adapter
7674  *	@mbox: mailbox to use for the FW command
7675  *	@viid: the VI id
7676  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
7677  *	@addr: the MAC address(es)
7678  *	@sleep_ok: call is allowed to sleep
7679  *
7680  *	Frees the exact-match filter for each of the supplied addresses
7681  *
7682  *	Returns a negative error number or the number of filters freed.
7683  */
7684 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7685 		     unsigned int viid, unsigned int naddr,
7686 		     const u8 **addr, bool sleep_ok)
7687 {
7688 	int offset, ret = 0;
7689 	struct fw_vi_mac_cmd c;
7690 	unsigned int nfilters = 0;
7691 	unsigned int max_naddr = is_t4(adap->params.chip) ?
7692 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
7693 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7694 	unsigned int rem = naddr;
7695 
7696 	if (naddr > max_naddr)
7697 		return -EINVAL;
7698 
7699 	for (offset = 0; offset < (int)naddr ; /**/) {
7700 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7701 					 ? rem
7702 					 : ARRAY_SIZE(c.u.exact));
7703 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7704 						     u.exact[fw_naddr]), 16);
7705 		struct fw_vi_mac_exact *p;
7706 		int i;
7707 
7708 		memset(&c, 0, sizeof(c));
7709 		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7710 				     FW_CMD_REQUEST_F |
7711 				     FW_CMD_WRITE_F |
7712 				     FW_CMD_EXEC_V(0) |
7713 				     FW_VI_MAC_CMD_VIID_V(viid));
7714 		c.freemacs_to_len16 =
7715 				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7716 					    FW_CMD_LEN16_V(len16));
7717 
7718 		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7719 			p->valid_to_idx = cpu_to_be16(
7720 				FW_VI_MAC_CMD_VALID_F |
7721 				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7722 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7723 		}
7724 
7725 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7726 		if (ret)
7727 			break;
7728 
7729 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7730 			u16 index = FW_VI_MAC_CMD_IDX_G(
7731 						be16_to_cpu(p->valid_to_idx));
7732 
7733 			if (index < max_naddr)
7734 				nfilters++;
7735 		}
7736 
7737 		offset += fw_naddr;
7738 		rem -= fw_naddr;
7739 	}
7740 
7741 	if (ret == 0)
7742 		ret = nfilters;
7743 	return ret;
7744 }
7745 
7746 /**
7747  *	t4_change_mac - modifies the exact-match filter for a MAC address
7748  *	@adap: the adapter
7749  *	@mbox: mailbox to use for the FW command
7750  *	@viid: the VI id
7751  *	@idx: index of existing filter for old value of MAC address, or -1
7752  *	@addr: the new MAC address value
7753  *	@persist: whether a new MAC allocation should be persistent
7754  *	@add_smt: if true also add the address to the HW SMT
7755  *
7756  *	Modifies an exact-match filter and sets it to the new MAC address.
7757  *	Note that in general it is not possible to modify the value of a given
7758  *	filter so the generic way to modify an address filter is to free the one
7759  *	being used by the old address value and allocate a new filter for the
7760  *	new address value.  @idx can be -1 if the address is a new addition.
7761  *
7762  *	Returns a negative error number or the index of the filter with the new
7763  *	MAC value.
7764  */
7765 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7766 		  int idx, const u8 *addr, bool persist, bool add_smt)
7767 {
7768 	int ret, mode;
7769 	struct fw_vi_mac_cmd c;
7770 	struct fw_vi_mac_exact *p = c.u.exact;
7771 	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
7772 
7773 	if (idx < 0)                             /* new allocation */
7774 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7775 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7776 
7777 	memset(&c, 0, sizeof(c));
7778 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7779 				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7780 				   FW_VI_MAC_CMD_VIID_V(viid));
7781 	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7782 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7783 				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7784 				      FW_VI_MAC_CMD_IDX_V(idx));
7785 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
7786 
7787 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7788 	if (ret == 0) {
7789 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7790 		if (ret >= max_mac_addr)
7791 			ret = -ENOMEM;
7792 	}
7793 	return ret;
7794 }
7795 
7796 /**
7797  *	t4_set_addr_hash - program the MAC inexact-match hash filter
7798  *	@adap: the adapter
7799  *	@mbox: mailbox to use for the FW command
7800  *	@viid: the VI id
7801  *	@ucast: whether the hash filter should also match unicast addresses
7802  *	@vec: the value to be written to the hash filter
7803  *	@sleep_ok: call is allowed to sleep
7804  *
7805  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
7806  */
7807 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7808 		     bool ucast, u64 vec, bool sleep_ok)
7809 {
7810 	struct fw_vi_mac_cmd c;
7811 
7812 	memset(&c, 0, sizeof(c));
7813 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7814 				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7815 				   FW_VI_ENABLE_CMD_VIID_V(viid));
7816 	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7817 					  FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7818 					  FW_CMD_LEN16_V(1));
7819 	c.u.hash.hashvec = cpu_to_be64(vec);
7820 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7821 }
7822 
7823 /**
7824  *      t4_enable_vi_params - enable/disable a virtual interface
7825  *      @adap: the adapter
7826  *      @mbox: mailbox to use for the FW command
7827  *      @viid: the VI id
7828  *      @rx_en: 1=enable Rx, 0=disable Rx
7829  *      @tx_en: 1=enable Tx, 0=disable Tx
7830  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
7831  *
7832  *      Enables/disables a virtual interface.  Note that setting DCB Enable
7833  *      only makes sense when enabling a Virtual Interface ...
7834  */
7835 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7836 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7837 {
7838 	struct fw_vi_enable_cmd c;
7839 
7840 	memset(&c, 0, sizeof(c));
7841 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7842 				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7843 				   FW_VI_ENABLE_CMD_VIID_V(viid));
7844 	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7845 				     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7846 				     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
7847 				     FW_LEN16(c));
7848 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7849 }
7850 
7851 /**
7852  *	t4_enable_vi - enable/disable a virtual interface
7853  *	@adap: the adapter
7854  *	@mbox: mailbox to use for the FW command
7855  *	@viid: the VI id
7856  *	@rx_en: 1=enable Rx, 0=disable Rx
7857  *	@tx_en: 1=enable Tx, 0=disable Tx
7858  *
7859  *	Enables/disables a virtual interface.
7860  */
7861 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7862 		 bool rx_en, bool tx_en)
7863 {
7864 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7865 }
7866 
7867 /**
7868  *	t4_identify_port - identify a VI's port by blinking its LED
7869  *	@adap: the adapter
7870  *	@mbox: mailbox to use for the FW command
7871  *	@viid: the VI id
7872  *	@nblinks: how many times to blink LED at 2.5 Hz
7873  *
7874  *	Identifies a VI's port by blinking its LED.
7875  */
7876 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7877 		     unsigned int nblinks)
7878 {
7879 	struct fw_vi_enable_cmd c;
7880 
7881 	memset(&c, 0, sizeof(c));
7882 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7883 				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7884 				   FW_VI_ENABLE_CMD_VIID_V(viid));
7885 	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7886 	c.blinkdur = cpu_to_be16(nblinks);
7887 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7888 }
7889 
7890 /**
7891  *	t4_iq_stop - stop an ingress queue and its FLs
7892  *	@adap: the adapter
7893  *	@mbox: mailbox to use for the FW command
7894  *	@pf: the PF owning the queues
7895  *	@vf: the VF owning the queues
7896  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7897  *	@iqid: ingress queue id
7898  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7899  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7900  *
7901  *	Stops an ingress queue and its associated FLs, if any.  This causes
7902  *	any current or future data/messages destined for these queues to be
7903  *	tossed.
7904  */
7905 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7906 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7907 	       unsigned int fl0id, unsigned int fl1id)
7908 {
7909 	struct fw_iq_cmd c;
7910 
7911 	memset(&c, 0, sizeof(c));
7912 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7913 				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7914 				  FW_IQ_CMD_VFN_V(vf));
7915 	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7916 	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7917 	c.iqid = cpu_to_be16(iqid);
7918 	c.fl0id = cpu_to_be16(fl0id);
7919 	c.fl1id = cpu_to_be16(fl1id);
7920 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7921 }
7922 
7923 /**
7924  *	t4_iq_free - free an ingress queue and its FLs
7925  *	@adap: the adapter
7926  *	@mbox: mailbox to use for the FW command
7927  *	@pf: the PF owning the queues
7928  *	@vf: the VF owning the queues
7929  *	@iqtype: the ingress queue type
7930  *	@iqid: ingress queue id
7931  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7932  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7933  *
7934  *	Frees an ingress queue and its associated FLs, if any.
7935  */
7936 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7937 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7938 	       unsigned int fl0id, unsigned int fl1id)
7939 {
7940 	struct fw_iq_cmd c;
7941 
7942 	memset(&c, 0, sizeof(c));
7943 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7944 				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7945 				  FW_IQ_CMD_VFN_V(vf));
7946 	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7947 	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7948 	c.iqid = cpu_to_be16(iqid);
7949 	c.fl0id = cpu_to_be16(fl0id);
7950 	c.fl1id = cpu_to_be16(fl1id);
7951 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7952 }
7953 
7954 /**
7955  *	t4_eth_eq_free - free an Ethernet egress queue
7956  *	@adap: the adapter
7957  *	@mbox: mailbox to use for the FW command
7958  *	@pf: the PF owning the queue
7959  *	@vf: the VF owning the queue
7960  *	@eqid: egress queue id
7961  *
7962  *	Frees an Ethernet egress queue.
7963  */
7964 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7965 		   unsigned int vf, unsigned int eqid)
7966 {
7967 	struct fw_eq_eth_cmd c;
7968 
7969 	memset(&c, 0, sizeof(c));
7970 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7971 				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7972 				  FW_EQ_ETH_CMD_PFN_V(pf) |
7973 				  FW_EQ_ETH_CMD_VFN_V(vf));
7974 	c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7975 	c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
7976 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7977 }
7978 
7979 /**
7980  *	t4_ctrl_eq_free - free a control egress queue
7981  *	@adap: the adapter
7982  *	@mbox: mailbox to use for the FW command
7983  *	@pf: the PF owning the queue
7984  *	@vf: the VF owning the queue
7985  *	@eqid: egress queue id
7986  *
7987  *	Frees a control egress queue.
7988  */
7989 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7990 		    unsigned int vf, unsigned int eqid)
7991 {
7992 	struct fw_eq_ctrl_cmd c;
7993 
7994 	memset(&c, 0, sizeof(c));
7995 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7996 				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7997 				  FW_EQ_CTRL_CMD_PFN_V(pf) |
7998 				  FW_EQ_CTRL_CMD_VFN_V(vf));
7999 	c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8000 	c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8001 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8002 }
8003 
8004 /**
8005  *	t4_ofld_eq_free - free an offload egress queue
8006  *	@adap: the adapter
8007  *	@mbox: mailbox to use for the FW command
8008  *	@pf: the PF owning the queue
8009  *	@vf: the VF owning the queue
8010  *	@eqid: egress queue id
8011  *
8012  *	Frees a control egress queue.
8013  */
8014 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8015 		    unsigned int vf, unsigned int eqid)
8016 {
8017 	struct fw_eq_ofld_cmd c;
8018 
8019 	memset(&c, 0, sizeof(c));
8020 	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8021 				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8022 				  FW_EQ_OFLD_CMD_PFN_V(pf) |
8023 				  FW_EQ_OFLD_CMD_VFN_V(vf));
8024 	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8025 	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8026 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8027 }
8028 
8029 /**
8030  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
8031  *	@adap: the adapter
8032  *	@link_down_rc: Link Down Reason Code
8033  *
8034  *	Returns a string representation of the Link Down Reason Code.
8035  */
8036 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
8037 {
8038 	static const char * const reason[] = {
8039 		"Link Down",
8040 		"Remote Fault",
8041 		"Auto-negotiation Failure",
8042 		"Reserved",
8043 		"Insufficient Airflow",
8044 		"Unable To Determine Reason",
8045 		"No RX Signal Detected",
8046 		"Reserved",
8047 	};
8048 
8049 	if (link_down_rc >= ARRAY_SIZE(reason))
8050 		return "Bad Reason Code";
8051 
8052 	return reason[link_down_rc];
8053 }
8054 
8055 /**
8056  * Return the highest speed set in the port capabilities, in Mb/s.
8057  */
8058 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8059 {
8060 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
8061 		do { \
8062 			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8063 				return __speed; \
8064 		} while (0)
8065 
8066 	TEST_SPEED_RETURN(400G, 400000);
8067 	TEST_SPEED_RETURN(200G, 200000);
8068 	TEST_SPEED_RETURN(100G, 100000);
8069 	TEST_SPEED_RETURN(50G,   50000);
8070 	TEST_SPEED_RETURN(40G,   40000);
8071 	TEST_SPEED_RETURN(25G,   25000);
8072 	TEST_SPEED_RETURN(10G,   10000);
8073 	TEST_SPEED_RETURN(1G,     1000);
8074 	TEST_SPEED_RETURN(100M,    100);
8075 
8076 	#undef TEST_SPEED_RETURN
8077 
8078 	return 0;
8079 }
8080 
8081 /**
8082  *	fwcap_to_fwspeed - return highest speed in Port Capabilities
8083  *	@acaps: advertised Port Capabilities
8084  *
8085  *	Get the highest speed for the port from the advertised Port
8086  *	Capabilities.  It will be either the highest speed from the list of
8087  *	speeds or whatever user has set using ethtool.
8088  */
8089 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8090 {
8091 	#define TEST_SPEED_RETURN(__caps_speed) \
8092 		do { \
8093 			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8094 				return FW_PORT_CAP32_SPEED_##__caps_speed; \
8095 		} while (0)
8096 
8097 	TEST_SPEED_RETURN(400G);
8098 	TEST_SPEED_RETURN(200G);
8099 	TEST_SPEED_RETURN(100G);
8100 	TEST_SPEED_RETURN(50G);
8101 	TEST_SPEED_RETURN(40G);
8102 	TEST_SPEED_RETURN(25G);
8103 	TEST_SPEED_RETURN(10G);
8104 	TEST_SPEED_RETURN(1G);
8105 	TEST_SPEED_RETURN(100M);
8106 
8107 	#undef TEST_SPEED_RETURN
8108 
8109 	return 0;
8110 }
8111 
8112 /**
8113  *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8114  *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8115  *
8116  *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8117  *	32-bit Port Capabilities value.
8118  */
8119 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8120 {
8121 	fw_port_cap32_t linkattr = 0;
8122 
8123 	/* Unfortunately the format of the Link Status in the old
8124 	 * 16-bit Port Information message isn't the same as the
8125 	 * 16-bit Port Capabilities bitfield used everywhere else ...
8126 	 */
8127 	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8128 		linkattr |= FW_PORT_CAP32_FC_RX;
8129 	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8130 		linkattr |= FW_PORT_CAP32_FC_TX;
8131 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8132 		linkattr |= FW_PORT_CAP32_SPEED_100M;
8133 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8134 		linkattr |= FW_PORT_CAP32_SPEED_1G;
8135 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8136 		linkattr |= FW_PORT_CAP32_SPEED_10G;
8137 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8138 		linkattr |= FW_PORT_CAP32_SPEED_25G;
8139 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8140 		linkattr |= FW_PORT_CAP32_SPEED_40G;
8141 	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8142 		linkattr |= FW_PORT_CAP32_SPEED_100G;
8143 
8144 	return linkattr;
8145 }
8146 
8147 /**
8148  *	t4_handle_get_port_info - process a FW reply message
8149  *	@pi: the port info
8150  *	@rpl: start of the FW message
8151  *
8152  *	Processes a GET_PORT_INFO FW reply message.
8153  */
8154 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8155 {
8156 	const struct fw_port_cmd *cmd = (const void *)rpl;
8157 	int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8158 	struct adapter *adapter = pi->adapter;
8159 	struct link_config *lc = &pi->link_cfg;
8160 	int link_ok, linkdnrc;
8161 	enum fw_port_type port_type;
8162 	enum fw_port_module_type mod_type;
8163 	unsigned int speed, fc, fec;
8164 	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8165 
8166 	/* Extract the various fields from the Port Information message.
8167 	 */
8168 	switch (action) {
8169 	case FW_PORT_ACTION_GET_PORT_INFO: {
8170 		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8171 
8172 		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8173 		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8174 		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8175 		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8176 		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8177 		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8178 		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8179 		linkattr = lstatus_to_fwcap(lstatus);
8180 		break;
8181 	}
8182 
8183 	case FW_PORT_ACTION_GET_PORT_INFO32: {
8184 		u32 lstatus32;
8185 
8186 		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8187 		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8188 		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8189 		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8190 		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8191 		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8192 		acaps = be32_to_cpu(cmd->u.info32.acaps32);
8193 		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8194 		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8195 		break;
8196 	}
8197 
8198 	default:
8199 		dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8200 			be32_to_cpu(cmd->action_to_len16));
8201 		return;
8202 	}
8203 
8204 	fec = fwcap_to_cc_fec(acaps);
8205 	fc = fwcap_to_cc_pause(linkattr);
8206 	speed = fwcap_to_speed(linkattr);
8207 
8208 	if (mod_type != pi->mod_type) {
8209 		/* With the newer SFP28 and QSFP28 Transceiver Module Types,
8210 		 * various fundamental Port Capabilities which used to be
8211 		 * immutable can now change radically.  We can now have
8212 		 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8213 		 * all change based on what Transceiver Module is inserted.
8214 		 * So we need to record the Physical "Port" Capabilities on
8215 		 * every Transceiver Module change.
8216 		 */
8217 		lc->pcaps = pcaps;
8218 
8219 		/* When a new Transceiver Module is inserted, the Firmware
8220 		 * will examine its i2c EPROM to determine its type and
8221 		 * general operating parameters including things like Forward
8222 		 * Error Control, etc.  Various IEEE 802.3 standards dictate
8223 		 * how to interpret these i2c values to determine default
8224 		 * "sutomatic" settings.  We record these for future use when
8225 		 * the user explicitly requests these standards-based values.
8226 		 */
8227 		lc->def_acaps = acaps;
8228 
8229 		/* Some versions of the early T6 Firmware "cheated" when
8230 		 * handling different Transceiver Modules by changing the
8231 		 * underlaying Port Type reported to the Host Drivers.  As
8232 		 * such we need to capture whatever Port Type the Firmware
8233 		 * sends us and record it in case it's different from what we
8234 		 * were told earlier.  Unfortunately, since Firmware is
8235 		 * forever, we'll need to keep this code here forever, but in
8236 		 * later T6 Firmware it should just be an assignment of the
8237 		 * same value already recorded.
8238 		 */
8239 		pi->port_type = port_type;
8240 
8241 		pi->mod_type = mod_type;
8242 		t4_os_portmod_changed(adapter, pi->port_id);
8243 	}
8244 
8245 	if (link_ok != lc->link_ok || speed != lc->speed ||
8246 	    fc != lc->fc || fec != lc->fec) {	/* something changed */
8247 		if (!link_ok && lc->link_ok) {
8248 			lc->link_down_rc = linkdnrc;
8249 			dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8250 				 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
8251 		}
8252 		lc->link_ok = link_ok;
8253 		lc->speed = speed;
8254 		lc->fc = fc;
8255 		lc->fec = fec;
8256 
8257 		lc->lpacaps = lpacaps;
8258 		lc->acaps = acaps & ADVERT_MASK;
8259 
8260 		if (lc->acaps & FW_PORT_CAP32_ANEG) {
8261 			lc->autoneg = AUTONEG_ENABLE;
8262 		} else {
8263 			/* When Autoneg is disabled, user needs to set
8264 			 * single speed.
8265 			 * Similar to cxgb4_ethtool.c: set_link_ksettings
8266 			 */
8267 			lc->acaps = 0;
8268 			lc->speed_caps = fwcap_to_fwspeed(acaps);
8269 			lc->autoneg = AUTONEG_DISABLE;
8270 		}
8271 
8272 		t4_os_link_changed(adapter, pi->port_id, link_ok);
8273 	}
8274 }
8275 
8276 /**
8277  *	t4_update_port_info - retrieve and update port information if changed
8278  *	@pi: the port_info
8279  *
8280  *	We issue a Get Port Information Command to the Firmware and, if
8281  *	successful, we check to see if anything is different from what we
8282  *	last recorded and update things accordingly.
8283  */
8284 int t4_update_port_info(struct port_info *pi)
8285 {
8286 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8287 	struct fw_port_cmd port_cmd;
8288 	int ret;
8289 
8290 	memset(&port_cmd, 0, sizeof(port_cmd));
8291 	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8292 					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
8293 					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
8294 	port_cmd.action_to_len16 = cpu_to_be32(
8295 		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8296 				     ? FW_PORT_ACTION_GET_PORT_INFO
8297 				     : FW_PORT_ACTION_GET_PORT_INFO32) |
8298 		FW_LEN16(port_cmd));
8299 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8300 			 &port_cmd, sizeof(port_cmd), &port_cmd);
8301 	if (ret)
8302 		return ret;
8303 
8304 	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8305 	return 0;
8306 }
8307 
8308 /**
8309  *	t4_get_link_params - retrieve basic link parameters for given port
8310  *	@pi: the port
8311  *	@link_okp: value return pointer for link up/down
8312  *	@speedp: value return pointer for speed (Mb/s)
8313  *	@mtup: value return pointer for mtu
8314  *
8315  *	Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8316  *	and MTU for a specified port.  A negative error is returned on
8317  *	failure; 0 on success.
8318  */
8319 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8320 		       unsigned int *speedp, unsigned int *mtup)
8321 {
8322 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8323 	struct fw_port_cmd port_cmd;
8324 	unsigned int action, link_ok, speed, mtu;
8325 	fw_port_cap32_t linkattr;
8326 	int ret;
8327 
8328 	memset(&port_cmd, 0, sizeof(port_cmd));
8329 	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8330 					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
8331 					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
8332 	action = (fw_caps == FW_CAPS16
8333 		  ? FW_PORT_ACTION_GET_PORT_INFO
8334 		  : FW_PORT_ACTION_GET_PORT_INFO32);
8335 	port_cmd.action_to_len16 = cpu_to_be32(
8336 		FW_PORT_CMD_ACTION_V(action) |
8337 		FW_LEN16(port_cmd));
8338 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8339 			 &port_cmd, sizeof(port_cmd), &port_cmd);
8340 	if (ret)
8341 		return ret;
8342 
8343 	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8344 		u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8345 
8346 		link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8347 		linkattr = lstatus_to_fwcap(lstatus);
8348 		mtu = be16_to_cpu(port_cmd.u.info.mtu);
8349 	} else {
8350 		u32 lstatus32 =
8351 			   be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8352 
8353 		link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8354 		linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8355 		mtu = FW_PORT_CMD_MTU32_G(
8356 			be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8357 	}
8358 	speed = fwcap_to_speed(linkattr);
8359 
8360 	*link_okp = link_ok;
8361 	*speedp = fwcap_to_speed(linkattr);
8362 	*mtup = mtu;
8363 
8364 	return 0;
8365 }
8366 
8367 /**
8368  *      t4_handle_fw_rpl - process a FW reply message
8369  *      @adap: the adapter
8370  *      @rpl: start of the FW message
8371  *
8372  *      Processes a FW message, such as link state change messages.
8373  */
8374 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8375 {
8376 	u8 opcode = *(const u8 *)rpl;
8377 
8378 	/* This might be a port command ... this simplifies the following
8379 	 * conditionals ...  We can get away with pre-dereferencing
8380 	 * action_to_len16 because it's in the first 16 bytes and all messages
8381 	 * will be at least that long.
8382 	 */
8383 	const struct fw_port_cmd *p = (const void *)rpl;
8384 	unsigned int action =
8385 		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8386 
8387 	if (opcode == FW_PORT_CMD &&
8388 	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
8389 	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8390 		int i;
8391 		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8392 		struct port_info *pi = NULL;
8393 
8394 		for_each_port(adap, i) {
8395 			pi = adap2pinfo(adap, i);
8396 			if (pi->tx_chan == chan)
8397 				break;
8398 		}
8399 
8400 		t4_handle_get_port_info(pi, rpl);
8401 	} else {
8402 		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8403 			 opcode);
8404 		return -EINVAL;
8405 	}
8406 	return 0;
8407 }
8408 
8409 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8410 {
8411 	u16 val;
8412 
8413 	if (pci_is_pcie(adapter->pdev)) {
8414 		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8415 		p->speed = val & PCI_EXP_LNKSTA_CLS;
8416 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8417 	}
8418 }
8419 
8420 /**
8421  *	init_link_config - initialize a link's SW state
8422  *	@lc: pointer to structure holding the link state
8423  *	@pcaps: link Port Capabilities
8424  *	@acaps: link current Advertised Port Capabilities
8425  *
8426  *	Initializes the SW state maintained for each link, including the link's
8427  *	capabilities and default speed/flow-control/autonegotiation settings.
8428  */
8429 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8430 			     fw_port_cap32_t acaps)
8431 {
8432 	lc->pcaps = pcaps;
8433 	lc->def_acaps = acaps;
8434 	lc->lpacaps = 0;
8435 	lc->speed_caps = 0;
8436 	lc->speed = 0;
8437 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8438 
8439 	/* For Forward Error Control, we default to whatever the Firmware
8440 	 * tells us the Link is currently advertising.
8441 	 */
8442 	lc->requested_fec = FEC_AUTO;
8443 	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8444 
8445 	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8446 		lc->acaps = lc->pcaps & ADVERT_MASK;
8447 		lc->autoneg = AUTONEG_ENABLE;
8448 		lc->requested_fc |= PAUSE_AUTONEG;
8449 	} else {
8450 		lc->acaps = 0;
8451 		lc->autoneg = AUTONEG_DISABLE;
8452 	}
8453 }
8454 
8455 #define CIM_PF_NOACCESS 0xeeeeeeee
8456 
8457 int t4_wait_dev_ready(void __iomem *regs)
8458 {
8459 	u32 whoami;
8460 
8461 	whoami = readl(regs + PL_WHOAMI_A);
8462 	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8463 		return 0;
8464 
8465 	msleep(500);
8466 	whoami = readl(regs + PL_WHOAMI_A);
8467 	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8468 }
8469 
/* Describes a non-default supported serial flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* JEDEC Read-ID result for the part */
	u32 size_mb;	/* total size; holds bytes (e.g. 4 << 20) despite
			 * the "_mb" name — see supported_flash[] below
			 */
};
8474 
8475 static int t4_get_flash_params(struct adapter *adap)
8476 {
8477 	/* Table for non-Numonix supported flash parts.  Numonix parts are left
8478 	 * to the preexisting code.  All flash parts have 64KB sectors.
8479 	 */
8480 	static struct flash_desc supported_flash[] = {
8481 		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
8482 	};
8483 
8484 	unsigned int part, manufacturer;
8485 	unsigned int density, size;
8486 	u32 flashid = 0;
8487 	int ret;
8488 
8489 	/* Issue a Read ID Command to the Flash part.  We decode supported
8490 	 * Flash parts and their sizes from this.  There's a newer Query
8491 	 * Command which can retrieve detailed geometry information but many
8492 	 * Flash parts don't support it.
8493 	 */
8494 
8495 	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8496 	if (!ret)
8497 		ret = sf1_read(adap, 3, 0, 1, &flashid);
8498 	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
8499 	if (ret)
8500 		return ret;
8501 
8502 	/* Check to see if it's one of our non-standard supported Flash parts.
8503 	 */
8504 	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8505 		if (supported_flash[part].vendor_and_model_id == flashid) {
8506 			adap->params.sf_size = supported_flash[part].size_mb;
8507 			adap->params.sf_nsec =
8508 				adap->params.sf_size / SF_SEC_SIZE;
8509 			goto found;
8510 		}
8511 
8512 	/* Decode Flash part size.  The code below looks repetative with
8513 	 * common encodings, but that's not guaranteed in the JEDEC
8514 	 * specification for the Read JADEC ID command.  The only thing that
8515 	 * we're guaranteed by the JADEC specification is where the
8516 	 * Manufacturer ID is in the returned result.  After that each
8517 	 * Manufacturer ~could~ encode things completely differently.
8518 	 * Note, all Flash parts must have 64KB sectors.
8519 	 */
8520 	manufacturer = flashid & 0xff;
8521 	switch (manufacturer) {
8522 	case 0x20: { /* Micron/Numonix */
8523 		/* This Density -> Size decoding table is taken from Micron
8524 		 * Data Sheets.
8525 		 */
8526 		density = (flashid >> 16) & 0xff;
8527 		switch (density) {
8528 		case 0x14: /* 1MB */
8529 			size = 1 << 20;
8530 			break;
8531 		case 0x15: /* 2MB */
8532 			size = 1 << 21;
8533 			break;
8534 		case 0x16: /* 4MB */
8535 			size = 1 << 22;
8536 			break;
8537 		case 0x17: /* 8MB */
8538 			size = 1 << 23;
8539 			break;
8540 		case 0x18: /* 16MB */
8541 			size = 1 << 24;
8542 			break;
8543 		case 0x19: /* 32MB */
8544 			size = 1 << 25;
8545 			break;
8546 		case 0x20: /* 64MB */
8547 			size = 1 << 26;
8548 			break;
8549 		case 0x21: /* 128MB */
8550 			size = 1 << 27;
8551 			break;
8552 		case 0x22: /* 256MB */
8553 			size = 1 << 28;
8554 			break;
8555 
8556 		default:
8557 			dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8558 				flashid, density);
8559 			return -EINVAL;
8560 		}
8561 		break;
8562 	}
8563 	case 0xc2: { /* Macronix */
8564 		/* This Density -> Size decoding table is taken from Macronix
8565 		 * Data Sheets.
8566 		 */
8567 		density = (flashid >> 16) & 0xff;
8568 		switch (density) {
8569 		case 0x17: /* 8MB */
8570 			size = 1 << 23;
8571 			break;
8572 		case 0x18: /* 16MB */
8573 			size = 1 << 24;
8574 			break;
8575 		default:
8576 			dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8577 				flashid, density);
8578 			return -EINVAL;
8579 		}
8580 		break;
8581 	}
8582 	case 0xef: { /* Winbond */
8583 		/* This Density -> Size decoding table is taken from Winbond
8584 		 * Data Sheets.
8585 		 */
8586 		density = (flashid >> 16) & 0xff;
8587 		switch (density) {
8588 		case 0x17: /* 8MB */
8589 			size = 1 << 23;
8590 			break;
8591 		case 0x18: /* 16MB */
8592 			size = 1 << 24;
8593 			break;
8594 		default:
8595 			dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8596 				flashid, density);
8597 			return -EINVAL;
8598 		}
8599 		break;
8600 	}
8601 	default:
8602 		dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
8603 			flashid);
8604 		return -EINVAL;
8605 	}
8606 
8607 	/* Store decoded Flash size and fall through into vetting code. */
8608 	adap->params.sf_size = size;
8609 	adap->params.sf_nsec = size / SF_SEC_SIZE;
8610 
8611 found:
8612 	if (adap->params.sf_size < FLASH_MIN_SIZE)
8613 		dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8614 			 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
8615 	return 0;
8616 }
8617 
8618 /**
8619  *	t4_prep_adapter - prepare SW and HW for operation
8620  *	@adapter: the adapter
8621  *	@reset: if true perform a HW reset
8622  *
8623  *	Initialize adapter SW state for the various HW modules, set initial
8624  *	values for some adapter tunables, take PHYs out of reset, and
8625  *	initialize the MDIO interface.
8626  */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	/* Chip revision is taken from the PL Revision register. */
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	/* Identify the attached serial Flash part and record its geometry. */
	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID; its top nibble encodes the chip
	 * generation (T4/T5/T6) and selects the per-architecture constants
	 * cached below.
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set PCIe completion timeout to 4 seconds. */
	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
	return 0;
}
8708 
8709 /**
8710  *	t4_shutdown_adapter - shut down adapter, host & wire
8711  *	@adapter: the adapter
8712  *
8713  *	Perform an emergency shutdown of the adapter and stop it from
8714  *	continuing any further communication on the ports or DMA to the
8715  *	host.  This is typically used when the adapter and/or firmware
8716  *	have crashed and we want to prevent any further accidental
8717  *	communication with the rest of the world.  This will also force
8718  *	the port Link Status to go down -- if register writes work --
8719  *	which should help our peers figure out that we're down.
8720  */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	/* Silence all interrupt generation first. */
	t4_intr_disable(adapter);
	t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
	for_each_port(adapter, port) {
		/* T4 and T5+ keep the MAC port configuration register at
		 * different locations.
		 */
		u32 a_port_cfg = is_t4(adapter->params.chip) ?
				       PORT_REG(port, XGMAC_PORT_CFG_A) :
				       T5_PORT_REG(port, MAC_PORT_CFG_A);

		/* Clear SIGNAL_DET to force the Link Status down so our
		 * peers can tell we've gone away.
		 */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~SIGNAL_DET_V(1));
	}
	/* Finally, disable the SGE globally to stop DMA to/from the host. */
	t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);

	return 0;
}
8740 
8741 /**
8742  *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8743  *	@adapter: the adapter
8744  *	@qid: the Queue ID
8745  *	@qtype: the Ingress or Egress type for @qid
8746  *	@user: true if this request is for a user mode queue
8747  *	@pbar2_qoffset: BAR2 Queue Offset
8748  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8749  *
8750  *	Returns the BAR2 SGE Queue Registers information associated with the
8751  *	indicated Absolute Queue ID.  These are passed back in return value
8752  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8753  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8754  *
8755  *	This may return an error which indicates that BAR2 SGE Queue
8756  *	registers aren't available.  If an error is not returned, then the
8757  *	following values are returned:
8758  *
8759  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8760  *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8761  *
8762  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8763  *	require the "Inferred Queue ID" ability may be used.  E.g. the
8764  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8765  *	then these "Inferred Queue ID" register may not be used.
8766  */
8767 int t4_bar2_sge_qregs(struct adapter *adapter,
8768 		      unsigned int qid,
8769 		      enum t4_bar2_qtype qtype,
8770 		      int user,
8771 		      u64 *pbar2_qoffset,
8772 		      unsigned int *pbar2_qid)
8773 {
8774 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8775 	u64 bar2_page_offset, bar2_qoffset;
8776 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8777 
8778 	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
8779 	if (!user && is_t4(adapter->params.chip))
8780 		return -EINVAL;
8781 
8782 	/* Get our SGE Page Size parameters.
8783 	 */
8784 	page_shift = adapter->params.sge.hps + 10;
8785 	page_size = 1 << page_shift;
8786 
8787 	/* Get the right Queues per Page parameters for our Queue.
8788 	 */
8789 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8790 		     ? adapter->params.sge.eq_qpp
8791 		     : adapter->params.sge.iq_qpp);
8792 	qpp_mask = (1 << qpp_shift) - 1;
8793 
8794 	/*  Calculate the basics of the BAR2 SGE Queue register area:
8795 	 *  o The BAR2 page the Queue registers will be in.
8796 	 *  o The BAR2 Queue ID.
8797 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
8798 	 */
8799 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
8800 	bar2_qid = qid & qpp_mask;
8801 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8802 
8803 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
8804 	 * hardware will infer the Absolute Queue ID simply from the writes to
8805 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8806 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
8807 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8808 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8809 	 * from the BAR2 Page and BAR2 Queue ID.
8810 	 *
8811 	 * One important censequence of this is that some BAR2 SGE registers
8812 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8813 	 * there.  But other registers synthesize the SGE Queue ID purely
8814 	 * from the writes to the registers -- the Write Combined Doorbell
8815 	 * Buffer is a good example.  These BAR2 SGE Registers are only
8816 	 * available for those BAR2 SGE Register areas where the SGE Absolute
8817 	 * Queue ID can be inferred from simple writes.
8818 	 */
8819 	bar2_qoffset = bar2_page_offset;
8820 	bar2_qinferred = (bar2_qid_offset < page_size);
8821 	if (bar2_qinferred) {
8822 		bar2_qoffset += bar2_qid_offset;
8823 		bar2_qid = 0;
8824 	}
8825 
8826 	*pbar2_qoffset = bar2_qoffset;
8827 	*pbar2_qid = bar2_qid;
8828 	return 0;
8829 }
8830 
8831 /**
8832  *	t4_init_devlog_params - initialize adapter->params.devlog
8833  *	@adap: the adapter
8834  *
8835  *	Initialize various fields of the adapter's Firmware Device Log
8836  *	Parameters structure.
8837  */
int t4_init_devlog_params(struct adapter *adap)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		/* The address field is in units of 16 bytes. */
		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

		/* The entry count field is in units of 128 entries, stored
		 * minus one.
		 */
		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
	/* Firmware reports the address in 16-byte units as well. */
	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
8884 
8885 /**
8886  *	t4_init_sge_params - initialize adap->params.sge
8887  *	@adapter: the adapter
8888  *
8889  *	Initialize various fields of the adapter's SGE Parameters structure.
8890  */
8891 int t4_init_sge_params(struct adapter *adapter)
8892 {
8893 	struct sge_params *sge_params = &adapter->params.sge;
8894 	u32 hps, qpp;
8895 	unsigned int s_hps, s_qpp;
8896 
8897 	/* Extract the SGE Page Size for our PF.
8898 	 */
8899 	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
8900 	s_hps = (HOSTPAGESIZEPF0_S +
8901 		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
8902 	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
8903 
8904 	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
8905 	 */
8906 	s_qpp = (QUEUESPERPAGEPF0_S +
8907 		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
8908 	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
8909 	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8910 	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
8911 	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8912 
8913 	return 0;
8914 }
8915 
8916 /**
8917  *      t4_init_tp_params - initialize adap->params.tp
8918  *      @adap: the adapter
8919  *      @sleep_ok: if true we may sleep while awaiting command completion
8920  *
8921  *      Initialize various fields of the adapter's TP Parameters structure.
8922  */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;

	/* Cache the TP timer and delayed-ACK timestamp resolutions. */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
		       TP_VLAN_PRI_MAP_A, sleep_ok);
	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       TP_INGRESS_CONFIG_A, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								ETHERTYPE_F);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       MACMATCH_F);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								MPSHITTYPE_F);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   FRAGMENTATION_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	/* NOTE(review): these T6-named hash-mask registers are read
	 * unconditionally, even on T4/T5 -- confirm the read is benign on
	 * pre-T6 chips.
	 */
	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask = v;
	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
	return 0;
}
8984 
8985 /**
8986  *      t4_filter_field_shift - calculate filter field shift
8987  *      @adap: the adapter
8988  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8989  *
8990  *      Return the shift position of a filter field within the Compressed
8991  *      Filter Tuple.  The filter field is specified via its selection bit
8992  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
8993  */
8994 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8995 {
8996 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8997 	unsigned int sel;
8998 	int field_shift;
8999 
9000 	if ((filter_mode & filter_sel) == 0)
9001 		return -1;
9002 
9003 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9004 		switch (filter_mode & sel) {
9005 		case FCOE_F:
9006 			field_shift += FT_FCOE_W;
9007 			break;
9008 		case PORT_F:
9009 			field_shift += FT_PORT_W;
9010 			break;
9011 		case VNIC_ID_F:
9012 			field_shift += FT_VNIC_ID_W;
9013 			break;
9014 		case VLAN_F:
9015 			field_shift += FT_VLAN_W;
9016 			break;
9017 		case TOS_F:
9018 			field_shift += FT_TOS_W;
9019 			break;
9020 		case PROTOCOL_F:
9021 			field_shift += FT_PROTOCOL_W;
9022 			break;
9023 		case ETHERTYPE_F:
9024 			field_shift += FT_ETHERTYPE_W;
9025 			break;
9026 		case MACMATCH_F:
9027 			field_shift += FT_MACMATCH_W;
9028 			break;
9029 		case MPSHITTYPE_F:
9030 			field_shift += FT_MPSHITTYPE_W;
9031 			break;
9032 		case FRAGMENTATION_F:
9033 			field_shift += FT_FRAGMENTATION_W;
9034 			break;
9035 		}
9036 	}
9037 	return field_shift;
9038 }
9039 
/**
 *	t4_init_rss_mode - read the current RSS mode of all ports
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RSS_VI_CONFIG commands
 *
 *	Queries firmware for each port's Virtual Interface RSS configuration
 *	and caches the reported mode in the port's port_info.  Returns 0 on
 *	success or a negative firmware command error.
 */
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		/* Issue a READ of the RSS VI config for this port's VI. */
		rvc.op_to_viid =
			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}
9062 
9063 /**
9064  *	t4_init_portinfo - allocate a virtual interface and initialize port_info
9065  *	@pi: the port_info
9066  *	@mbox: mailbox to use for the FW command
9067  *	@port: physical port associated with the VI
9068  *	@pf: the PF owning the VI
9069  *	@vf: the VF owning the VI
9070  *	@mac: the MAC address of the VI
9071  *
9072  *	Allocates a virtual interface for the given physical port.  If @mac is
9073  *	not %NULL it contains the MAC address of the VI as assigned by FW.
9074  *	@mac should be large enough to hold an Ethernet address.
9075  *	Returns < 0 on error.
9076  */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		/* Older Firmware fails this SET_PARAMS, which tells us it
		 * only speaks the 16-bit format; cache the answer.
		 */
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Issue GET_PORT_INFO in whichever Capabilities format the
	 * Firmware understands.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID_V(port));
	cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/* Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		/* MDIO address is only valid when the MDIO capability flag
		 * is advertised; -1 means no MDIO.
		 */
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		/* Translate 16-bit capabilities into the 32-bit format. */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	/* Allocate the VI; on success t4_alloc_vi() returns the VI ID. */
	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
	if (ret < 0)
		return ret;

	pi->viid = ret;
	pi->tx_chan = port;
	pi->lport = port;
	pi->rss_size = rss_size;

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	/* Module type is not known yet; initialize to Not Applicable. */
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}
9157 
9158 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9159 {
9160 	u8 addr[6];
9161 	int ret, i, j = 0;
9162 
9163 	for_each_port(adap, i) {
9164 		struct port_info *pi = adap2pinfo(adap, i);
9165 
9166 		while ((adap->params.portvec & (1 << j)) == 0)
9167 			j++;
9168 
9169 		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9170 		if (ret)
9171 			return ret;
9172 
9173 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9174 		j++;
9175 	}
9176 	return 0;
9177 }
9178 
9179 /**
9180  *	t4_read_cimq_cfg - read CIM queue configuration
9181  *	@adap: the adapter
9182  *	@base: holds the queue base addresses in bytes
9183  *	@size: holds the queue sizes in bytes
9184  *	@thres: holds the queue full thresholds in bytes
9185  *
9186  *	Returns the current configuration of the CIM queues, starting with
9187  *	the IBQs, then the OBQs.
9188  */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	/* T5 and later have more outbound queues than T4. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		/* Select IBQ i, then read back its configuration. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		/* Select OBQ i; OBQs report no full threshold. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
	}
}
9213 
9214 /**
9215  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
9216  *	@adap: the adapter
9217  *	@qid: the queue index
9218  *	@data: where to store the queue contents
9219  *	@n: capacity of @data in 32-bit words
9220  *
9221  *	Reads the contents of the selected CIM queue starting at address 0 up
9222  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9223  *	error and the number of 32-bit words actually read on success.
9224  */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist and @n must be a multiple of 4. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	/* IBQs are laid out back-to-back in the debug address space;
	 * clamp the read to one queue's worth of words.
	 */
	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word and trigger a debug read, then wait for
		 * the busy flag to clear before collecting the data.
		 */
		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
			     IBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
	}
	/* Disable debug access again. */
	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
	return i;
}
9255 
9256 /**
9257  *	t4_read_cim_obq - read the contents of a CIM outbound queue
9258  *	@adap: the adapter
9259  *	@qid: the queue index
9260  *	@data: where to store the queue contents
9261  *	@n: capacity of @data in 32-bit words
9262  *
9263  *	Reads the contents of the selected CIM queue starting at address 0 up
9264  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9265  *	error and the number of 32-bit words actually read on success.
9266  */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	/* T5 and later have more outbound queues than T4. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base address and size. */
	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);

	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = CIMQSIZE_G(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word and trigger a debug read, then wait for
		 * the busy flag to clear before collecting the data.
		 */
		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
			     OBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
	}
	/* Disable debug access again. */
	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
	return i;
}
9298 
9299 /**
9300  *	t4_cim_read - read a block from CIM internal address space
9301  *	@adap: the adapter
9302  *	@addr: the start address within the CIM address space
9303  *	@n: number of words to read
9304  *	@valp: where to store the result
9305  *
 *	Reads a block of 4-byte words from the CIM internal address space.
9307  */
9308 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9309 		unsigned int *valp)
9310 {
9311 	int ret = 0;
9312 
9313 	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9314 		return -EBUSY;
9315 
9316 	for ( ; !ret && n--; addr += 4) {
9317 		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9318 		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9319 				      0, 5, 2);
9320 		if (!ret)
9321 			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9322 	}
9323 	return ret;
9324 }
9325 
9326 /**
9327  *	t4_cim_write - write a block into CIM internal address space
9328  *	@adap: the adapter
9329  *	@addr: the start address within the CIM address space
9330  *	@n: number of words to write
9331  *	@valp: set of values to write
9332  *
 *	Writes a block of 4-byte words into the CIM internal address space.
9334  */
9335 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9336 		 const unsigned int *valp)
9337 {
9338 	int ret = 0;
9339 
9340 	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9341 		return -EBUSY;
9342 
9343 	for ( ; !ret && n--; addr += 4) {
9344 		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9345 		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9346 		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9347 				      0, 5, 2);
9348 	}
9349 	return ret;
9350 }
9351 
/* Convenience wrapper around t4_cim_write(): write the single 4-byte word
 * @val into the CIM internal address space at @addr.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9357 
9358 /**
9359  *	t4_cim_read_la - read CIM LA capture buffer
9360  *	@adap: the adapter
9361  *	@la_buf: where to store the LA data
9362  *	@wrptr: the HW write pointer within the capture buffer
9363  *
9364  *	Reads the contents of the CIM LA buffer with the most recent entry at
9365  *	the end	of the returned data and with the entry at @wrptr first.
9366  *	We try to leave the LA in the running state we find it in.
9367  */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the current LA configuration so it can be restored. */
	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the hardware write pointer so the oldest entry
	 * comes out first.
	 */
	idx = UPDBGLAWRPTR_G(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Select the next entry and enable a read of it. */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
		if (ret)
			break;
		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
		if (ret)
			break;
		/* If the read-enable bit is still set, the entry read
		 * didn't complete in time.
		 */
		if (val & UPDBGLARDEN_F) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= UPDBGLARDPTR_M;
	}
restart:
	/* Restore the LA to the running state we found it in, reporting
	 * the restore failure only if nothing else failed first.
	 */
	if (cfg & UPDBGLAEN_F) {
		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				      cfg & ~UPDBGLARDEN_F);
		if (!ret)
			ret = r;
	}
	return ret;
}
9426 
9427 /**
9428  *	t4_tp_read_la - read TP LA capture buffer
9429  *	@adap: the adapter
9430  *	@la_buf: where to store the LA data
9431  *	@wrptr: the HW write pointer within the capture buffer
9432  *
9433  *	Reads the contents of the TP LA buffer with the most recent entry at
9434  *	the end	of the returned data and with the entry at @wrptr first.
9435  *	We leave the LA in the running state we find it in.
9436  */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the low config bits so the LA can be restored later. */
	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
	if (cfg & DBGLAENABLE_F)			/* freeze LA */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));

	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
	idx = DBGLAWPTR_G(val);
	/* In LA modes >= 2, a clear DBGLAWHLF flag means the final entry
	 * is only half-written; skip past it and wipe it at the end.
	 */
	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
	if (last_incomplete)
		idx = (idx + 1) & DBGLARPTR_M;
	if (wrptr)
		*wrptr = idx;

	/* Keep the low config bits, minus the read pointer field, plus
	 * the configured LA mask, for the reads below.
	 */
	val &= 0xffff;
	val &= ~DBGLARPTR_V(DBGLARPTR_M);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		/* Set the read pointer, then fetch the 64-bit entry. */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
		idx = (idx + 1) & DBGLARPTR_M;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & DBGLAENABLE_F)                    /* restore running state */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     cfg | adap->params.tp.la_mask);
}
9473 
9474 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9475  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
9476  * state for more than the Warning Threshold then we'll issue a warning about
9477  * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung, repeating every Warning Repeat seconds until the
 * situation clears.
9479  * If the situation clears, we'll note that as well.
9480  */
9481 #define SGE_IDMA_WARN_THRESH 1
9482 #define SGE_IDMA_WARN_REPEAT 300
9483 
9484 /**
9485  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9486  *	@adapter: the adapter
9487  *	@idma: the adapter IDMA Monitor state
9488  *
9489  *	Initialize the state of an SGE Ingress DMA Monitor.
9490  */
9491 void t4_idma_monitor_init(struct adapter *adapter,
9492 			  struct sge_idma_monitor_state *idma)
9493 {
9494 	/* Initialize the state variables for detecting an SGE Ingress DMA
9495 	 * hang.  The SGE has internal counters which count up on each clock
9496 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
9497 	 * same state they were on the previous clock tick.  The clock used is
9498 	 * the Core Clock so we have a limit on the maximum "time" they can
9499 	 * record; typically a very small number of seconds.  For instance,
9500 	 * with a 600MHz Core Clock, we can only count up to a bit more than
9501 	 * 7s.  So we'll synthesize a larger counter in order to not run the
9502 	 * risk of having the "timers" overflow and give us the flexibility to
9503 	 * maintain a Hung SGE State Machine of our own which operates across
9504 	 * a longer time frame.
9505 	 */
9506 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9507 	idma->idma_stalled[0] = 0;
9508 	idma->idma_stalled[1] = 0;
9509 }
9510 
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Samples the SGE's Ingress DMA "same state" counters and maintains
 *	per-channel stall timers in the @hz domain, warning (at most once
 *	every SGE_IDMA_WARN_REPEAT seconds) when a channel appears hung and
 *	noting when forward progress resumes.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
					 "resumed after %d seconds\n",
					 i, idma->idma_qid[i],
					 idma->idma_stalled[i] / hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;	/* stall just crossed 1s */
			idma->idma_warn[i] = 0;		/* so warn immediately */
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;	/* countdown to next warning */
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			 i, idma->idma_qid[i], idma->idma_state[i],
			 idma->idma_stalled[i] / hz,
			 debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
9600 
/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size; a @size of 0 simply erases the Firmware
 *	       Configuration File's FLASH sectors
 *
 *	Write the supplied config text file to the card's serial flash.
 *	Returns 0 on success or a negative errno.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	/* NOTE(review): the starting sector is computed with the fixed
	 * SF_SEC_SIZE constant while the sector span below is computed with
	 * the per-adapter sf_sec_size; the two agree only when the part's
	 * actual sector size equals SF_SEC_SIZE -- confirm for parts with
	 * other flash geometries.
	 */
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
			FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Always erase the full Config File region, regardless of @size. */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/* If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		dev_err(adap->pdev_dev, "config file %s failed %d\n",
			(size == 0 ? "clear" : "download"), ret);
	return ret;
}
9659 
/**
 *	t4_set_vf_mac_acl - Set MAC address for the specified VF
 *	@adapter: The adapter
 *	@vf: one of the VFs instantiated by the specified PF
 *	@naddr: the number of MAC addresses
 *	@addr: the MAC address(es) to be set to the specified VF
 *
 *	Issues a FW_ACL_MAC_CMD to program @naddr MAC address(es) for @vf.
 *	The MAC ACL itself is deliberately left disabled.  Returns 0 on
 *	success or a negative errno from the mailbox command.
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
				    FW_ACL_MAC_CMD_VFN_V(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* The command carries one MAC address slot per PF; PF values other
	 * than 0-3 send the command with no address copied in.
	 * NOTE(review): confirm callers only reach this with PF 0-3.
	 */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
9700 
9701 /**
9702  * t4_read_pace_tbl - read the pace table
9703  * @adap: the adapter
9704  * @pace_vals: holds the returned values
9705  *
9706  * Returns the values of TP's pace table in microseconds.
9707  */
9708 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9709 {
9710 	unsigned int i, v;
9711 
9712 	for (i = 0; i < NTX_SCHED; i++) {
9713 		t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9714 		v = t4_read_reg(adap, TP_PACE_TABLE_A);
9715 		pace_vals[i] = dack_ticks_to_usec(adap, v);
9716 	}
9717 }
9718 
9719 /**
9720  * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9721  * @adap: the adapter
9722  * @sched: the scheduler index
9723  * @kbps: the byte rate in Kbps
9724  * @ipg: the interpacket delay in tenths of nanoseconds
9725  * @sleep_ok: if true we may sleep while awaiting command completion
9726  *
9727  * Return the current configuration of a HW Tx scheduler.
9728  */
9729 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
9730 		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
9731 {
9732 	unsigned int v, addr, bpt, cpt;
9733 
9734 	if (kbps) {
9735 		addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
9736 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9737 		if (sched & 1)
9738 			v >>= 16;
9739 		bpt = (v >> 8) & 0xff;
9740 		cpt = v & 0xff;
9741 		if (!cpt) {
9742 			*kbps = 0;	/* scheduler disabled */
9743 		} else {
9744 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9745 			*kbps = (v * bpt) / 125;
9746 		}
9747 	}
9748 	if (ipg) {
9749 		addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
9750 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9751 		if (sched & 1)
9752 			v >>= 16;
9753 		v &= 0xffff;
9754 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
9755 	}
9756 }
9757 
9758 /* t4_sge_ctxt_rd - read an SGE context through FW
9759  * @adap: the adapter
9760  * @mbox: mailbox to use for the FW command
9761  * @cid: the context id
9762  * @ctype: the context type
9763  * @data: where to store the context data
9764  *
9765  * Issues a FW command through the given mailbox to read an SGE context.
9766  */
9767 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9768 		   enum ctxt_type ctype, u32 *data)
9769 {
9770 	struct fw_ldst_cmd c;
9771 	int ret;
9772 
9773 	if (ctype == CTXT_FLM)
9774 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9775 	else
9776 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
9777 
9778 	memset(&c, 0, sizeof(c));
9779 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
9780 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
9781 					FW_LDST_CMD_ADDRSPACE_V(ret));
9782 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9783 	c.u.idctxt.physid = cpu_to_be32(cid);
9784 
9785 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9786 	if (ret == 0) {
9787 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9788 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9789 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9790 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9791 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9792 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9793 	}
9794 	return ret;
9795 }
9796 
9797 /**
9798  * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9799  * @adap: the adapter
9800  * @cid: the context id
9801  * @ctype: the context type
9802  * @data: where to store the context data
9803  *
9804  * Reads an SGE context directly, bypassing FW.  This is only for
9805  * debugging when FW is unavailable.
9806  */
9807 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
9808 		      enum ctxt_type ctype, u32 *data)
9809 {
9810 	int i, ret;
9811 
9812 	t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
9813 	ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
9814 	if (!ret)
9815 		for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
9816 			*data++ = t4_read_reg(adap, i);
9817 	return ret;
9818 }
9819 
/**
 *	t4_sched_params - program a Tx HW scheduler via FW_SCHED_CMD
 *	@adapter: the adapter
 *	@type: scheduler type
 *	@level: scheduler hierarchy level
 *	@mode: scheduler mode
 *	@rateunit: rate unit selector
 *	@ratemode: rate mode selector
 *	@channel: the Tx channel
 *	@class: the scheduler class
 *	@minrate: minimum rate
 *	@maxrate: maximum rate
 *	@weight: scheduling weight
 *	@pktsize: packet size
 *
 *	Builds a FW_SCHED_CMD with sub-command FW_SCHED_SC_PARAMS carrying
 *	the given parameters (passed through to the firmware unchanged) and
 *	issues it on the adapter's mailbox.  Returns 0 on success or a
 *	negative errno from the mailbox command.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int class,
		    int minrate, int maxrate, int weight, int pktsize)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = class;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}
9848 
/**
 *	t4_i2c_rd - read I2C data from adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer in which to return I2C data
 *
 *	Reads the I2C data from the indicated device and location.
 *	Returns 0 on success or a negative errno.
 */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
	      unsigned int devid, unsigned int offset,
	      unsigned int len, u8 *buf)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* maximum data bytes a single LDST command can carry */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Don't allow reads that span multiple pages */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
			    FW_CMD_REQUEST_F |
			    FW_CMD_READ_F |
			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);	/* 0xff = not per-port */
	ldst_cmd.u.i2c.did = devid;

	/* Issue as many commands as it takes to cover the requested length,
	 * at most i2c_max bytes per mailbox command.
	 */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_rpl);
		if (ret)
			break;

		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}
9904 
9905 /**
9906  *      t4_set_vlan_acl - Set a VLAN id for the specified VF
9907  *      @adapter: the adapter
9908  *      @mbox: mailbox to use for the FW command
9909  *      @vf: one of the VFs instantiated by the specified PF
9910  *      @vlan: The vlanid to be set
9911  */
9912 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
9913 		    u16 vlan)
9914 {
9915 	struct fw_acl_vlan_cmd vlan_cmd;
9916 	unsigned int enable;
9917 
9918 	enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
9919 	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
9920 	vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
9921 					 FW_CMD_REQUEST_F |
9922 					 FW_CMD_WRITE_F |
9923 					 FW_CMD_EXEC_F |
9924 					 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
9925 					 FW_ACL_VLAN_CMD_VFN_V(vf));
9926 	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
9927 	/* Drop all packets that donot match vlan id */
9928 	vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
9929 	if (enable != 0) {
9930 		vlan_cmd.nvlan = 1;
9931 		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
9932 	}
9933 
9934 	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
9935 }
9936