xref: /openbmc/linux/drivers/infiniband/hw/hfi1/chip.c (revision 7587eb18)
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51 
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 
67 #define NUM_IB_PORTS 1
68 
69 uint kdeth_qp;
70 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
71 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
72 
73 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
74 module_param(num_vls, uint, S_IRUGO);
75 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
76 
77 /*
78  * Default time to aggregate two 10K packets from the idle state
79  * (timer not running). The timer starts at the end of the first packet,
80  * so only the time for one 10K packet and header plus a bit extra is needed.
81  * 10 * 1024 + 64 header byte = 10304 byte
82  * 10304 byte / 12.5 GB/s = 824.32ns
83  */
84 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
85 module_param(rcv_intr_timeout, uint, S_IRUGO);
86 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
87 
88 uint rcv_intr_count = 16; /* same as qib */
89 module_param(rcv_intr_count, uint, S_IRUGO);
90 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
91 
92 ushort link_crc_mask = SUPPORTED_CRCS;
93 module_param(link_crc_mask, ushort, S_IRUGO);
94 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
95 
96 uint loopback;
97 module_param_named(loopback, loopback, uint, S_IRUGO);
98 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
99 
100 /* Other driver tunables */
101 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
102 static ushort crc_14b_sideband = 1;
103 static uint use_flr = 1;
104 uint quick_linkup; /* skip LNI */
105 
106 struct flag_table {
107 	u64 flag;	/* the flag */
108 	char *str;	/* description string */
109 	u16 extra;	/* extra information */
110 	u16 unused0;
111 	u32 unused1;
112 };
113 
114 /* str must be a string constant */
115 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
116 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
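/*
 * Illustrative expansion (editor's sketch, not driver code): the entry
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * becomes the positional initializer
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. .flag = <mask>, .str = "CceCsrParityErr", .extra = 0.  FLAG_ENTRY()
 * is identical except that the caller supplies the extra value explicitly.
 */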
117 
118 /* Send Error Consequences */
119 #define SEC_WRITE_DROPPED	0x1
120 #define SEC_PACKET_DROPPED	0x2
121 #define SEC_SC_HALTED		0x4	/* per-context only */
122 #define SEC_SPC_FREEZE		0x8	/* per-HFI only */
123 
124 #define MIN_KERNEL_KCTXTS         2
125 #define FIRST_KERNEL_KCTXT        1
126 /* sizes for both the QP and RSM map tables */
127 #define NUM_MAP_ENTRIES		256
128 #define NUM_MAP_REGS             32
129 
130 /* Bit offset into the GUID which carries HFI id information */
131 #define GUID_HFI_INDEX_SHIFT     39
132 
133 /* extract the emulation revision */
134 #define emulator_rev(dd) ((dd)->irev >> 8)
135 /* parallel and serial emulation versions are 3 and 4 respectively */
136 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
137 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
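/*
 * Worked example with a hypothetical irev value: if dd->irev were 0x104,
 * emulator_rev(dd) would be 0x104 >> 8 == 1, and since (0x104 & 0xf) == 4,
 * is_emulator_s(dd) would be true and is_emulator_p(dd) false.
 */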
138 
139 /* RSM fields */
140 
141 /* packet type */
142 #define IB_PACKET_TYPE         2ull
143 #define QW_SHIFT               6ull
144 /* QPN[7..1] */
145 #define QPN_WIDTH              7ull
146 
147 /* LRH.BTH: QW 0, OFFSET 48 - for match */
148 #define LRH_BTH_QW             0ull
149 #define LRH_BTH_BIT_OFFSET     48ull
150 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
151 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
152 #define LRH_BTH_SELECT
153 #define LRH_BTH_MASK           3ull
154 #define LRH_BTH_VALUE          2ull
155 
156 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
157 #define LRH_SC_QW              0ull
158 #define LRH_SC_BIT_OFFSET      56ull
159 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
160 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
161 #define LRH_SC_MASK            128ull
162 #define LRH_SC_VALUE           0ull
163 
164 /* SC[n..0] QW 0, OFFSET 60 - for select */
165 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
166 
167 /* QPN[m+n:1] QW 1, OFFSET 1 */
168 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
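/*
 * The (QW << QW_SHIFT) | offset packing above places a quadword index in the
 * upper bits and a bit offset within that quadword in the low six bits.  For
 * example, LRH_BTH_MATCH_OFFSET evaluates to (0 << 6) | 48 = 48 (QW 0,
 * bit 48) and QPN_SELECT_OFFSET to (1 << 6) | 1 = 65 (QW 1, bit 1).
 */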
169 
170 /* defines to build power on SC2VL table */
171 #define SC2VL_VAL( \
172 	num, \
173 	sc0, sc0val, \
174 	sc1, sc1val, \
175 	sc2, sc2val, \
176 	sc3, sc3val, \
177 	sc4, sc4val, \
178 	sc5, sc5val, \
179 	sc6, sc6val, \
180 	sc7, sc7val) \
181 ( \
182 	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
183 	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
184 	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
185 	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
186 	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
187 	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
188 	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
189 	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
190 )
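/*
 * Usage sketch (illustrative only, not taken from this file): a one-to-one
 * power-on mapping of SC0..SC7 onto VL0..VL7 for SC2VL table 0 could be
 * built as
 *	SC2VL_VAL(0,
 *		  0, 0, 1, 1, 2, 2, 3, 3,
 *		  4, 4, 5, 5, 6, 6, 7, 7)
 * where each (scN, scNval) pair names the SC field and the VL value shifted
 * into it.  DC_SC_VL_VAL() below builds the DCC_CFG_SC_VL_TABLE registers
 * the same way, only with 16 entries per register.
 */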
191 
192 #define DC_SC_VL_VAL( \
193 	range, \
194 	e0, e0val, \
195 	e1, e1val, \
196 	e2, e2val, \
197 	e3, e3val, \
198 	e4, e4val, \
199 	e5, e5val, \
200 	e6, e6val, \
201 	e7, e7val, \
202 	e8, e8val, \
203 	e9, e9val, \
204 	e10, e10val, \
205 	e11, e11val, \
206 	e12, e12val, \
207 	e13, e13val, \
208 	e14, e14val, \
209 	e15, e15val) \
210 ( \
211 	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
212 	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
213 	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
214 	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
215 	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
216 	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
217 	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
218 	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
219 	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
220 	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
221 	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
222 	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
223 	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
224 	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
225 	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
226 	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
227 )
228 
229 /* all CceStatus sub-block freeze bits */
230 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
231 			| CCE_STATUS_RXE_FROZE_SMASK \
232 			| CCE_STATUS_TXE_FROZE_SMASK \
233 			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
234 /* all CceStatus sub-block TXE pause bits */
235 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
236 			| CCE_STATUS_TXE_PAUSED_SMASK \
237 			| CCE_STATUS_SDMA_PAUSED_SMASK)
238 /* all CceStatus sub-block RXE pause bits */
239 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
240 
241 /*
242  * CCE Error flags.
243  */
244 static struct flag_table cce_err_status_flags[] = {
245 /* 0*/	FLAG_ENTRY0("CceCsrParityErr",
246 		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
247 /* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
248 		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
249 /* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
250 		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
251 /* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
252 		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
253 /* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
254 		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
255 /* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
256 		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
257 /* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
258 		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
259 /* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
260 		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
261 /* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
262 		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
263 /* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
264 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
265 /*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
266 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
267 /*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
268 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
269 /*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
270 		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
271 /*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
272 		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
273 /*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
274 		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
275 /*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
276 		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
277 /*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
278 		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
279 /*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
280 		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
281 /*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
282 		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
283 /*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
284 		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
285 /*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
286 		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
287 /*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
288 		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
289 /*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
290 		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
291 /*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
292 		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
293 /*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
294 		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
295 /*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
296 		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
297 /*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
298 		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
299 /*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
300 		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
301 /*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
302 		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
303 /*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
304 		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
305 /*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
306 		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
307 /*31*/	FLAG_ENTRY0("LATriggered",
308 		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
309 /*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
310 		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
311 /*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
312 		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
313 /*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
314 		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
315 /*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
316 		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
317 /*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
318 		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
319 /*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
320 		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
321 /*38*/	FLAG_ENTRY0("CceIntMapCorErr",
322 		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
323 /*39*/	FLAG_ENTRY0("CceIntMapUncErr",
324 		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
325 /*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
326 		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
327 /*41-63 reserved*/
328 };
329 
330 /*
331  * Misc Error flags
332  */
333 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
334 static struct flag_table misc_err_status_flags[] = {
335 /* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
336 /* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
337 /* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
338 /* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
339 /* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
340 /* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
341 /* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
342 /* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
343 /* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
344 /* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
345 /*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
346 /*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
347 /*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
348 };
349 
350 /*
351  * TXE PIO Error flags and consequences
352  */
353 static struct flag_table pio_err_status_flags[] = {
354 /* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
355 	SEC_WRITE_DROPPED,
356 	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
357 /* 1*/	FLAG_ENTRY("PioWriteAddrParity",
358 	SEC_SPC_FREEZE,
359 	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
360 /* 2*/	FLAG_ENTRY("PioCsrParity",
361 	SEC_SPC_FREEZE,
362 	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
363 /* 3*/	FLAG_ENTRY("PioSbMemFifo0",
364 	SEC_SPC_FREEZE,
365 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
366 /* 4*/	FLAG_ENTRY("PioSbMemFifo1",
367 	SEC_SPC_FREEZE,
368 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
369 /* 5*/	FLAG_ENTRY("PioPccFifoParity",
370 	SEC_SPC_FREEZE,
371 	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
372 /* 6*/	FLAG_ENTRY("PioPecFifoParity",
373 	SEC_SPC_FREEZE,
374 	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
375 /* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
376 	SEC_SPC_FREEZE,
377 	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
378 /* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
379 	SEC_SPC_FREEZE,
380 	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
381 /* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
382 	SEC_SPC_FREEZE,
383 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
384 /*10*/	FLAG_ENTRY("PioSmPktResetParity",
385 	SEC_SPC_FREEZE,
386 	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
387 /*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
388 	SEC_SPC_FREEZE,
389 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
390 /*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
391 	SEC_SPC_FREEZE,
392 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
393 /*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
394 	0,
395 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
396 /*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
397 	0,
398 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
399 /*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
400 	SEC_SPC_FREEZE,
401 	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
402 /*16*/	FLAG_ENTRY("PioPpmcPblFifo",
403 	SEC_SPC_FREEZE,
404 	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
405 /*17*/	FLAG_ENTRY("PioInitSmIn",
406 	0,
407 	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
408 /*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
409 	SEC_SPC_FREEZE,
410 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
411 /*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
412 	SEC_SPC_FREEZE,
413 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
414 /*20*/	FLAG_ENTRY("PioHostAddrMemCor",
415 	0,
416 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
417 /*21*/	FLAG_ENTRY("PioWriteDataParity",
418 	SEC_SPC_FREEZE,
419 	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
420 /*22*/	FLAG_ENTRY("PioStateMachine",
421 	SEC_SPC_FREEZE,
422 	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
423 /*23*/	FLAG_ENTRY("PioWriteQwValidParity",
424 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
425 	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
426 /*24*/	FLAG_ENTRY("PioBlockQwCountParity",
427 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
428 	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
429 /*25*/	FLAG_ENTRY("PioVlfVlLenParity",
430 	SEC_SPC_FREEZE,
431 	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
432 /*26*/	FLAG_ENTRY("PioVlfSopParity",
433 	SEC_SPC_FREEZE,
434 	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
435 /*27*/	FLAG_ENTRY("PioVlFifoParity",
436 	SEC_SPC_FREEZE,
437 	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
438 /*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
439 	SEC_SPC_FREEZE,
440 	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
441 /*29*/	FLAG_ENTRY("PioPpmcSopLen",
442 	SEC_SPC_FREEZE,
443 	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
444 /*30-31 reserved*/
445 /*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
446 	SEC_SPC_FREEZE,
447 	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
448 /*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
449 	SEC_SPC_FREEZE,
450 	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
451 /*34*/	FLAG_ENTRY("PioPccSopHeadParity",
452 	SEC_SPC_FREEZE,
453 	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
454 /*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
455 	SEC_SPC_FREEZE,
456 	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
457 /*36-63 reserved*/
458 };
459 
460 /* TXE PIO errors that cause an SPC freeze */
461 #define ALL_PIO_FREEZE_ERR \
462 	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
463 	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
464 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
465 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
466 	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
467 	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
468 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
469 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
470 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
471 	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
472 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
473 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
474 	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
475 	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
476 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
477 	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
478 	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
479 	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
480 	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
481 	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
482 	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
483 	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
484 	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
485 	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
486 	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
487 	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
488 	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
489 	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
490 	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
491 
492 /*
493  * TXE SDMA Error flags
494  */
495 static struct flag_table sdma_err_status_flags[] = {
496 /* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
497 		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
498 /* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
499 		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
500 /* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
501 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
502 /* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
503 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
504 /*04-63 reserved*/
505 };
506 
507 /* TXE SDMA errors that cause an SPC freeze */
508 #define ALL_SDMA_FREEZE_ERR  \
509 		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
510 		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
511 		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
512 
513 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
514 #define PORT_DISCARD_EGRESS_ERRS \
515 	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
516 	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
517 	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
518 
519 /*
520  * TXE Egress Error flags
521  */
522 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
523 static struct flag_table egress_err_status_flags[] = {
524 /* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
525 /* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
526 /* 2 reserved */
527 /* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
528 		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
529 /* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
530 /* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
531 /* 6 reserved */
532 /* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
533 		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
534 /* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
535 		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
536 /* 9-10 reserved */
537 /*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
538 		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
539 /*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
540 /*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
541 /*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
542 /*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
543 /*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
544 		SEES(TX_SDMA0_DISALLOWED_PACKET)),
545 /*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
546 		SEES(TX_SDMA1_DISALLOWED_PACKET)),
547 /*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
548 		SEES(TX_SDMA2_DISALLOWED_PACKET)),
549 /*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
550 		SEES(TX_SDMA3_DISALLOWED_PACKET)),
551 /*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
552 		SEES(TX_SDMA4_DISALLOWED_PACKET)),
553 /*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
554 		SEES(TX_SDMA5_DISALLOWED_PACKET)),
555 /*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
556 		SEES(TX_SDMA6_DISALLOWED_PACKET)),
557 /*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
558 		SEES(TX_SDMA7_DISALLOWED_PACKET)),
559 /*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
560 		SEES(TX_SDMA8_DISALLOWED_PACKET)),
561 /*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
562 		SEES(TX_SDMA9_DISALLOWED_PACKET)),
563 /*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
564 		SEES(TX_SDMA10_DISALLOWED_PACKET)),
565 /*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
566 		SEES(TX_SDMA11_DISALLOWED_PACKET)),
567 /*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
568 		SEES(TX_SDMA12_DISALLOWED_PACKET)),
569 /*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
570 		SEES(TX_SDMA13_DISALLOWED_PACKET)),
571 /*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
572 		SEES(TX_SDMA14_DISALLOWED_PACKET)),
573 /*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
574 		SEES(TX_SDMA15_DISALLOWED_PACKET)),
575 /*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
576 		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
577 /*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
578 		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
579 /*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
580 		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
581 /*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
582 		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
583 /*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
584 		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
585 /*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
586 		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
587 /*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
588 		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
589 /*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
590 		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
591 /*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
592 		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
593 /*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
594 /*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
595 /*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
596 /*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
597 /*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
598 /*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
599 /*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
600 /*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
601 /*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
602 /*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
603 /*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
604 /*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
605 /*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
606 /*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
607 /*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
608 /*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
609 /*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
610 /*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
611 /*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
612 /*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
613 /*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
614 /*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
615 		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
616 /*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
617 		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
618 };
619 
620 /*
621  * TXE Egress Error Info flags
622  */
623 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
624 static struct flag_table egress_err_info_flags[] = {
625 /* 0*/	FLAG_ENTRY0("Reserved", 0ull),
626 /* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
627 /* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
628 /* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629 /* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
630 /* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
631 /* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
632 /* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
633 /* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
634 /* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
635 /*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
636 /*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
637 /*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
638 /*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
639 /*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
640 /*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
641 /*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
642 /*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
643 /*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
644 /*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
645 /*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
646 /*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
647 };
648 
649 /* TXE Egress errors that cause an SPC freeze */
650 #define ALL_TXE_EGRESS_FREEZE_ERR \
651 	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
652 	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
653 	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
654 	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
655 	| SEES(TX_LAUNCH_CSR_PARITY) \
656 	| SEES(TX_SBRD_CTL_CSR_PARITY) \
657 	| SEES(TX_CONFIG_PARITY) \
658 	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
659 	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
660 	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
661 	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
662 	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
663 	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
664 	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
665 	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
666 	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
667 	| SEES(TX_CREDIT_RETURN_PARITY))
668 
669 /*
670  * TXE Send error flags
671  */
672 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
673 static struct flag_table send_err_status_flags[] = {
674 /* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
675 /* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
676 /* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
677 };
678 
679 /*
680  * TXE Send Context Error flags and consequences
681  */
682 static struct flag_table sc_err_status_flags[] = {
683 /* 0*/	FLAG_ENTRY("InconsistentSop",
684 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
685 		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
686 /* 1*/	FLAG_ENTRY("DisallowedPacket",
687 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
688 		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
689 /* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
690 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
691 		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
692 /* 3*/	FLAG_ENTRY("WriteOverflow",
693 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
694 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
695 /* 4*/	FLAG_ENTRY("WriteOutOfBounds",
696 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
697 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
698 /* 5-63 reserved*/
699 };
700 
701 /*
702  * RXE Receive Error flags
703  */
704 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
705 static struct flag_table rxe_err_status_flags[] = {
706 /* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
707 /* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
708 /* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
709 /* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
710 /* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
711 /* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
712 /* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
713 /* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
714 /* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
715 /* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
716 /*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
717 /*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
718 /*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
719 /*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
720 /*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
721 /*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
722 /*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
723 		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
724 /*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
725 /*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
726 /*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
727 		RXES(RBUF_BLOCK_LIST_READ_UNC)),
728 /*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
729 		RXES(RBUF_BLOCK_LIST_READ_COR)),
730 /*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
731 		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
732 /*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
733 		RXES(RBUF_CSR_QENT_CNT_PARITY)),
734 /*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
735 		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
736 /*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
737 		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
738 /*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
739 /*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
740 /*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
741 		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
742 /*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
743 /*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
744 /*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
745 /*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
746 /*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
747 /*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
748 /*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
749 /*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
750 		RXES(RBUF_FL_INITDONE_PARITY)),
751 /*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
752 		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
753 /*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
754 /*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
755 /*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
756 /*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
757 		RXES(LOOKUP_DES_PART1_UNC_COR)),
758 /*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
759 		RXES(LOOKUP_DES_PART2_PARITY)),
760 /*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
761 /*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
762 /*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
763 /*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
764 /*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
765 /*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
766 /*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
767 /*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
768 /*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
769 /*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
770 /*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
771 /*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
772 /*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
773 /*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
774 /*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
775 /*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
776 /*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
777 /*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
778 /*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
779 /*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
780 /*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
781 /*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
782 };
783 
784 /* RXE errors that will trigger an SPC freeze */
785 #define ALL_RXE_FREEZE_ERR  \
786 	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
787 	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
788 	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
789 	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
790 	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
791 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
792 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
793 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
794 	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
795 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
796 	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
797 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
798 	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
799 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
800 	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
801 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
802 	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
803 	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
804 	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
805 	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
806 	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
807 	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
808 	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
809 	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
810 	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
811 	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
812 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
813 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
814 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
815 	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
816 	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
817 	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
818 	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
819 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
820 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
821 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
822 	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
823 	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
824 	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
825 	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
826 	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
827 	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
828 	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
829 	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
830 
831 #define RXE_FREEZE_ABORT_MASK \
832 	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
833 	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
834 	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
835 
836 /*
837  * DCC Error Flags
838  */
839 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
840 static struct flag_table dcc_err_flags[] = {
841 	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
842 	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
843 	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
844 	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
845 	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
846 	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
847 	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
848 	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
849 	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
850 	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
851 	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
852 	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
853 	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
854 	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
855 	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
856 	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
857 	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
858 	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
859 	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
860 	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
861 	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
862 	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
863 	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
864 	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
865 	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
866 	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
867 	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
868 	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
869 	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
870 	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
871 	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
872 	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
873 	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
874 	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
875 	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
876 	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
877 	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
878 	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
879 	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
880 	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
881 	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
882 	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
883 	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
884 	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
885 	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
886 	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
887 };
888 
889 /*
890  * LCB error flags
891  */
892 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
893 static struct flag_table lcb_err_flags[] = {
894 /* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
895 /* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
896 /* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
897 /* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
898 		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
899 /* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
900 /* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
901 /* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
902 /* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
903 /* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
904 /* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
905 /*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
906 /*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
907 /*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
908 /*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
909 		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
910 /*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
911 /*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
912 /*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
913 /*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
914 /*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
915 /*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
916 		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
917 /*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
918 /*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
919 /*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
920 /*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
921 /*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
922 /*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
923 /*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
924 		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
925 /*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
926 /*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
927 		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
928 /*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
929 		LCBE(REDUNDANT_FLIT_PARITY_ERR))
930 };
931 
932 /*
933  * DC8051 Error Flags
934  */
935 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
936 static struct flag_table dc8051_err_flags[] = {
937 	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
938 	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
939 	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
940 	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
941 	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
942 	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
943 	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
944 	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
945 	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
946 		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
947 	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
948 };
949 
950 /*
951  * DC8051 Information Error flags
952  *
953  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
954  */
955 static struct flag_table dc8051_info_err_flags[] = {
956 	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
957 	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
958 	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
959 	FLAG_ENTRY0("Serdes internal loopback failure",
960 		    FAILED_SERDES_INTERNAL_LOOPBACK),
961 	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
962 	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
963 	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
964 	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
965 	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
966 	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
967 	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
968 	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
969 	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT)
970 };
971 
972 /*
973  * DC8051 Information Host Information flags
974  *
975  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976  */
977 static struct flag_table dc8051_info_host_msg_flags[] = {
978 	FLAG_ENTRY0("Host request done", 0x0001),
979 	FLAG_ENTRY0("BC SMA message", 0x0002),
980 	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 	FLAG_ENTRY0("External device config request", 0x0020),
984 	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 	FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 	FLAG_ENTRY0("Link going down", 0x0100),
987 };
988 
989 static u32 encoded_size(u32 size);
990 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 			       u8 *continuous);
994 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 				      u8 *remote_tx_rate, u16 *link_widths);
998 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 				     u8 *flag_bits, u16 *link_widths);
1000 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 				  u8 *device_rev);
1002 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 			    u8 *tx_polarity_inversion,
1006 			    u8 *rx_polarity_inversion, u8 *max_rate);
1007 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 				unsigned int context, u64 err_status);
1009 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010 static void handle_dcc_err(struct hfi1_devdata *dd,
1011 			   unsigned int context, u64 err_status);
1012 static void handle_lcb_err(struct hfi1_devdata *dd,
1013 			   unsigned int context, u64 err_status);
1014 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022 static void set_partition_keys(struct hfi1_pportdata *);
1023 static const char *link_state_name(u32 state);
1024 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 					  u32 state);
1026 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 			   u64 *out_data);
1028 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029 static int thermal_init(struct hfi1_devdata *dd);
1030 
1031 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 				  int msecs);
1033 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1034 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1035 static void handle_temp_err(struct hfi1_devdata *);
1036 static void dc_shutdown(struct hfi1_devdata *);
1037 static void dc_start(struct hfi1_devdata *);
1038 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 			   unsigned int *np);
1040 static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1041 
1042 /*
1043  * Error interrupt table entry.  This is used as input to the interrupt
1044  * "clear down" routine used for all second tier error interrupt register.
1045  * Second tier interrupt registers have a single bit representing them
1046  * in the top-level CceIntStatus.
1047  */
1048 struct err_reg_info {
1049 	u32 status;		/* status CSR offset */
1050 	u32 clear;		/* clear CSR offset */
1051 	u32 mask;		/* mask CSR offset */
1052 	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1053 	const char *desc;
1054 };
1055 
1056 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1057 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1058 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1059 
1060 /*
1061  * Helpers for building HFI and DC error interrupt table entries.  Different
1062  * helpers are needed because of inconsistent register names.
1063  */
1064 #define EE(reg, handler, desc) \
1065 	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1066 		handler, desc }
1067 #define DC_EE1(reg, handler, desc) \
1068 	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1069 #define DC_EE2(reg, handler, desc) \
1070 	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
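/*
 * Example expansion (illustrative): EE(CCE_ERR, handle_cce_err, "CceErr")
 * produces
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * while DC_EE1() and DC_EE2() substitute the DC register names ending in
 * _FLG, _FLG_CLR or _CLR, and _FLG_EN or _EN.
 */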
1071 
1072 /*
1073  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1074  * another register containing more information.
1075  */
1076 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1077 /* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
1078 /* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
1079 /* 2*/	EE(MISC_ERR,	handle_misc_err,   "MiscErr"),
1080 /* 3*/	{ 0, 0, 0, NULL }, /* reserved */
1081 /* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1082 /* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1083 /* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1084 /* 7*/	EE(SEND_ERR,	handle_txe_err,    "TxeErr")
1085 	/* the rest are reserved */
1086 };
1087 
1088 /*
1089  * Index into the Various section of the interrupt sources
1090  * corresponding to the Critical Temperature interrupt.
1091  */
1092 #define TCRIT_INT_SOURCE 4
1093 
1094 /*
1095  * SDMA error interrupt entry - refers to another register containing more
1096  * information.
1097  */
1098 static const struct err_reg_info sdma_eng_err =
1099 	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1100 
1101 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1102 /* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
1103 /* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
1104 /* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
1105 /* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
1106 /* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
1107 	/* rest are reserved */
1108 };
1109 
1110 /*
1111  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1112  * register cannot be derived from the MTU value because 10K is not
1113  * a power of 2. Therefore, we need a constant. Everything else can
1114  * be calculated.
1115  */
1116 #define DCC_CFG_PORT_MTU_CAP_10240 7
1117 
1118 /*
1119  * Table of the DC grouping of error interrupts.  Each entry refers to
1120  * another register containing more information.
1121  */
1122 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1123 /* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
1124 /* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
1125 /* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
1126 /* 3*/	/* dc_lbm_int - special, see is_dc_int() */
1127 	/* the rest are reserved */
1128 };
1129 
1130 struct cntr_entry {
1131 	/*
1132 	 * counter name
1133 	 */
1134 	char *name;
1135 
1136 	/*
1137 	 * csr to read for name (if applicable)
1138 	 */
1139 	u64 csr;
1140 
1141 	/*
1142 	 * offset into dd or ppd to store the counter's value
1143 	 */
1144 	int offset;
1145 
1146 	/*
1147 	 * flags
1148 	 */
1149 	u8 flags;
1150 
1151 	/*
1152 	 * accessor for stat element, context either dd or ppd
1153 	 */
1154 	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1155 		       int mode, u64 data);
1156 };
1157 
1158 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1159 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1160 
1161 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1162 { \
1163 	name, \
1164 	csr, \
1165 	offset, \
1166 	flags, \
1167 	accessor \
1168 }
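/*
 * CNTR_ELEM() is a plain positional initializer for struct cntr_entry; the
 * helper macros below differ only in how they compute the csr field and in
 * which accessor routine they plug in.
 */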
1169 
1170 /* 32bit RXE */
1171 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1172 CNTR_ELEM(#name, \
1173 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1174 	  0, flags | CNTR_32BIT, \
1175 	  port_access_u32_csr)
1176 
1177 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1178 CNTR_ELEM(#name, \
1179 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1180 	  0, flags | CNTR_32BIT, \
1181 	  dev_access_u32_csr)
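/*
 * Illustrative instance (the counter name and index are made up): a
 * hypothetical RXE32_DEV_CNTR_ELEM(RcvFoo, 3, CNTR_NORMAL) yields an entry
 * named "RcvFoo" whose csr is RCV_COUNTER_ARRAY32 + 3 * 8, read through
 * dev_access_u32_csr with flags CNTR_NORMAL | CNTR_32BIT.
 */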
1182 
1183 /* 64bit RXE */
1184 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1185 CNTR_ELEM(#name, \
1186 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1187 	  0, flags, \
1188 	  port_access_u64_csr)
1189 
1190 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1191 CNTR_ELEM(#name, \
1192 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1193 	  0, flags, \
1194 	  dev_access_u64_csr)
1195 
1196 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1197 #define OVR_ELM(ctx) \
1198 CNTR_ELEM("RcvHdrOvr" #ctx, \
1199 	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1200 	  0, CNTR_NORMAL, port_access_u64_csr)
1201 
1202 /* 32bit TXE */
1203 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204 CNTR_ELEM(#name, \
1205 	  (counter * 8 + SEND_COUNTER_ARRAY32), \
1206 	  0, flags | CNTR_32BIT, \
1207 	  port_access_u32_csr)
1208 
1209 /* 64bit TXE */
1210 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211 CNTR_ELEM(#name, \
1212 	  (counter * 8 + SEND_COUNTER_ARRAY64), \
1213 	  0, flags, \
1214 	  port_access_u64_csr)
1215 
1216 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217 CNTR_ELEM(#name,\
1218 	  counter * 8 + SEND_COUNTER_ARRAY64, \
1219 	  0, \
1220 	  flags, \
1221 	  dev_access_u64_csr)
1222 
1223 /* CCE */
1224 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226 	  (counter * 8 + CCE_COUNTER_ARRAY32), \
1227 	  0, flags | CNTR_32BIT, \
1228 	  dev_access_u32_csr)
1229 
1230 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231 CNTR_ELEM(#name, \
1232 	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233 	  0, flags | CNTR_32BIT, \
1234 	  dev_access_u32_csr)
1235 
1236 /* DC */
1237 #define DC_PERF_CNTR(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239 	  counter, \
1240 	  0, \
1241 	  flags, \
1242 	  dev_access_u64_csr)
1243 
1244 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1245 CNTR_ELEM(#name, \
1246 	  counter, \
1247 	  0, \
1248 	  flags, \
1249 	  dc_access_lcb_cntr)
1250 
1251 /* ibp counters */
1252 #define SW_IBP_CNTR(name, cntr) \
1253 CNTR_ELEM(#name, \
1254 	  0, \
1255 	  0, \
1256 	  CNTR_SYNTH, \
1257 	  access_ibp_##cntr)
1258 
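/*
 * Illustrative example only (the name "RcvFooCnt" and index 3 are made
 * up): an entry such as
 *
 *	RXE32_DEV_CNTR_ELEM(RcvFooCnt, 3, CNTR_NORMAL)
 *
 * expands through CNTR_ELEM() to the initializer
 *
 *	{ "RcvFooCnt", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *	  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr },
 *
 * i.e. each counter occupies an 8-byte slot in its CSR array and all
 * reads/writes go through the accessor stored in ->rw_cntr, with either
 * the dd or ppd pointer as context.
 */
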
1259 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1260 {
1261 	if (dd->flags & HFI1_PRESENT)
1262 		return readq((void __iomem *)dd->kregbase + offset);
1264 	return -1;
1265 }
1266 
1267 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1268 {
1269 	if (dd->flags & HFI1_PRESENT)
1270 		writeq(value, (void __iomem *)dd->kregbase + offset);
1271 }
1272 
1273 void __iomem *get_csr_addr(
1274 	struct hfi1_devdata *dd,
1275 	u32 offset)
1276 {
1277 	return (void __iomem *)dd->kregbase + offset;
1278 }
1279 
1280 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1281 				 int mode, u64 value)
1282 {
1283 	u64 ret;
1284 
1285 	if (mode == CNTR_MODE_R) {
1286 		ret = read_csr(dd, csr);
1287 	} else if (mode == CNTR_MODE_W) {
1288 		write_csr(dd, csr, value);
1289 		ret = value;
1290 	} else {
1291 		dd_dev_err(dd, "Invalid cntr register access mode");
1292 		return 0;
1293 	}
1294 
1295 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1296 	return ret;
1297 }
1298 
1299 /* Dev Access */
1300 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1301 			      void *context, int vl, int mode, u64 data)
1302 {
1303 	struct hfi1_devdata *dd = context;
1304 	u64 csr = entry->csr;
1305 
1306 	if (entry->flags & CNTR_SDMA) {
1307 		if (vl == CNTR_INVALID_VL)
1308 			return 0;
1309 		csr += 0x100 * vl;
1310 	} else {
1311 		if (vl != CNTR_INVALID_VL)
1312 			return 0;
1313 	}
1314 	return read_write_csr(dd, csr, mode, data);
1315 }
1316 
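/*
 * Per-SDMA-engine software counters.  For these accessors the third
 * argument of the rw_cntr callback carries the SDMA engine index rather
 * than a VL; an unallocated or out-of-range engine simply reads as 0.
 */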
1317 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1318 			      void *context, int idx, int mode, u64 data)
1319 {
1320 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1321 
1322 	if (dd->per_sdma && idx < dd->num_sdma)
1323 		return dd->per_sdma[idx].err_cnt;
1324 	return 0;
1325 }
1326 
1327 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1328 			      void *context, int idx, int mode, u64 data)
1329 {
1330 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1331 
1332 	if (dd->per_sdma && idx < dd->num_sdma)
1333 		return dd->per_sdma[idx].sdma_int_cnt;
1334 	return 0;
1335 }
1336 
1337 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1338 				   void *context, int idx, int mode, u64 data)
1339 {
1340 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1341 
1342 	if (dd->per_sdma && idx < dd->num_sdma)
1343 		return dd->per_sdma[idx].idle_int_cnt;
1344 	return 0;
1345 }
1346 
1347 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1348 				       void *context, int idx, int mode,
1349 				       u64 data)
1350 {
1351 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1352 
1353 	if (dd->per_sdma && idx < dd->num_sdma)
1354 		return dd->per_sdma[idx].progress_int_cnt;
1355 	return 0;
1356 }
1357 
1358 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1359 			      int vl, int mode, u64 data)
1360 {
1361 	struct hfi1_devdata *dd = context;
1362 
1363 	u64 val = 0;
1364 	u64 csr = entry->csr;
1365 
1366 	if (entry->flags & CNTR_VL) {
1367 		if (vl == CNTR_INVALID_VL)
1368 			return 0;
1369 		csr += 8 * vl;
1370 	} else {
1371 		if (vl != CNTR_INVALID_VL)
1372 			return 0;
1373 	}
1374 
1375 	val = read_write_csr(dd, csr, mode, data);
1376 	return val;
1377 }
1378 
1379 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1380 			      int vl, int mode, u64 data)
1381 {
1382 	struct hfi1_devdata *dd = context;
1383 	u32 csr = entry->csr;
1384 	int ret = 0;
1385 
1386 	if (vl != CNTR_INVALID_VL)
1387 		return 0;
1388 	if (mode == CNTR_MODE_R)
1389 		ret = read_lcb_csr(dd, csr, &data);
1390 	else if (mode == CNTR_MODE_W)
1391 		ret = write_lcb_csr(dd, csr, data);
1392 
1393 	if (ret) {
1394 		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1395 		return 0;
1396 	}
1397 
1398 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1399 	return data;
1400 }
1401 
1402 /* Port Access */
1403 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1404 			       int vl, int mode, u64 data)
1405 {
1406 	struct hfi1_pportdata *ppd = context;
1407 
1408 	if (vl != CNTR_INVALID_VL)
1409 		return 0;
1410 	return read_write_csr(ppd->dd, entry->csr, mode, data);
1411 }
1412 
1413 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1414 			       void *context, int vl, int mode, u64 data)
1415 {
1416 	struct hfi1_pportdata *ppd = context;
1417 	u64 val;
1418 	u64 csr = entry->csr;
1419 
1420 	if (entry->flags & CNTR_VL) {
1421 		if (vl == CNTR_INVALID_VL)
1422 			return 0;
1423 		csr += 8 * vl;
1424 	} else {
1425 		if (vl != CNTR_INVALID_VL)
1426 			return 0;
1427 	}
1428 	val = read_write_csr(ppd->dd, csr, mode, data);
1429 	return val;
1430 }
1431 
1432 /* Software defined */
1433 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1434 				u64 data)
1435 {
1436 	u64 ret;
1437 
1438 	if (mode == CNTR_MODE_R) {
1439 		ret = *cntr;
1440 	} else if (mode == CNTR_MODE_W) {
1441 		*cntr = data;
1442 		ret = data;
1443 	} else {
1444 		dd_dev_err(dd, "Invalid cntr sw access mode");
1445 		return 0;
1446 	}
1447 
1448 	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1449 
1450 	return ret;
1451 }
1452 
1453 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1454 				 int vl, int mode, u64 data)
1455 {
1456 	struct hfi1_pportdata *ppd = context;
1457 
1458 	if (vl != CNTR_INVALID_VL)
1459 		return 0;
1460 	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1461 }
1462 
1463 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1464 				 int vl, int mode, u64 data)
1465 {
1466 	struct hfi1_pportdata *ppd = context;
1467 
1468 	if (vl != CNTR_INVALID_VL)
1469 		return 0;
1470 	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1471 }
1472 
1473 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1474 				       void *context, int vl, int mode,
1475 				       u64 data)
1476 {
1477 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1478 
1479 	if (vl != CNTR_INVALID_VL)
1480 		return 0;
1481 	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1482 }
1483 
1484 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1485 				   void *context, int vl, int mode, u64 data)
1486 {
1487 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1488 	u64 zero = 0;
1489 	u64 *counter;
1490 
1491 	if (vl == CNTR_INVALID_VL)
1492 		counter = &ppd->port_xmit_discards;
1493 	else if (vl >= 0 && vl < C_VL_COUNT)
1494 		counter = &ppd->port_xmit_discards_vl[vl];
1495 	else
1496 		counter = &zero;
1497 
1498 	return read_write_sw(ppd->dd, counter, mode, data);
1499 }
1500 
1501 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1502 				       void *context, int vl, int mode,
1503 				       u64 data)
1504 {
1505 	struct hfi1_pportdata *ppd = context;
1506 
1507 	if (vl != CNTR_INVALID_VL)
1508 		return 0;
1509 
1510 	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1511 			     mode, data);
1512 }
1513 
1514 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1515 				      void *context, int vl, int mode, u64 data)
1516 {
1517 	struct hfi1_pportdata *ppd = context;
1518 
1519 	if (vl != CNTR_INVALID_VL)
1520 		return 0;
1521 
1522 	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1523 			     mode, data);
1524 }
1525 
1526 u64 get_all_cpu_total(u64 __percpu *cntr)
1527 {
1528 	int cpu;
1529 	u64 counter = 0;
1530 
1531 	for_each_possible_cpu(cpu)
1532 		counter += *per_cpu_ptr(cntr, cpu);
1533 	return counter;
1534 }
1535 
1536 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1537 			  u64 __percpu *cntr,
1538 			  int vl, int mode, u64 data)
1539 {
1540 	u64 ret = 0;
1541 
1542 	if (vl != CNTR_INVALID_VL)
1543 		return 0;
1544 
1545 	if (mode == CNTR_MODE_R) {
1546 		ret = get_all_cpu_total(cntr) - *z_val;
1547 	} else if (mode == CNTR_MODE_W) {
1548 		/* A write can only zero the counter */
1549 		if (data == 0)
1550 			*z_val = get_all_cpu_total(cntr);
1551 		else
1552 			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1553 	} else {
1554 		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1555 		return 0;
1556 	}
1557 
1558 	return ret;
1559 }
1560 
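/*
 * The per-CPU device counters below are read and zeroed through
 * read_write_cpu().  Usage sketch (assumed caller, not part of this
 * file), using the interrupt counter as an example:
 *
 *	// zero: snapshot the current total into the baseline
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *	// read: total over all possible CPUs minus that baseline
 *	total = read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *			       CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */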
1561 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1562 			      void *context, int vl, int mode, u64 data)
1563 {
1564 	struct hfi1_devdata *dd = context;
1565 
1566 	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1567 			      mode, data);
1568 }
1569 
1570 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1571 				   void *context, int vl, int mode, u64 data)
1572 {
1573 	struct hfi1_devdata *dd = context;
1574 
1575 	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1576 			      mode, data);
1577 }
1578 
1579 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1580 			      void *context, int vl, int mode, u64 data)
1581 {
1582 	struct hfi1_devdata *dd = context;
1583 
1584 	return dd->verbs_dev.n_piowait;
1585 }
1586 
1587 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1588 			       void *context, int vl, int mode, u64 data)
1589 {
1590 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1591 
1592 	return dd->verbs_dev.n_piodrain;
1593 }
1594 
1595 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1596 			      void *context, int vl, int mode, u64 data)
1597 {
1598 	struct hfi1_devdata *dd = context;
1599 
1600 	return dd->verbs_dev.n_txwait;
1601 }
1602 
1603 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1604 			       void *context, int vl, int mode, u64 data)
1605 {
1606 	struct hfi1_devdata *dd = context;
1607 
1608 	return dd->verbs_dev.n_kmem_wait;
1609 }
1610 
1611 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1612 				   void *context, int vl, int mode, u64 data)
1613 {
1614 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1615 
1616 	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1617 			      mode, data);
1618 }
1619 
1620 /* Software counters for the error status bits within MISC_ERR_STATUS */
1621 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1622 					     void *context, int vl, int mode,
1623 					     u64 data)
1624 {
1625 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1626 
1627 	return dd->misc_err_status_cnt[12];
1628 }
1629 
1630 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1631 					  void *context, int vl, int mode,
1632 					  u64 data)
1633 {
1634 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635 
1636 	return dd->misc_err_status_cnt[11];
1637 }
1638 
1639 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1640 					       void *context, int vl, int mode,
1641 					       u64 data)
1642 {
1643 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644 
1645 	return dd->misc_err_status_cnt[10];
1646 }
1647 
1648 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1649 						 void *context, int vl,
1650 						 int mode, u64 data)
1651 {
1652 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653 
1654 	return dd->misc_err_status_cnt[9];
1655 }
1656 
1657 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1658 					   void *context, int vl, int mode,
1659 					   u64 data)
1660 {
1661 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662 
1663 	return dd->misc_err_status_cnt[8];
1664 }
1665 
1666 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1667 				const struct cntr_entry *entry,
1668 				void *context, int vl, int mode, u64 data)
1669 {
1670 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671 
1672 	return dd->misc_err_status_cnt[7];
1673 }
1674 
1675 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1676 						void *context, int vl,
1677 						int mode, u64 data)
1678 {
1679 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680 
1681 	return dd->misc_err_status_cnt[6];
1682 }
1683 
1684 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1685 					      void *context, int vl, int mode,
1686 					      u64 data)
1687 {
1688 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689 
1690 	return dd->misc_err_status_cnt[5];
1691 }
1692 
1693 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1694 					    void *context, int vl, int mode,
1695 					    u64 data)
1696 {
1697 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698 
1699 	return dd->misc_err_status_cnt[4];
1700 }
1701 
1702 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1703 						 void *context, int vl,
1704 						 int mode, u64 data)
1705 {
1706 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707 
1708 	return dd->misc_err_status_cnt[3];
1709 }
1710 
1711 static u64 access_misc_csr_write_bad_addr_err_cnt(
1712 				const struct cntr_entry *entry,
1713 				void *context, int vl, int mode, u64 data)
1714 {
1715 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716 
1717 	return dd->misc_err_status_cnt[2];
1718 }
1719 
1720 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1721 						 void *context, int vl,
1722 						 int mode, u64 data)
1723 {
1724 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1725 
1726 	return dd->misc_err_status_cnt[1];
1727 }
1728 
1729 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1730 					  void *context, int vl, int mode,
1731 					  u64 data)
1732 {
1733 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1734 
1735 	return dd->misc_err_status_cnt[0];
1736 }
1737 
1738 /*
1739  * Software counter for the aggregate of
1740  * individual CceErrStatus counters
1741  */
1742 static u64 access_sw_cce_err_status_aggregated_cnt(
1743 				const struct cntr_entry *entry,
1744 				void *context, int vl, int mode, u64 data)
1745 {
1746 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747 
1748 	return dd->sw_cce_err_status_aggregate;
1749 }
1750 
1751 /*
1752  * Software counters corresponding to each of the
1753  * error status bits within CceErrStatus
1754  */
1755 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1756 					      void *context, int vl, int mode,
1757 					      u64 data)
1758 {
1759 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1760 
1761 	return dd->cce_err_status_cnt[40];
1762 }
1763 
1764 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1765 					  void *context, int vl, int mode,
1766 					  u64 data)
1767 {
1768 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769 
1770 	return dd->cce_err_status_cnt[39];
1771 }
1772 
1773 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1774 					  void *context, int vl, int mode,
1775 					  u64 data)
1776 {
1777 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778 
1779 	return dd->cce_err_status_cnt[38];
1780 }
1781 
1782 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1783 					     void *context, int vl, int mode,
1784 					     u64 data)
1785 {
1786 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787 
1788 	return dd->cce_err_status_cnt[37];
1789 }
1790 
1791 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1792 					     void *context, int vl, int mode,
1793 					     u64 data)
1794 {
1795 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796 
1797 	return dd->cce_err_status_cnt[36];
1798 }
1799 
1800 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1801 				const struct cntr_entry *entry,
1802 				void *context, int vl, int mode, u64 data)
1803 {
1804 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805 
1806 	return dd->cce_err_status_cnt[35];
1807 }
1808 
1809 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1810 				const struct cntr_entry *entry,
1811 				void *context, int vl, int mode, u64 data)
1812 {
1813 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814 
1815 	return dd->cce_err_status_cnt[34];
1816 }
1817 
1818 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1819 						 void *context, int vl,
1820 						 int mode, u64 data)
1821 {
1822 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1823 
1824 	return dd->cce_err_status_cnt[33];
1825 }
1826 
1827 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1828 						void *context, int vl, int mode,
1829 						u64 data)
1830 {
1831 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1832 
1833 	return dd->cce_err_status_cnt[32];
1834 }
1835 
1836 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1837 				   void *context, int vl, int mode, u64 data)
1838 {
1839 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840 
1841 	return dd->cce_err_status_cnt[31];
1842 }
1843 
1844 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1845 					       void *context, int vl, int mode,
1846 					       u64 data)
1847 {
1848 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849 
1850 	return dd->cce_err_status_cnt[30];
1851 }
1852 
1853 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1854 					      void *context, int vl, int mode,
1855 					      u64 data)
1856 {
1857 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858 
1859 	return dd->cce_err_status_cnt[29];
1860 }
1861 
1862 static u64 access_pcic_transmit_back_parity_err_cnt(
1863 				const struct cntr_entry *entry,
1864 				void *context, int vl, int mode, u64 data)
1865 {
1866 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867 
1868 	return dd->cce_err_status_cnt[28];
1869 }
1870 
1871 static u64 access_pcic_transmit_front_parity_err_cnt(
1872 				const struct cntr_entry *entry,
1873 				void *context, int vl, int mode, u64 data)
1874 {
1875 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876 
1877 	return dd->cce_err_status_cnt[27];
1878 }
1879 
1880 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1881 					     void *context, int vl, int mode,
1882 					     u64 data)
1883 {
1884 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885 
1886 	return dd->cce_err_status_cnt[26];
1887 }
1888 
1889 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1890 					    void *context, int vl, int mode,
1891 					    u64 data)
1892 {
1893 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894 
1895 	return dd->cce_err_status_cnt[25];
1896 }
1897 
1898 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1899 					      void *context, int vl, int mode,
1900 					      u64 data)
1901 {
1902 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903 
1904 	return dd->cce_err_status_cnt[24];
1905 }
1906 
1907 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1908 					     void *context, int vl, int mode,
1909 					     u64 data)
1910 {
1911 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912 
1913 	return dd->cce_err_status_cnt[23];
1914 }
1915 
1916 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1917 						 void *context, int vl,
1918 						 int mode, u64 data)
1919 {
1920 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921 
1922 	return dd->cce_err_status_cnt[22];
1923 }
1924 
1925 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1926 					 void *context, int vl, int mode,
1927 					 u64 data)
1928 {
1929 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930 
1931 	return dd->cce_err_status_cnt[21];
1932 }
1933 
1934 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1935 				const struct cntr_entry *entry,
1936 				void *context, int vl, int mode, u64 data)
1937 {
1938 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939 
1940 	return dd->cce_err_status_cnt[20];
1941 }
1942 
1943 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1944 						 void *context, int vl,
1945 						 int mode, u64 data)
1946 {
1947 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948 
1949 	return dd->cce_err_status_cnt[19];
1950 }
1951 
1952 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1953 					     void *context, int vl, int mode,
1954 					     u64 data)
1955 {
1956 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957 
1958 	return dd->cce_err_status_cnt[18];
1959 }
1960 
1961 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1962 					    void *context, int vl, int mode,
1963 					    u64 data)
1964 {
1965 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966 
1967 	return dd->cce_err_status_cnt[17];
1968 }
1969 
1970 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1971 					      void *context, int vl, int mode,
1972 					      u64 data)
1973 {
1974 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975 
1976 	return dd->cce_err_status_cnt[16];
1977 }
1978 
1979 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1980 					     void *context, int vl, int mode,
1981 					     u64 data)
1982 {
1983 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984 
1985 	return dd->cce_err_status_cnt[15];
1986 }
1987 
1988 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1989 						 void *context, int vl,
1990 						 int mode, u64 data)
1991 {
1992 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993 
1994 	return dd->cce_err_status_cnt[14];
1995 }
1996 
1997 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1998 					     void *context, int vl, int mode,
1999 					     u64 data)
2000 {
2001 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002 
2003 	return dd->cce_err_status_cnt[13];
2004 }
2005 
2006 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2007 				const struct cntr_entry *entry,
2008 				void *context, int vl, int mode, u64 data)
2009 {
2010 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011 
2012 	return dd->cce_err_status_cnt[12];
2013 }
2014 
2015 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2016 				const struct cntr_entry *entry,
2017 				void *context, int vl, int mode, u64 data)
2018 {
2019 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020 
2021 	return dd->cce_err_status_cnt[11];
2022 }
2023 
2024 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2025 				const struct cntr_entry *entry,
2026 				void *context, int vl, int mode, u64 data)
2027 {
2028 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029 
2030 	return dd->cce_err_status_cnt[10];
2031 }
2032 
2033 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2034 				const struct cntr_entry *entry,
2035 				void *context, int vl, int mode, u64 data)
2036 {
2037 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038 
2039 	return dd->cce_err_status_cnt[9];
2040 }
2041 
2042 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2043 				const struct cntr_entry *entry,
2044 				void *context, int vl, int mode, u64 data)
2045 {
2046 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047 
2048 	return dd->cce_err_status_cnt[8];
2049 }
2050 
2051 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2052 						 void *context, int vl,
2053 						 int mode, u64 data)
2054 {
2055 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056 
2057 	return dd->cce_err_status_cnt[7];
2058 }
2059 
2060 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2061 				const struct cntr_entry *entry,
2062 				void *context, int vl, int mode, u64 data)
2063 {
2064 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065 
2066 	return dd->cce_err_status_cnt[6];
2067 }
2068 
2069 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2070 					       void *context, int vl, int mode,
2071 					       u64 data)
2072 {
2073 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074 
2075 	return dd->cce_err_status_cnt[5];
2076 }
2077 
2078 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2079 					  void *context, int vl, int mode,
2080 					  u64 data)
2081 {
2082 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083 
2084 	return dd->cce_err_status_cnt[4];
2085 }
2086 
2087 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2088 				const struct cntr_entry *entry,
2089 				void *context, int vl, int mode, u64 data)
2090 {
2091 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092 
2093 	return dd->cce_err_status_cnt[3];
2094 }
2095 
2096 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2097 						 void *context, int vl,
2098 						 int mode, u64 data)
2099 {
2100 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101 
2102 	return dd->cce_err_status_cnt[2];
2103 }
2104 
2105 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2106 						void *context, int vl,
2107 						int mode, u64 data)
2108 {
2109 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2110 
2111 	return dd->cce_err_status_cnt[1];
2112 }
2113 
2114 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2115 					 void *context, int vl, int mode,
2116 					 u64 data)
2117 {
2118 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2119 
2120 	return dd->cce_err_status_cnt[0];
2121 }
2122 
2123 /*
2124  * Software counters corresponding to each of the
2125  * error status bits within RcvErrStatus
2126  */
2127 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2128 					void *context, int vl, int mode,
2129 					u64 data)
2130 {
2131 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132 
2133 	return dd->rcv_err_status_cnt[63];
2134 }
2135 
2136 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2137 						void *context, int vl,
2138 						int mode, u64 data)
2139 {
2140 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141 
2142 	return dd->rcv_err_status_cnt[62];
2143 }
2144 
2145 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2146 					       void *context, int vl, int mode,
2147 					       u64 data)
2148 {
2149 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150 
2151 	return dd->rcv_err_status_cnt[61];
2152 }
2153 
2154 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2155 					 void *context, int vl, int mode,
2156 					 u64 data)
2157 {
2158 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159 
2160 	return dd->rcv_err_status_cnt[60];
2161 }
2162 
2163 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2164 						 void *context, int vl,
2165 						 int mode, u64 data)
2166 {
2167 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168 
2169 	return dd->rcv_err_status_cnt[59];
2170 }
2171 
2172 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2173 						 void *context, int vl,
2174 						 int mode, u64 data)
2175 {
2176 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177 
2178 	return dd->rcv_err_status_cnt[58];
2179 }
2180 
2181 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2182 					    void *context, int vl, int mode,
2183 					    u64 data)
2184 {
2185 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186 
2187 	return dd->rcv_err_status_cnt[57];
2188 }
2189 
2190 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2191 					   void *context, int vl, int mode,
2192 					   u64 data)
2193 {
2194 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195 
2196 	return dd->rcv_err_status_cnt[56];
2197 }
2198 
2199 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2200 					   void *context, int vl, int mode,
2201 					   u64 data)
2202 {
2203 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204 
2205 	return dd->rcv_err_status_cnt[55];
2206 }
2207 
2208 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2209 				const struct cntr_entry *entry,
2210 				void *context, int vl, int mode, u64 data)
2211 {
2212 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213 
2214 	return dd->rcv_err_status_cnt[54];
2215 }
2216 
2217 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2218 				const struct cntr_entry *entry,
2219 				void *context, int vl, int mode, u64 data)
2220 {
2221 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222 
2223 	return dd->rcv_err_status_cnt[53];
2224 }
2225 
2226 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2227 						 void *context, int vl,
2228 						 int mode, u64 data)
2229 {
2230 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231 
2232 	return dd->rcv_err_status_cnt[52];
2233 }
2234 
2235 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2236 						 void *context, int vl,
2237 						 int mode, u64 data)
2238 {
2239 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240 
2241 	return dd->rcv_err_status_cnt[51];
2242 }
2243 
2244 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2245 						 void *context, int vl,
2246 						 int mode, u64 data)
2247 {
2248 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249 
2250 	return dd->rcv_err_status_cnt[50];
2251 }
2252 
2253 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2254 						 void *context, int vl,
2255 						 int mode, u64 data)
2256 {
2257 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258 
2259 	return dd->rcv_err_status_cnt[49];
2260 }
2261 
2262 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2263 						 void *context, int vl,
2264 						 int mode, u64 data)
2265 {
2266 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267 
2268 	return dd->rcv_err_status_cnt[48];
2269 }
2270 
2271 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2272 						 void *context, int vl,
2273 						 int mode, u64 data)
2274 {
2275 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276 
2277 	return dd->rcv_err_status_cnt[47];
2278 }
2279 
2280 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2281 					 void *context, int vl, int mode,
2282 					 u64 data)
2283 {
2284 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285 
2286 	return dd->rcv_err_status_cnt[46];
2287 }
2288 
2289 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2290 				const struct cntr_entry *entry,
2291 				void *context, int vl, int mode, u64 data)
2292 {
2293 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294 
2295 	return dd->rcv_err_status_cnt[45];
2296 }
2297 
2298 static u64 access_rx_lookup_csr_parity_err_cnt(
2299 				const struct cntr_entry *entry,
2300 				void *context, int vl, int mode, u64 data)
2301 {
2302 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303 
2304 	return dd->rcv_err_status_cnt[44];
2305 }
2306 
2307 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2308 				const struct cntr_entry *entry,
2309 				void *context, int vl, int mode, u64 data)
2310 {
2311 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312 
2313 	return dd->rcv_err_status_cnt[43];
2314 }
2315 
2316 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2317 				const struct cntr_entry *entry,
2318 				void *context, int vl, int mode, u64 data)
2319 {
2320 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321 
2322 	return dd->rcv_err_status_cnt[42];
2323 }
2324 
2325 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2326 				const struct cntr_entry *entry,
2327 				void *context, int vl, int mode, u64 data)
2328 {
2329 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330 
2331 	return dd->rcv_err_status_cnt[41];
2332 }
2333 
2334 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2335 				const struct cntr_entry *entry,
2336 				void *context, int vl, int mode, u64 data)
2337 {
2338 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339 
2340 	return dd->rcv_err_status_cnt[40];
2341 }
2342 
2343 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2344 				const struct cntr_entry *entry,
2345 				void *context, int vl, int mode, u64 data)
2346 {
2347 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348 
2349 	return dd->rcv_err_status_cnt[39];
2350 }
2351 
2352 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2353 				const struct cntr_entry *entry,
2354 				void *context, int vl, int mode, u64 data)
2355 {
2356 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357 
2358 	return dd->rcv_err_status_cnt[38];
2359 }
2360 
2361 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2362 				const struct cntr_entry *entry,
2363 				void *context, int vl, int mode, u64 data)
2364 {
2365 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366 
2367 	return dd->rcv_err_status_cnt[37];
2368 }
2369 
2370 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2371 				const struct cntr_entry *entry,
2372 				void *context, int vl, int mode, u64 data)
2373 {
2374 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375 
2376 	return dd->rcv_err_status_cnt[36];
2377 }
2378 
2379 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2380 				const struct cntr_entry *entry,
2381 				void *context, int vl, int mode, u64 data)
2382 {
2383 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384 
2385 	return dd->rcv_err_status_cnt[35];
2386 }
2387 
2388 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2389 				const struct cntr_entry *entry,
2390 				void *context, int vl, int mode, u64 data)
2391 {
2392 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393 
2394 	return dd->rcv_err_status_cnt[34];
2395 }
2396 
2397 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2398 				const struct cntr_entry *entry,
2399 				void *context, int vl, int mode, u64 data)
2400 {
2401 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402 
2403 	return dd->rcv_err_status_cnt[33];
2404 }
2405 
2406 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2407 					void *context, int vl, int mode,
2408 					u64 data)
2409 {
2410 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411 
2412 	return dd->rcv_err_status_cnt[32];
2413 }
2414 
2415 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2416 				       void *context, int vl, int mode,
2417 				       u64 data)
2418 {
2419 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420 
2421 	return dd->rcv_err_status_cnt[31];
2422 }
2423 
2424 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2425 					  void *context, int vl, int mode,
2426 					  u64 data)
2427 {
2428 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429 
2430 	return dd->rcv_err_status_cnt[30];
2431 }
2432 
2433 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2434 					     void *context, int vl, int mode,
2435 					     u64 data)
2436 {
2437 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438 
2439 	return dd->rcv_err_status_cnt[29];
2440 }
2441 
2442 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2443 						 void *context, int vl,
2444 						 int mode, u64 data)
2445 {
2446 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447 
2448 	return dd->rcv_err_status_cnt[28];
2449 }
2450 
2451 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2452 				const struct cntr_entry *entry,
2453 				void *context, int vl, int mode, u64 data)
2454 {
2455 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456 
2457 	return dd->rcv_err_status_cnt[27];
2458 }
2459 
2460 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2461 				const struct cntr_entry *entry,
2462 				void *context, int vl, int mode, u64 data)
2463 {
2464 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465 
2466 	return dd->rcv_err_status_cnt[26];
2467 }
2468 
2469 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2470 				const struct cntr_entry *entry,
2471 				void *context, int vl, int mode, u64 data)
2472 {
2473 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474 
2475 	return dd->rcv_err_status_cnt[25];
2476 }
2477 
2478 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2479 				const struct cntr_entry *entry,
2480 				void *context, int vl, int mode, u64 data)
2481 {
2482 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483 
2484 	return dd->rcv_err_status_cnt[24];
2485 }
2486 
2487 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2488 				const struct cntr_entry *entry,
2489 				void *context, int vl, int mode, u64 data)
2490 {
2491 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492 
2493 	return dd->rcv_err_status_cnt[23];
2494 }
2495 
2496 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2497 				const struct cntr_entry *entry,
2498 				void *context, int vl, int mode, u64 data)
2499 {
2500 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501 
2502 	return dd->rcv_err_status_cnt[22];
2503 }
2504 
2505 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2506 				const struct cntr_entry *entry,
2507 				void *context, int vl, int mode, u64 data)
2508 {
2509 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510 
2511 	return dd->rcv_err_status_cnt[21];
2512 }
2513 
2514 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2515 				const struct cntr_entry *entry,
2516 				void *context, int vl, int mode, u64 data)
2517 {
2518 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519 
2520 	return dd->rcv_err_status_cnt[20];
2521 }
2522 
2523 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2524 				const struct cntr_entry *entry,
2525 				void *context, int vl, int mode, u64 data)
2526 {
2527 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528 
2529 	return dd->rcv_err_status_cnt[19];
2530 }
2531 
2532 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2533 						 void *context, int vl,
2534 						 int mode, u64 data)
2535 {
2536 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537 
2538 	return dd->rcv_err_status_cnt[18];
2539 }
2540 
2541 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2542 						 void *context, int vl,
2543 						 int mode, u64 data)
2544 {
2545 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546 
2547 	return dd->rcv_err_status_cnt[17];
2548 }
2549 
2550 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2551 				const struct cntr_entry *entry,
2552 				void *context, int vl, int mode, u64 data)
2553 {
2554 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555 
2556 	return dd->rcv_err_status_cnt[16];
2557 }
2558 
2559 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2560 				const struct cntr_entry *entry,
2561 				void *context, int vl, int mode, u64 data)
2562 {
2563 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564 
2565 	return dd->rcv_err_status_cnt[15];
2566 }
2567 
2568 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2569 						void *context, int vl,
2570 						int mode, u64 data)
2571 {
2572 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573 
2574 	return dd->rcv_err_status_cnt[14];
2575 }
2576 
2577 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2578 						void *context, int vl,
2579 						int mode, u64 data)
2580 {
2581 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582 
2583 	return dd->rcv_err_status_cnt[13];
2584 }
2585 
2586 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2587 					      void *context, int vl, int mode,
2588 					      u64 data)
2589 {
2590 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591 
2592 	return dd->rcv_err_status_cnt[12];
2593 }
2594 
2595 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2596 					  void *context, int vl, int mode,
2597 					  u64 data)
2598 {
2599 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600 
2601 	return dd->rcv_err_status_cnt[11];
2602 }
2603 
2604 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2605 					  void *context, int vl, int mode,
2606 					  u64 data)
2607 {
2608 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609 
2610 	return dd->rcv_err_status_cnt[10];
2611 }
2612 
2613 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2614 					       void *context, int vl, int mode,
2615 					       u64 data)
2616 {
2617 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618 
2619 	return dd->rcv_err_status_cnt[9];
2620 }
2621 
2622 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2623 					    void *context, int vl, int mode,
2624 					    u64 data)
2625 {
2626 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627 
2628 	return dd->rcv_err_status_cnt[8];
2629 }
2630 
2631 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2632 				const struct cntr_entry *entry,
2633 				void *context, int vl, int mode, u64 data)
2634 {
2635 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636 
2637 	return dd->rcv_err_status_cnt[7];
2638 }
2639 
2640 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2641 				const struct cntr_entry *entry,
2642 				void *context, int vl, int mode, u64 data)
2643 {
2644 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645 
2646 	return dd->rcv_err_status_cnt[6];
2647 }
2648 
2649 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2650 					  void *context, int vl, int mode,
2651 					  u64 data)
2652 {
2653 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654 
2655 	return dd->rcv_err_status_cnt[5];
2656 }
2657 
2658 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2659 					  void *context, int vl, int mode,
2660 					  u64 data)
2661 {
2662 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663 
2664 	return dd->rcv_err_status_cnt[4];
2665 }
2666 
2667 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2668 					 void *context, int vl, int mode,
2669 					 u64 data)
2670 {
2671 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672 
2673 	return dd->rcv_err_status_cnt[3];
2674 }
2675 
2676 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2677 					 void *context, int vl, int mode,
2678 					 u64 data)
2679 {
2680 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681 
2682 	return dd->rcv_err_status_cnt[2];
2683 }
2684 
2685 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2686 					    void *context, int vl, int mode,
2687 					    u64 data)
2688 {
2689 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2690 
2691 	return dd->rcv_err_status_cnt[1];
2692 }
2693 
2694 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2695 					 void *context, int vl, int mode,
2696 					 u64 data)
2697 {
2698 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2699 
2700 	return dd->rcv_err_status_cnt[0];
2701 }
2702 
2703 /*
2704  * Software counters corresponding to each of the
2705  * error status bits within SendPioErrStatus
2706  */
2707 static u64 access_pio_pec_sop_head_parity_err_cnt(
2708 				const struct cntr_entry *entry,
2709 				void *context, int vl, int mode, u64 data)
2710 {
2711 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712 
2713 	return dd->send_pio_err_status_cnt[35];
2714 }
2715 
2716 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2717 				const struct cntr_entry *entry,
2718 				void *context, int vl, int mode, u64 data)
2719 {
2720 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721 
2722 	return dd->send_pio_err_status_cnt[34];
2723 }
2724 
2725 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2726 				const struct cntr_entry *entry,
2727 				void *context, int vl, int mode, u64 data)
2728 {
2729 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730 
2731 	return dd->send_pio_err_status_cnt[33];
2732 }
2733 
2734 static u64 access_pio_current_free_cnt_parity_err_cnt(
2735 				const struct cntr_entry *entry,
2736 				void *context, int vl, int mode, u64 data)
2737 {
2738 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739 
2740 	return dd->send_pio_err_status_cnt[32];
2741 }
2742 
2743 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2744 					  void *context, int vl, int mode,
2745 					  u64 data)
2746 {
2747 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748 
2749 	return dd->send_pio_err_status_cnt[31];
2750 }
2751 
2752 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2753 					  void *context, int vl, int mode,
2754 					  u64 data)
2755 {
2756 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757 
2758 	return dd->send_pio_err_status_cnt[30];
2759 }
2760 
2761 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2762 					   void *context, int vl, int mode,
2763 					   u64 data)
2764 {
2765 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766 
2767 	return dd->send_pio_err_status_cnt[29];
2768 }
2769 
2770 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2771 				const struct cntr_entry *entry,
2772 				void *context, int vl, int mode, u64 data)
2773 {
2774 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775 
2776 	return dd->send_pio_err_status_cnt[28];
2777 }
2778 
2779 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2780 					     void *context, int vl, int mode,
2781 					     u64 data)
2782 {
2783 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784 
2785 	return dd->send_pio_err_status_cnt[27];
2786 }
2787 
2788 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2789 					     void *context, int vl, int mode,
2790 					     u64 data)
2791 {
2792 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793 
2794 	return dd->send_pio_err_status_cnt[26];
2795 }
2796 
2797 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2798 						void *context, int vl,
2799 						int mode, u64 data)
2800 {
2801 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802 
2803 	return dd->send_pio_err_status_cnt[25];
2804 }
2805 
2806 static u64 access_pio_block_qw_count_parity_err_cnt(
2807 				const struct cntr_entry *entry,
2808 				void *context, int vl, int mode, u64 data)
2809 {
2810 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811 
2812 	return dd->send_pio_err_status_cnt[24];
2813 }
2814 
2815 static u64 access_pio_write_qw_valid_parity_err_cnt(
2816 				const struct cntr_entry *entry,
2817 				void *context, int vl, int mode, u64 data)
2818 {
2819 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820 
2821 	return dd->send_pio_err_status_cnt[23];
2822 }
2823 
2824 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2825 					    void *context, int vl, int mode,
2826 					    u64 data)
2827 {
2828 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829 
2830 	return dd->send_pio_err_status_cnt[22];
2831 }
2832 
2833 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2834 						void *context, int vl,
2835 						int mode, u64 data)
2836 {
2837 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838 
2839 	return dd->send_pio_err_status_cnt[21];
2840 }
2841 
2842 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2843 						void *context, int vl,
2844 						int mode, u64 data)
2845 {
2846 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847 
2848 	return dd->send_pio_err_status_cnt[20];
2849 }
2850 
2851 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2852 						void *context, int vl,
2853 						int mode, u64 data)
2854 {
2855 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856 
2857 	return dd->send_pio_err_status_cnt[19];
2858 }
2859 
2860 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2861 				const struct cntr_entry *entry,
2862 				void *context, int vl, int mode, u64 data)
2863 {
2864 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865 
2866 	return dd->send_pio_err_status_cnt[18];
2867 }
2868 
2869 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2870 					 void *context, int vl, int mode,
2871 					 u64 data)
2872 {
2873 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874 
2875 	return dd->send_pio_err_status_cnt[17];
2876 }
2877 
2878 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2879 					    void *context, int vl, int mode,
2880 					    u64 data)
2881 {
2882 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883 
2884 	return dd->send_pio_err_status_cnt[16];
2885 }
2886 
2887 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2888 				const struct cntr_entry *entry,
2889 				void *context, int vl, int mode, u64 data)
2890 {
2891 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892 
2893 	return dd->send_pio_err_status_cnt[15];
2894 }
2895 
2896 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2897 				const struct cntr_entry *entry,
2898 				void *context, int vl, int mode, u64 data)
2899 {
2900 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901 
2902 	return dd->send_pio_err_status_cnt[14];
2903 }
2904 
2905 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2906 				const struct cntr_entry *entry,
2907 				void *context, int vl, int mode, u64 data)
2908 {
2909 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910 
2911 	return dd->send_pio_err_status_cnt[13];
2912 }
2913 
2914 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2915 				const struct cntr_entry *entry,
2916 				void *context, int vl, int mode, u64 data)
2917 {
2918 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919 
2920 	return dd->send_pio_err_status_cnt[12];
2921 }
2922 
2923 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2924 				const struct cntr_entry *entry,
2925 				void *context, int vl, int mode, u64 data)
2926 {
2927 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928 
2929 	return dd->send_pio_err_status_cnt[11];
2930 }
2931 
2932 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2933 				const struct cntr_entry *entry,
2934 				void *context, int vl, int mode, u64 data)
2935 {
2936 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937 
2938 	return dd->send_pio_err_status_cnt[10];
2939 }
2940 
2941 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2942 				const struct cntr_entry *entry,
2943 				void *context, int vl, int mode, u64 data)
2944 {
2945 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946 
2947 	return dd->send_pio_err_status_cnt[9];
2948 }
2949 
2950 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2951 				const struct cntr_entry *entry,
2952 				void *context, int vl, int mode, u64 data)
2953 {
2954 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955 
2956 	return dd->send_pio_err_status_cnt[8];
2957 }
2958 
2959 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2960 				const struct cntr_entry *entry,
2961 				void *context, int vl, int mode, u64 data)
2962 {
2963 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964 
2965 	return dd->send_pio_err_status_cnt[7];
2966 }
2967 
2968 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2969 					      void *context, int vl, int mode,
2970 					      u64 data)
2971 {
2972 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973 
2974 	return dd->send_pio_err_status_cnt[6];
2975 }
2976 
2977 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2978 					      void *context, int vl, int mode,
2979 					      u64 data)
2980 {
2981 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982 
2983 	return dd->send_pio_err_status_cnt[5];
2984 }
2985 
2986 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2987 					   void *context, int vl, int mode,
2988 					   u64 data)
2989 {
2990 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991 
2992 	return dd->send_pio_err_status_cnt[4];
2993 }
2994 
2995 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2996 					   void *context, int vl, int mode,
2997 					   u64 data)
2998 {
2999 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000 
3001 	return dd->send_pio_err_status_cnt[3];
3002 }
3003 
3004 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3005 					 void *context, int vl, int mode,
3006 					 u64 data)
3007 {
3008 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009 
3010 	return dd->send_pio_err_status_cnt[2];
3011 }
3012 
3013 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3014 						void *context, int vl,
3015 						int mode, u64 data)
3016 {
3017 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3018 
3019 	return dd->send_pio_err_status_cnt[1];
3020 }
3021 
3022 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3023 					     void *context, int vl, int mode,
3024 					     u64 data)
3025 {
3026 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3027 
3028 	return dd->send_pio_err_status_cnt[0];
3029 }
3030 
3031 /*
3032  * Software counters corresponding to each of the
3033  * error status bits within SendDmaErrStatus
3034  */
3035 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3036 				const struct cntr_entry *entry,
3037 				void *context, int vl, int mode, u64 data)
3038 {
3039 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040 
3041 	return dd->send_dma_err_status_cnt[3];
3042 }
3043 
3044 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3045 				const struct cntr_entry *entry,
3046 				void *context, int vl, int mode, u64 data)
3047 {
3048 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049 
3050 	return dd->send_dma_err_status_cnt[2];
3051 }
3052 
3053 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3054 					  void *context, int vl, int mode,
3055 					  u64 data)
3056 {
3057 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058 
3059 	return dd->send_dma_err_status_cnt[1];
3060 }
3061 
3062 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3063 				       void *context, int vl, int mode,
3064 				       u64 data)
3065 {
3066 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067 
3068 	return dd->send_dma_err_status_cnt[0];
3069 }
3070 
3071 /*
3072  * Software counters corresponding to each of the
3073  * error status bits within SendEgressErrStatus
3074  */
3075 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3076 				const struct cntr_entry *entry,
3077 				void *context, int vl, int mode, u64 data)
3078 {
3079 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080 
3081 	return dd->send_egress_err_status_cnt[63];
3082 }
3083 
3084 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3085 				const struct cntr_entry *entry,
3086 				void *context, int vl, int mode, u64 data)
3087 {
3088 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089 
3090 	return dd->send_egress_err_status_cnt[62];
3091 }
3092 
3093 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3094 					     void *context, int vl, int mode,
3095 					     u64 data)
3096 {
3097 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098 
3099 	return dd->send_egress_err_status_cnt[61];
3100 }
3101 
3102 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3103 						 void *context, int vl,
3104 						 int mode, u64 data)
3105 {
3106 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107 
3108 	return dd->send_egress_err_status_cnt[60];
3109 }
3110 
3111 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3112 				const struct cntr_entry *entry,
3113 				void *context, int vl, int mode, u64 data)
3114 {
3115 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116 
3117 	return dd->send_egress_err_status_cnt[59];
3118 }
3119 
3120 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3121 					void *context, int vl, int mode,
3122 					u64 data)
3123 {
3124 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125 
3126 	return dd->send_egress_err_status_cnt[58];
3127 }
3128 
3129 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3130 					    void *context, int vl, int mode,
3131 					    u64 data)
3132 {
3133 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134 
3135 	return dd->send_egress_err_status_cnt[57];
3136 }
3137 
3138 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3139 					      void *context, int vl, int mode,
3140 					      u64 data)
3141 {
3142 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143 
3144 	return dd->send_egress_err_status_cnt[56];
3145 }
3146 
3147 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3148 					      void *context, int vl, int mode,
3149 					      u64 data)
3150 {
3151 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152 
3153 	return dd->send_egress_err_status_cnt[55];
3154 }
3155 
3156 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3157 					      void *context, int vl, int mode,
3158 					      u64 data)
3159 {
3160 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161 
3162 	return dd->send_egress_err_status_cnt[54];
3163 }
3164 
3165 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3166 					      void *context, int vl, int mode,
3167 					      u64 data)
3168 {
3169 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170 
3171 	return dd->send_egress_err_status_cnt[53];
3172 }
3173 
3174 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3175 					      void *context, int vl, int mode,
3176 					      u64 data)
3177 {
3178 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179 
3180 	return dd->send_egress_err_status_cnt[52];
3181 }
3182 
3183 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3184 					      void *context, int vl, int mode,
3185 					      u64 data)
3186 {
3187 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188 
3189 	return dd->send_egress_err_status_cnt[51];
3190 }
3191 
3192 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3193 					      void *context, int vl, int mode,
3194 					      u64 data)
3195 {
3196 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197 
3198 	return dd->send_egress_err_status_cnt[50];
3199 }
3200 
3201 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3202 					      void *context, int vl, int mode,
3203 					      u64 data)
3204 {
3205 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206 
3207 	return dd->send_egress_err_status_cnt[49];
3208 }
3209 
3210 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3211 					      void *context, int vl, int mode,
3212 					      u64 data)
3213 {
3214 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215 
3216 	return dd->send_egress_err_status_cnt[48];
3217 }
3218 
3219 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3220 					      void *context, int vl, int mode,
3221 					      u64 data)
3222 {
3223 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224 
3225 	return dd->send_egress_err_status_cnt[47];
3226 }
3227 
3228 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3229 					    void *context, int vl, int mode,
3230 					    u64 data)
3231 {
3232 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233 
3234 	return dd->send_egress_err_status_cnt[46];
3235 }
3236 
3237 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3238 					     void *context, int vl, int mode,
3239 					     u64 data)
3240 {
3241 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242 
3243 	return dd->send_egress_err_status_cnt[45];
3244 }
3245 
3246 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3247 						 void *context, int vl,
3248 						 int mode, u64 data)
3249 {
3250 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251 
3252 	return dd->send_egress_err_status_cnt[44];
3253 }
3254 
3255 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3256 				const struct cntr_entry *entry,
3257 				void *context, int vl, int mode, u64 data)
3258 {
3259 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260 
3261 	return dd->send_egress_err_status_cnt[43];
3262 }
3263 
3264 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3265 					void *context, int vl, int mode,
3266 					u64 data)
3267 {
3268 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269 
3270 	return dd->send_egress_err_status_cnt[42];
3271 }
3272 
3273 static u64 access_tx_credit_return_partiy_err_cnt(
3274 				const struct cntr_entry *entry,
3275 				void *context, int vl, int mode, u64 data)
3276 {
3277 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278 
3279 	return dd->send_egress_err_status_cnt[41];
3280 }
3281 
3282 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3283 				const struct cntr_entry *entry,
3284 				void *context, int vl, int mode, u64 data)
3285 {
3286 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287 
3288 	return dd->send_egress_err_status_cnt[40];
3289 }
3290 
3291 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3292 				const struct cntr_entry *entry,
3293 				void *context, int vl, int mode, u64 data)
3294 {
3295 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296 
3297 	return dd->send_egress_err_status_cnt[39];
3298 }
3299 
3300 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3301 				const struct cntr_entry *entry,
3302 				void *context, int vl, int mode, u64 data)
3303 {
3304 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305 
3306 	return dd->send_egress_err_status_cnt[38];
3307 }
3308 
3309 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3310 				const struct cntr_entry *entry,
3311 				void *context, int vl, int mode, u64 data)
3312 {
3313 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314 
3315 	return dd->send_egress_err_status_cnt[37];
3316 }
3317 
3318 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3319 				const struct cntr_entry *entry,
3320 				void *context, int vl, int mode, u64 data)
3321 {
3322 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323 
3324 	return dd->send_egress_err_status_cnt[36];
3325 }
3326 
3327 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3328 				const struct cntr_entry *entry,
3329 				void *context, int vl, int mode, u64 data)
3330 {
3331 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332 
3333 	return dd->send_egress_err_status_cnt[35];
3334 }
3335 
3336 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3337 				const struct cntr_entry *entry,
3338 				void *context, int vl, int mode, u64 data)
3339 {
3340 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341 
3342 	return dd->send_egress_err_status_cnt[34];
3343 }
3344 
3345 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3346 				const struct cntr_entry *entry,
3347 				void *context, int vl, int mode, u64 data)
3348 {
3349 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350 
3351 	return dd->send_egress_err_status_cnt[33];
3352 }
3353 
3354 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3355 				const struct cntr_entry *entry,
3356 				void *context, int vl, int mode, u64 data)
3357 {
3358 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359 
3360 	return dd->send_egress_err_status_cnt[32];
3361 }
3362 
3363 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3364 				const struct cntr_entry *entry,
3365 				void *context, int vl, int mode, u64 data)
3366 {
3367 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368 
3369 	return dd->send_egress_err_status_cnt[31];
3370 }
3371 
3372 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3373 				const struct cntr_entry *entry,
3374 				void *context, int vl, int mode, u64 data)
3375 {
3376 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377 
3378 	return dd->send_egress_err_status_cnt[30];
3379 }
3380 
3381 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3382 				const struct cntr_entry *entry,
3383 				void *context, int vl, int mode, u64 data)
3384 {
3385 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386 
3387 	return dd->send_egress_err_status_cnt[29];
3388 }
3389 
3390 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3391 				const struct cntr_entry *entry,
3392 				void *context, int vl, int mode, u64 data)
3393 {
3394 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395 
3396 	return dd->send_egress_err_status_cnt[28];
3397 }
3398 
3399 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3400 				const struct cntr_entry *entry,
3401 				void *context, int vl, int mode, u64 data)
3402 {
3403 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404 
3405 	return dd->send_egress_err_status_cnt[27];
3406 }
3407 
3408 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3409 				const struct cntr_entry *entry,
3410 				void *context, int vl, int mode, u64 data)
3411 {
3412 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413 
3414 	return dd->send_egress_err_status_cnt[26];
3415 }
3416 
3417 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3418 				const struct cntr_entry *entry,
3419 				void *context, int vl, int mode, u64 data)
3420 {
3421 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422 
3423 	return dd->send_egress_err_status_cnt[25];
3424 }
3425 
3426 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3427 				const struct cntr_entry *entry,
3428 				void *context, int vl, int mode, u64 data)
3429 {
3430 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431 
3432 	return dd->send_egress_err_status_cnt[24];
3433 }
3434 
3435 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3436 				const struct cntr_entry *entry,
3437 				void *context, int vl, int mode, u64 data)
3438 {
3439 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440 
3441 	return dd->send_egress_err_status_cnt[23];
3442 }
3443 
3444 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3445 				const struct cntr_entry *entry,
3446 				void *context, int vl, int mode, u64 data)
3447 {
3448 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449 
3450 	return dd->send_egress_err_status_cnt[22];
3451 }
3452 
3453 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3454 				const struct cntr_entry *entry,
3455 				void *context, int vl, int mode, u64 data)
3456 {
3457 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458 
3459 	return dd->send_egress_err_status_cnt[21];
3460 }
3461 
3462 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3463 				const struct cntr_entry *entry,
3464 				void *context, int vl, int mode, u64 data)
3465 {
3466 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467 
3468 	return dd->send_egress_err_status_cnt[20];
3469 }
3470 
3471 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3472 				const struct cntr_entry *entry,
3473 				void *context, int vl, int mode, u64 data)
3474 {
3475 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476 
3477 	return dd->send_egress_err_status_cnt[19];
3478 }
3479 
3480 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3481 				const struct cntr_entry *entry,
3482 				void *context, int vl, int mode, u64 data)
3483 {
3484 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485 
3486 	return dd->send_egress_err_status_cnt[18];
3487 }
3488 
3489 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3490 				const struct cntr_entry *entry,
3491 				void *context, int vl, int mode, u64 data)
3492 {
3493 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494 
3495 	return dd->send_egress_err_status_cnt[17];
3496 }
3497 
3498 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3499 				const struct cntr_entry *entry,
3500 				void *context, int vl, int mode, u64 data)
3501 {
3502 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503 
3504 	return dd->send_egress_err_status_cnt[16];
3505 }
3506 
3507 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3508 					   void *context, int vl, int mode,
3509 					   u64 data)
3510 {
3511 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512 
3513 	return dd->send_egress_err_status_cnt[15];
3514 }
3515 
3516 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3517 						 void *context, int vl,
3518 						 int mode, u64 data)
3519 {
3520 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521 
3522 	return dd->send_egress_err_status_cnt[14];
3523 }
3524 
3525 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3526 					       void *context, int vl, int mode,
3527 					       u64 data)
3528 {
3529 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530 
3531 	return dd->send_egress_err_status_cnt[13];
3532 }
3533 
3534 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3535 					void *context, int vl, int mode,
3536 					u64 data)
3537 {
3538 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539 
3540 	return dd->send_egress_err_status_cnt[12];
3541 }
3542 
3543 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3544 				const struct cntr_entry *entry,
3545 				void *context, int vl, int mode, u64 data)
3546 {
3547 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548 
3549 	return dd->send_egress_err_status_cnt[11];
3550 }
3551 
3552 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3553 					     void *context, int vl, int mode,
3554 					     u64 data)
3555 {
3556 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557 
3558 	return dd->send_egress_err_status_cnt[10];
3559 }
3560 
3561 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3562 					    void *context, int vl, int mode,
3563 					    u64 data)
3564 {
3565 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566 
3567 	return dd->send_egress_err_status_cnt[9];
3568 }
3569 
3570 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3571 				const struct cntr_entry *entry,
3572 				void *context, int vl, int mode, u64 data)
3573 {
3574 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575 
3576 	return dd->send_egress_err_status_cnt[8];
3577 }
3578 
3579 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3580 				const struct cntr_entry *entry,
3581 				void *context, int vl, int mode, u64 data)
3582 {
3583 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584 
3585 	return dd->send_egress_err_status_cnt[7];
3586 }
3587 
3588 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3589 					    void *context, int vl, int mode,
3590 					    u64 data)
3591 {
3592 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593 
3594 	return dd->send_egress_err_status_cnt[6];
3595 }
3596 
3597 static u64 access_tx_incorrect_link_state_err_cnt(
3598 				const struct cntr_entry *entry,
3599 				void *context, int vl, int mode, u64 data)
3600 {
3601 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602 
3603 	return dd->send_egress_err_status_cnt[5];
3604 }
3605 
3606 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3607 				      void *context, int vl, int mode,
3608 				      u64 data)
3609 {
3610 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611 
3612 	return dd->send_egress_err_status_cnt[4];
3613 }
3614 
3615 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3616 				const struct cntr_entry *entry,
3617 				void *context, int vl, int mode, u64 data)
3618 {
3619 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620 
3621 	return dd->send_egress_err_status_cnt[3];
3622 }
3623 
3624 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3625 					    void *context, int vl, int mode,
3626 					    u64 data)
3627 {
3628 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629 
3630 	return dd->send_egress_err_status_cnt[2];
3631 }
3632 
3633 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3634 				const struct cntr_entry *entry,
3635 				void *context, int vl, int mode, u64 data)
3636 {
3637 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3638 
3639 	return dd->send_egress_err_status_cnt[1];
3640 }
3641 
3642 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3643 				const struct cntr_entry *entry,
3644 				void *context, int vl, int mode, u64 data)
3645 {
3646 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3647 
3648 	return dd->send_egress_err_status_cnt[0];
3649 }
3650 
3651 /*
3652  * Software counters corresponding to each of the
3653  * error status bits within SendErrStatus
3654  */
3655 static u64 access_send_csr_write_bad_addr_err_cnt(
3656 				const struct cntr_entry *entry,
3657 				void *context, int vl, int mode, u64 data)
3658 {
3659 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660 
3661 	return dd->send_err_status_cnt[2];
3662 }
3663 
3664 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3665 						 void *context, int vl,
3666 						 int mode, u64 data)
3667 {
3668 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669 
3670 	return dd->send_err_status_cnt[1];
3671 }
3672 
3673 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3674 				      void *context, int vl, int mode,
3675 				      u64 data)
3676 {
3677 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678 
3679 	return dd->send_err_status_cnt[0];
3680 }
3681 
3682 /*
3683  * Software counters corresponding to each of the
3684  * error status bits within SendCtxtErrStatus
3685  */
3686 static u64 access_pio_write_out_of_bounds_err_cnt(
3687 				const struct cntr_entry *entry,
3688 				void *context, int vl, int mode, u64 data)
3689 {
3690 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691 
3692 	return dd->sw_ctxt_err_status_cnt[4];
3693 }
3694 
3695 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3696 					     void *context, int vl, int mode,
3697 					     u64 data)
3698 {
3699 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700 
3701 	return dd->sw_ctxt_err_status_cnt[3];
3702 }
3703 
3704 static u64 access_pio_write_crosses_boundary_err_cnt(
3705 				const struct cntr_entry *entry,
3706 				void *context, int vl, int mode, u64 data)
3707 {
3708 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709 
3710 	return dd->sw_ctxt_err_status_cnt[2];
3711 }
3712 
3713 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3714 						void *context, int vl,
3715 						int mode, u64 data)
3716 {
3717 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718 
3719 	return dd->sw_ctxt_err_status_cnt[1];
3720 }
3721 
3722 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3723 					       void *context, int vl, int mode,
3724 					       u64 data)
3725 {
3726 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727 
3728 	return dd->sw_ctxt_err_status_cnt[0];
3729 }
3730 
3731 /*
3732  * Software counters corresponding to each of the
3733  * error status bits within SendDmaEngErrStatus
3734  */
3735 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3736 				const struct cntr_entry *entry,
3737 				void *context, int vl, int mode, u64 data)
3738 {
3739 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740 
3741 	return dd->sw_send_dma_eng_err_status_cnt[23];
3742 }
3743 
3744 static u64 access_sdma_header_storage_cor_err_cnt(
3745 				const struct cntr_entry *entry,
3746 				void *context, int vl, int mode, u64 data)
3747 {
3748 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749 
3750 	return dd->sw_send_dma_eng_err_status_cnt[22];
3751 }
3752 
3753 static u64 access_sdma_packet_tracking_cor_err_cnt(
3754 				const struct cntr_entry *entry,
3755 				void *context, int vl, int mode, u64 data)
3756 {
3757 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758 
3759 	return dd->sw_send_dma_eng_err_status_cnt[21];
3760 }
3761 
3762 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3763 					    void *context, int vl, int mode,
3764 					    u64 data)
3765 {
3766 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767 
3768 	return dd->sw_send_dma_eng_err_status_cnt[20];
3769 }
3770 
3771 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3772 					      void *context, int vl, int mode,
3773 					      u64 data)
3774 {
3775 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776 
3777 	return dd->sw_send_dma_eng_err_status_cnt[19];
3778 }
3779 
3780 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3781 				const struct cntr_entry *entry,
3782 				void *context, int vl, int mode, u64 data)
3783 {
3784 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785 
3786 	return dd->sw_send_dma_eng_err_status_cnt[18];
3787 }
3788 
3789 static u64 access_sdma_header_storage_unc_err_cnt(
3790 				const struct cntr_entry *entry,
3791 				void *context, int vl, int mode, u64 data)
3792 {
3793 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794 
3795 	return dd->sw_send_dma_eng_err_status_cnt[17];
3796 }
3797 
3798 static u64 access_sdma_packet_tracking_unc_err_cnt(
3799 				const struct cntr_entry *entry,
3800 				void *context, int vl, int mode, u64 data)
3801 {
3802 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803 
3804 	return dd->sw_send_dma_eng_err_status_cnt[16];
3805 }
3806 
3807 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3808 					    void *context, int vl, int mode,
3809 					    u64 data)
3810 {
3811 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812 
3813 	return dd->sw_send_dma_eng_err_status_cnt[15];
3814 }
3815 
3816 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3817 					      void *context, int vl, int mode,
3818 					      u64 data)
3819 {
3820 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821 
3822 	return dd->sw_send_dma_eng_err_status_cnt[14];
3823 }
3824 
3825 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3826 				       void *context, int vl, int mode,
3827 				       u64 data)
3828 {
3829 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830 
3831 	return dd->sw_send_dma_eng_err_status_cnt[13];
3832 }
3833 
3834 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3835 					     void *context, int vl, int mode,
3836 					     u64 data)
3837 {
3838 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839 
3840 	return dd->sw_send_dma_eng_err_status_cnt[12];
3841 }
3842 
3843 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3844 					      void *context, int vl, int mode,
3845 					      u64 data)
3846 {
3847 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848 
3849 	return dd->sw_send_dma_eng_err_status_cnt[11];
3850 }
3851 
3852 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3853 					     void *context, int vl, int mode,
3854 					     u64 data)
3855 {
3856 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857 
3858 	return dd->sw_send_dma_eng_err_status_cnt[10];
3859 }
3860 
3861 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3862 					  void *context, int vl, int mode,
3863 					  u64 data)
3864 {
3865 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866 
3867 	return dd->sw_send_dma_eng_err_status_cnt[9];
3868 }
3869 
3870 static u64 access_sdma_packet_desc_overflow_err_cnt(
3871 				const struct cntr_entry *entry,
3872 				void *context, int vl, int mode, u64 data)
3873 {
3874 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3875 
3876 	return dd->sw_send_dma_eng_err_status_cnt[8];
3877 }
3878 
3879 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3880 					       void *context, int vl,
3881 					       int mode, u64 data)
3882 {
3883 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3884 
3885 	return dd->sw_send_dma_eng_err_status_cnt[7];
3886 }
3887 
3888 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3889 				    void *context, int vl, int mode, u64 data)
3890 {
3891 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892 
3893 	return dd->sw_send_dma_eng_err_status_cnt[6];
3894 }
3895 
3896 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3897 					void *context, int vl, int mode,
3898 					u64 data)
3899 {
3900 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901 
3902 	return dd->sw_send_dma_eng_err_status_cnt[5];
3903 }
3904 
3905 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3906 					  void *context, int vl, int mode,
3907 					  u64 data)
3908 {
3909 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910 
3911 	return dd->sw_send_dma_eng_err_status_cnt[4];
3912 }
3913 
3914 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3915 				const struct cntr_entry *entry,
3916 				void *context, int vl, int mode, u64 data)
3917 {
3918 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919 
3920 	return dd->sw_send_dma_eng_err_status_cnt[3];
3921 }
3922 
3923 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3924 					void *context, int vl, int mode,
3925 					u64 data)
3926 {
3927 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928 
3929 	return dd->sw_send_dma_eng_err_status_cnt[2];
3930 }
3931 
3932 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3933 					    void *context, int vl, int mode,
3934 					    u64 data)
3935 {
3936 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3937 
3938 	return dd->sw_send_dma_eng_err_status_cnt[1];
3939 }
3940 
3941 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3942 					void *context, int vl, int mode,
3943 					u64 data)
3944 {
3945 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3946 
3947 	return dd->sw_send_dma_eng_err_status_cnt[0];
3948 }
3949 
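/*
 * Generator for the per-CPU software counter accessors below.  Each
 * access_sw_cpu_<cntr>() hands the port's per-CPU rvp.<cntr> counter and
 * its rvp.z_<cntr> baseline to read_write_cpu(), which does the
 * mode-dependent read/write plumbing for per-CPU counters.
 */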
3950 #define def_access_sw_cpu(cntr) \
3951 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
3952 			      void *context, int vl, int mode, u64 data)      \
3953 {									      \
3954 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
3955 	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
3956 			      ppd->ibport_data.rvp.cntr, vl,		      \
3957 			      mode, data);				      \
3958 }
3959 
3960 def_access_sw_cpu(rc_acks);
3961 def_access_sw_cpu(rc_qacks);
3962 def_access_sw_cpu(rc_delayed_comp);
3963 
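/*
 * Generator for the ibport software counter accessors below.  Each
 * access_ibp_<cntr>() rejects per-VL queries (returns 0) and otherwise
 * passes the port's rvp.n_<cntr> counter to read_write_sw().
 */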
3964 #define def_access_ibp_counter(cntr) \
3965 static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
3966 				void *context, int vl, int mode, u64 data)    \
3967 {									      \
3968 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
3969 									      \
3970 	if (vl != CNTR_INVALID_VL)					      \
3971 		return 0;						      \
3972 									      \
3973 	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
3974 			     mode, data);				      \
3975 }
3976 
3977 def_access_ibp_counter(loop_pkts);
3978 def_access_ibp_counter(rc_resends);
3979 def_access_ibp_counter(rnr_naks);
3980 def_access_ibp_counter(other_naks);
3981 def_access_ibp_counter(rc_timeouts);
3982 def_access_ibp_counter(pkt_drops);
3983 def_access_ibp_counter(dmawait);
3984 def_access_ibp_counter(rc_seqnak);
3985 def_access_ibp_counter(rc_dupreq);
3986 def_access_ibp_counter(rdma_seq);
3987 def_access_ibp_counter(unaligned);
3988 def_access_ibp_counter(seq_naks);
3989 
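/*
 * Device counter table, indexed by the C_* device counter enum and sized
 * by DEV_CNTR_LAST.  Each entry ties a counter name and CSR offset (or a
 * software access routine) to the CNTR_* flags that describe how the
 * counter is handled.
 */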
3990 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3991 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3992 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3993 			CNTR_NORMAL),
3994 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3995 			CNTR_NORMAL),
3996 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3997 			RCV_TID_FLOW_GEN_MISMATCH_CNT,
3998 			CNTR_NORMAL),
3999 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4000 			CNTR_NORMAL),
4001 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4002 			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4003 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4004 			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4005 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4006 			CNTR_NORMAL),
4007 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4008 			CNTR_NORMAL),
4009 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4010 			CNTR_NORMAL),
4011 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4012 			CNTR_NORMAL),
4013 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4014 			CNTR_NORMAL),
4015 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4016 			CNTR_NORMAL),
4017 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4018 			CCE_RCV_URGENT_INT_CNT,	CNTR_NORMAL),
4019 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4020 			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4021 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4022 			      CNTR_SYNTH),
4023 [C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4024 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4025 				 CNTR_SYNTH),
4026 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4027 				  CNTR_SYNTH),
4028 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4029 				  CNTR_SYNTH),
4030 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4031 				   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4032 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4033 				  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4034 				  CNTR_SYNTH),
4035 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4036 				DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4037 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4038 			       CNTR_SYNTH),
4039 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4040 			      CNTR_SYNTH),
4041 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4042 			       CNTR_SYNTH),
4043 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4044 				 CNTR_SYNTH),
4045 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4046 				CNTR_SYNTH),
4047 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4048 				CNTR_SYNTH),
4049 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4050 			       CNTR_SYNTH),
4051 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4052 				 CNTR_SYNTH | CNTR_VL),
4053 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4054 				CNTR_SYNTH | CNTR_VL),
4055 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4056 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4057 				 CNTR_SYNTH | CNTR_VL),
4058 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4059 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4060 				 CNTR_SYNTH | CNTR_VL),
4061 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4062 			      CNTR_SYNTH),
4063 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4064 				 CNTR_SYNTH | CNTR_VL),
4065 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4066 				CNTR_SYNTH),
4067 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4068 				   CNTR_SYNTH | CNTR_VL),
4069 [C_DC_TOTAL_CRC] =
4070 	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4071 			 CNTR_SYNTH),
4072 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4073 				  CNTR_SYNTH),
4074 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4075 				  CNTR_SYNTH),
4076 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4077 				  CNTR_SYNTH),
4078 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4079 				  CNTR_SYNTH),
4080 [C_DC_CRC_MULT_LN] =
4081 	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4082 			 CNTR_SYNTH),
4083 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4084 				    CNTR_SYNTH),
4085 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4086 				    CNTR_SYNTH),
4087 [C_DC_SEQ_CRC_CNT] =
4088 	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4089 			 CNTR_SYNTH),
4090 [C_DC_ESC0_ONLY_CNT] =
4091 	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4092 			 CNTR_SYNTH),
4093 [C_DC_ESC0_PLUS1_CNT] =
4094 	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4095 			 CNTR_SYNTH),
4096 [C_DC_ESC0_PLUS2_CNT] =
4097 	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4098 			 CNTR_SYNTH),
4099 [C_DC_REINIT_FROM_PEER_CNT] =
4100 	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4101 			 CNTR_SYNTH),
4102 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4103 				  CNTR_SYNTH),
4104 [C_DC_MISC_FLG_CNT] =
4105 	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4106 			 CNTR_SYNTH),
4107 [C_DC_PRF_GOOD_LTP_CNT] =
4108 	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4109 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4110 	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4111 			 CNTR_SYNTH),
4112 [C_DC_PRF_RX_FLIT_CNT] =
4113 	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4114 [C_DC_PRF_TX_FLIT_CNT] =
4115 	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4116 [C_DC_PRF_CLK_CNTR] =
4117 	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4118 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4119 	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4120 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4121 	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4122 			 CNTR_SYNTH),
4123 [C_DC_PG_STS_TX_SBE_CNT] =
4124 	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4125 [C_DC_PG_STS_TX_MBE_CNT] =
4126 	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4127 			 CNTR_SYNTH),
4128 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4129 			    access_sw_cpu_intr),
4130 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4131 			    access_sw_cpu_rcv_limit),
4132 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4133 			    access_sw_vtx_wait),
4134 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4135 			    access_sw_pio_wait),
4136 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4137 			    access_sw_pio_drain),
4138 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4139 			    access_sw_kmem_wait),
4140 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4141 			    access_sw_send_schedule),
4142 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4143 				      SEND_DMA_DESC_FETCHED_CNT, 0,
4144 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4145 				      dev_access_u32_csr),
4146 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4147 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4148 			     access_sde_int_cnt),
4149 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4150 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4151 			     access_sde_err_cnt),
4152 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4153 				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4154 				  access_sde_idle_int_cnt),
4155 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4156 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4157 				      access_sde_progress_int_cnt),
4158 /* MISC_ERR_STATUS */
4159 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4160 				CNTR_NORMAL,
4161 				access_misc_pll_lock_fail_err_cnt),
4162 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4163 				CNTR_NORMAL,
4164 				access_misc_mbist_fail_err_cnt),
4165 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4166 				CNTR_NORMAL,
4167 				access_misc_invalid_eep_cmd_err_cnt),
4168 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4169 				CNTR_NORMAL,
4170 				access_misc_efuse_done_parity_err_cnt),
4171 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4172 				CNTR_NORMAL,
4173 				access_misc_efuse_write_err_cnt),
4174 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4175 				0, CNTR_NORMAL,
4176 				access_misc_efuse_read_bad_addr_err_cnt),
4177 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4178 				CNTR_NORMAL,
4179 				access_misc_efuse_csr_parity_err_cnt),
4180 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4181 				CNTR_NORMAL,
4182 				access_misc_fw_auth_failed_err_cnt),
4183 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4184 				CNTR_NORMAL,
4185 				access_misc_key_mismatch_err_cnt),
4186 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4187 				CNTR_NORMAL,
4188 				access_misc_sbus_write_failed_err_cnt),
4189 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4190 				CNTR_NORMAL,
4191 				access_misc_csr_write_bad_addr_err_cnt),
4192 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4193 				CNTR_NORMAL,
4194 				access_misc_csr_read_bad_addr_err_cnt),
4195 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4196 				CNTR_NORMAL,
4197 				access_misc_csr_parity_err_cnt),
4198 /* CceErrStatus */
4199 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4200 				CNTR_NORMAL,
4201 				access_sw_cce_err_status_aggregated_cnt),
4202 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4203 				CNTR_NORMAL,
4204 				access_cce_msix_csr_parity_err_cnt),
4205 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4206 				CNTR_NORMAL,
4207 				access_cce_int_map_unc_err_cnt),
4208 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4209 				CNTR_NORMAL,
4210 				access_cce_int_map_cor_err_cnt),
4211 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4212 				CNTR_NORMAL,
4213 				access_cce_msix_table_unc_err_cnt),
4214 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4215 				CNTR_NORMAL,
4216 				access_cce_msix_table_cor_err_cnt),
4217 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4218 				0, CNTR_NORMAL,
4219 				access_cce_rxdma_conv_fifo_parity_err_cnt),
4220 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4221 				0, CNTR_NORMAL,
4222 				access_cce_rcpl_async_fifo_parity_err_cnt),
4223 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4224 				CNTR_NORMAL,
4225 				access_cce_seg_write_bad_addr_err_cnt),
4226 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4227 				CNTR_NORMAL,
4228 				access_cce_seg_read_bad_addr_err_cnt),
4229 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4230 				CNTR_NORMAL,
4231 				access_la_triggered_cnt),
4232 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4233 				CNTR_NORMAL,
4234 				access_cce_trgt_cpl_timeout_err_cnt),
4235 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4236 				CNTR_NORMAL,
4237 				access_pcic_receive_parity_err_cnt),
4238 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4239 				CNTR_NORMAL,
4240 				access_pcic_transmit_back_parity_err_cnt),
4241 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4242 				0, CNTR_NORMAL,
4243 				access_pcic_transmit_front_parity_err_cnt),
4244 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4245 				CNTR_NORMAL,
4246 				access_pcic_cpl_dat_q_unc_err_cnt),
4247 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4248 				CNTR_NORMAL,
4249 				access_pcic_cpl_hd_q_unc_err_cnt),
4250 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4251 				CNTR_NORMAL,
4252 				access_pcic_post_dat_q_unc_err_cnt),
4253 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4254 				CNTR_NORMAL,
4255 				access_pcic_post_hd_q_unc_err_cnt),
4256 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4257 				CNTR_NORMAL,
4258 				access_pcic_retry_sot_mem_unc_err_cnt),
4259 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4260 				CNTR_NORMAL,
4261 				access_pcic_retry_mem_unc_err),
4262 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4263 				CNTR_NORMAL,
4264 				access_pcic_n_post_dat_q_parity_err_cnt),
4265 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4266 				CNTR_NORMAL,
4267 				access_pcic_n_post_h_q_parity_err_cnt),
4268 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4269 				CNTR_NORMAL,
4270 				access_pcic_cpl_dat_q_cor_err_cnt),
4271 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4272 				CNTR_NORMAL,
4273 				access_pcic_cpl_hd_q_cor_err_cnt),
4274 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4275 				CNTR_NORMAL,
4276 				access_pcic_post_dat_q_cor_err_cnt),
4277 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4278 				CNTR_NORMAL,
4279 				access_pcic_post_hd_q_cor_err_cnt),
4280 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4281 				CNTR_NORMAL,
4282 				access_pcic_retry_sot_mem_cor_err_cnt),
4283 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4284 				CNTR_NORMAL,
4285 				access_pcic_retry_mem_cor_err_cnt),
4286 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4287 				"CceCli1AsyncFifoDbgParityError", 0, 0,
4288 				CNTR_NORMAL,
4289 				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4290 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4291 				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
4292 				CNTR_NORMAL,
4293 				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4294 				),
4295 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4296 			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4297 			CNTR_NORMAL,
4298 			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4299 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4300 			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4301 			CNTR_NORMAL,
4302 			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4303 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4304 			0, CNTR_NORMAL,
4305 			access_cce_cli2_async_fifo_parity_err_cnt),
4306 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4307 			CNTR_NORMAL,
4308 			access_cce_csr_cfg_bus_parity_err_cnt),
4309 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4310 			0, CNTR_NORMAL,
4311 			access_cce_cli0_async_fifo_parity_err_cnt),
4312 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4313 			CNTR_NORMAL,
4314 			access_cce_rspd_data_parity_err_cnt),
4315 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4316 			CNTR_NORMAL,
4317 			access_cce_trgt_access_err_cnt),
4318 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4319 			0, CNTR_NORMAL,
4320 			access_cce_trgt_async_fifo_parity_err_cnt),
4321 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4322 			CNTR_NORMAL,
4323 			access_cce_csr_write_bad_addr_err_cnt),
4324 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4325 			CNTR_NORMAL,
4326 			access_cce_csr_read_bad_addr_err_cnt),
4327 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4328 			CNTR_NORMAL,
4329 			access_ccs_csr_parity_err_cnt),
4330 
4331 /* RcvErrStatus */
4332 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4333 			CNTR_NORMAL,
4334 			access_rx_csr_parity_err_cnt),
4335 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4336 			CNTR_NORMAL,
4337 			access_rx_csr_write_bad_addr_err_cnt),
4338 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4339 			CNTR_NORMAL,
4340 			access_rx_csr_read_bad_addr_err_cnt),
4341 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4342 			CNTR_NORMAL,
4343 			access_rx_dma_csr_unc_err_cnt),
4344 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4345 			CNTR_NORMAL,
4346 			access_rx_dma_dq_fsm_encoding_err_cnt),
4347 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4348 			CNTR_NORMAL,
4349 			access_rx_dma_eq_fsm_encoding_err_cnt),
4350 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4351 			CNTR_NORMAL,
4352 			access_rx_dma_csr_parity_err_cnt),
4353 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4354 			CNTR_NORMAL,
4355 			access_rx_rbuf_data_cor_err_cnt),
4356 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4357 			CNTR_NORMAL,
4358 			access_rx_rbuf_data_unc_err_cnt),
4359 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4360 			CNTR_NORMAL,
4361 			access_rx_dma_data_fifo_rd_cor_err_cnt),
4362 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4363 			CNTR_NORMAL,
4364 			access_rx_dma_data_fifo_rd_unc_err_cnt),
4365 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4366 			CNTR_NORMAL,
4367 			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4368 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4369 			CNTR_NORMAL,
4370 			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4371 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4372 			CNTR_NORMAL,
4373 			access_rx_rbuf_desc_part2_cor_err_cnt),
4374 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4375 			CNTR_NORMAL,
4376 			access_rx_rbuf_desc_part2_unc_err_cnt),
4377 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4378 			CNTR_NORMAL,
4379 			access_rx_rbuf_desc_part1_cor_err_cnt),
4380 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4381 			CNTR_NORMAL,
4382 			access_rx_rbuf_desc_part1_unc_err_cnt),
4383 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4384 			CNTR_NORMAL,
4385 			access_rx_hq_intr_fsm_err_cnt),
4386 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4387 			CNTR_NORMAL,
4388 			access_rx_hq_intr_csr_parity_err_cnt),
4389 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4390 			CNTR_NORMAL,
4391 			access_rx_lookup_csr_parity_err_cnt),
4392 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4393 			CNTR_NORMAL,
4394 			access_rx_lookup_rcv_array_cor_err_cnt),
4395 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4396 			CNTR_NORMAL,
4397 			access_rx_lookup_rcv_array_unc_err_cnt),
4398 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4399 			0, CNTR_NORMAL,
4400 			access_rx_lookup_des_part2_parity_err_cnt),
4401 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4402 			0, CNTR_NORMAL,
4403 			access_rx_lookup_des_part1_unc_cor_err_cnt),
4404 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4405 			CNTR_NORMAL,
4406 			access_rx_lookup_des_part1_unc_err_cnt),
4407 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4408 			CNTR_NORMAL,
4409 			access_rx_rbuf_next_free_buf_cor_err_cnt),
4410 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4411 			CNTR_NORMAL,
4412 			access_rx_rbuf_next_free_buf_unc_err_cnt),
4413 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4414 			"RxRbufFlInitWrAddrParityErr", 0, 0,
4415 			CNTR_NORMAL,
4416 			access_rbuf_fl_init_wr_addr_parity_err_cnt),
4417 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4418 			0, CNTR_NORMAL,
4419 			access_rx_rbuf_fl_initdone_parity_err_cnt),
4420 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4421 			0, CNTR_NORMAL,
4422 			access_rx_rbuf_fl_write_addr_parity_err_cnt),
4423 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4424 			CNTR_NORMAL,
4425 			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4426 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4427 			CNTR_NORMAL,
4428 			access_rx_rbuf_empty_err_cnt),
4429 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4430 			CNTR_NORMAL,
4431 			access_rx_rbuf_full_err_cnt),
4432 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4433 			CNTR_NORMAL,
4434 			access_rbuf_bad_lookup_err_cnt),
4435 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4436 			CNTR_NORMAL,
4437 			access_rbuf_ctx_id_parity_err_cnt),
4438 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4439 			CNTR_NORMAL,
4440 			access_rbuf_csr_qeopdw_parity_err_cnt),
4441 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4442 			"RxRbufCsrQNumOfPktParityErr", 0, 0,
4443 			CNTR_NORMAL,
4444 			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4445 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4446 			"RxRbufCsrQTlPtrParityErr", 0, 0,
4447 			CNTR_NORMAL,
4448 			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4449 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4450 			0, CNTR_NORMAL,
4451 			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4452 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4453 			0, CNTR_NORMAL,
4454 			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4455 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4456 			0, 0, CNTR_NORMAL,
4457 			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4458 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4459 			0, CNTR_NORMAL,
4460 			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4461 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4462 			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
4463 			CNTR_NORMAL,
4464 			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4465 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4466 			0, CNTR_NORMAL,
4467 			access_rx_rbuf_block_list_read_cor_err_cnt),
4468 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4469 			0, CNTR_NORMAL,
4470 			access_rx_rbuf_block_list_read_unc_err_cnt),
4471 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4472 			CNTR_NORMAL,
4473 			access_rx_rbuf_lookup_des_cor_err_cnt),
4474 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4475 			CNTR_NORMAL,
4476 			access_rx_rbuf_lookup_des_unc_err_cnt),
4477 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4478 			"RxRbufLookupDesRegUncCorErr", 0, 0,
4479 			CNTR_NORMAL,
4480 			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4481 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4482 			CNTR_NORMAL,
4483 			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4484 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4485 			CNTR_NORMAL,
4486 			access_rx_rbuf_free_list_cor_err_cnt),
4487 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4488 			CNTR_NORMAL,
4489 			access_rx_rbuf_free_list_unc_err_cnt),
4490 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4491 			CNTR_NORMAL,
4492 			access_rx_rcv_fsm_encoding_err_cnt),
4493 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4494 			CNTR_NORMAL,
4495 			access_rx_dma_flag_cor_err_cnt),
4496 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4497 			CNTR_NORMAL,
4498 			access_rx_dma_flag_unc_err_cnt),
4499 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4500 			CNTR_NORMAL,
4501 			access_rx_dc_sop_eop_parity_err_cnt),
4502 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4503 			CNTR_NORMAL,
4504 			access_rx_rcv_csr_parity_err_cnt),
4505 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4506 			CNTR_NORMAL,
4507 			access_rx_rcv_qp_map_table_cor_err_cnt),
4508 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4509 			CNTR_NORMAL,
4510 			access_rx_rcv_qp_map_table_unc_err_cnt),
4511 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4512 			CNTR_NORMAL,
4513 			access_rx_rcv_data_cor_err_cnt),
4514 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4515 			CNTR_NORMAL,
4516 			access_rx_rcv_data_unc_err_cnt),
4517 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4518 			CNTR_NORMAL,
4519 			access_rx_rcv_hdr_cor_err_cnt),
4520 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4521 			CNTR_NORMAL,
4522 			access_rx_rcv_hdr_unc_err_cnt),
4523 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4524 			CNTR_NORMAL,
4525 			access_rx_dc_intf_parity_err_cnt),
4526 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4527 			CNTR_NORMAL,
4528 			access_rx_dma_csr_cor_err_cnt),
4529 /* SendPioErrStatus */
4530 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4531 			CNTR_NORMAL,
4532 			access_pio_pec_sop_head_parity_err_cnt),
4533 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4534 			CNTR_NORMAL,
4535 			access_pio_pcc_sop_head_parity_err_cnt),
4536 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4537 			0, 0, CNTR_NORMAL,
4538 			access_pio_last_returned_cnt_parity_err_cnt),
4539 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4540 			0, CNTR_NORMAL,
4541 			access_pio_current_free_cnt_parity_err_cnt),
4542 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4543 			CNTR_NORMAL,
4544 			access_pio_reserved_31_err_cnt),
4545 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4546 			CNTR_NORMAL,
4547 			access_pio_reserved_30_err_cnt),
4548 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4549 			CNTR_NORMAL,
4550 			access_pio_ppmc_sop_len_err_cnt),
4551 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4552 			CNTR_NORMAL,
4553 			access_pio_ppmc_bqc_mem_parity_err_cnt),
4554 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4555 			CNTR_NORMAL,
4556 			access_pio_vl_fifo_parity_err_cnt),
4557 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4558 			CNTR_NORMAL,
4559 			access_pio_vlf_sop_parity_err_cnt),
4560 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4561 			CNTR_NORMAL,
4562 			access_pio_vlf_v1_len_parity_err_cnt),
4563 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4564 			CNTR_NORMAL,
4565 			access_pio_block_qw_count_parity_err_cnt),
4566 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4567 			CNTR_NORMAL,
4568 			access_pio_write_qw_valid_parity_err_cnt),
4569 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4570 			CNTR_NORMAL,
4571 			access_pio_state_machine_err_cnt),
4572 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4573 			CNTR_NORMAL,
4574 			access_pio_write_data_parity_err_cnt),
4575 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4576 			CNTR_NORMAL,
4577 			access_pio_host_addr_mem_cor_err_cnt),
4578 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4579 			CNTR_NORMAL,
4580 			access_pio_host_addr_mem_unc_err_cnt),
4581 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4582 			CNTR_NORMAL,
4583 			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4584 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4585 			CNTR_NORMAL,
4586 			access_pio_init_sm_in_err_cnt),
4587 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4588 			CNTR_NORMAL,
4589 			access_pio_ppmc_pbl_fifo_err_cnt),
4590 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4591 			0, CNTR_NORMAL,
4592 			access_pio_credit_ret_fifo_parity_err_cnt),
4593 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4594 			CNTR_NORMAL,
4595 			access_pio_v1_len_mem_bank1_cor_err_cnt),
4596 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4597 			CNTR_NORMAL,
4598 			access_pio_v1_len_mem_bank0_cor_err_cnt),
4599 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4600 			CNTR_NORMAL,
4601 			access_pio_v1_len_mem_bank1_unc_err_cnt),
4602 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4603 			CNTR_NORMAL,
4604 			access_pio_v1_len_mem_bank0_unc_err_cnt),
4605 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4606 			CNTR_NORMAL,
4607 			access_pio_sm_pkt_reset_parity_err_cnt),
4608 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4609 			CNTR_NORMAL,
4610 			access_pio_pkt_evict_fifo_parity_err_cnt),
4611 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4612 			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
4613 			CNTR_NORMAL,
4614 			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4615 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4616 			CNTR_NORMAL,
4617 			access_pio_sbrdctl_crrel_parity_err_cnt),
4618 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4619 			CNTR_NORMAL,
4620 			access_pio_pec_fifo_parity_err_cnt),
4621 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4622 			CNTR_NORMAL,
4623 			access_pio_pcc_fifo_parity_err_cnt),
4624 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4625 			CNTR_NORMAL,
4626 			access_pio_sb_mem_fifo1_err_cnt),
4627 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4628 			CNTR_NORMAL,
4629 			access_pio_sb_mem_fifo0_err_cnt),
4630 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4631 			CNTR_NORMAL,
4632 			access_pio_csr_parity_err_cnt),
4633 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4634 			CNTR_NORMAL,
4635 			access_pio_write_addr_parity_err_cnt),
4636 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4637 			CNTR_NORMAL,
4638 			access_pio_write_bad_ctxt_err_cnt),
4639 /* SendDmaErrStatus */
4640 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4641 			0, CNTR_NORMAL,
4642 			access_sdma_pcie_req_tracking_cor_err_cnt),
4643 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4644 			0, CNTR_NORMAL,
4645 			access_sdma_pcie_req_tracking_unc_err_cnt),
4646 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4647 			CNTR_NORMAL,
4648 			access_sdma_csr_parity_err_cnt),
4649 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4650 			CNTR_NORMAL,
4651 			access_sdma_rpy_tag_err_cnt),
4652 /* SendEgressErrStatus */
4653 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4654 			CNTR_NORMAL,
4655 			access_tx_read_pio_memory_csr_unc_err_cnt),
4656 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4657 			0, CNTR_NORMAL,
4658 			access_tx_read_sdma_memory_csr_err_cnt),
4659 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4660 			CNTR_NORMAL,
4661 			access_tx_egress_fifo_cor_err_cnt),
4662 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4663 			CNTR_NORMAL,
4664 			access_tx_read_pio_memory_cor_err_cnt),
4665 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4666 			CNTR_NORMAL,
4667 			access_tx_read_sdma_memory_cor_err_cnt),
4668 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4669 			CNTR_NORMAL,
4670 			access_tx_sb_hdr_cor_err_cnt),
4671 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4672 			CNTR_NORMAL,
4673 			access_tx_credit_overrun_err_cnt),
4674 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4675 			CNTR_NORMAL,
4676 			access_tx_launch_fifo8_cor_err_cnt),
4677 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4678 			CNTR_NORMAL,
4679 			access_tx_launch_fifo7_cor_err_cnt),
4680 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4681 			CNTR_NORMAL,
4682 			access_tx_launch_fifo6_cor_err_cnt),
4683 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4684 			CNTR_NORMAL,
4685 			access_tx_launch_fifo5_cor_err_cnt),
4686 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4687 			CNTR_NORMAL,
4688 			access_tx_launch_fifo4_cor_err_cnt),
4689 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4690 			CNTR_NORMAL,
4691 			access_tx_launch_fifo3_cor_err_cnt),
4692 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4693 			CNTR_NORMAL,
4694 			access_tx_launch_fifo2_cor_err_cnt),
4695 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4696 			CNTR_NORMAL,
4697 			access_tx_launch_fifo1_cor_err_cnt),
4698 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4699 			CNTR_NORMAL,
4700 			access_tx_launch_fifo0_cor_err_cnt),
4701 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4702 			CNTR_NORMAL,
4703 			access_tx_credit_return_vl_err_cnt),
4704 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4705 			CNTR_NORMAL,
4706 			access_tx_hcrc_insertion_err_cnt),
4707 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4708 			CNTR_NORMAL,
4709 			access_tx_egress_fifo_unc_err_cnt),
4710 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4711 			CNTR_NORMAL,
4712 			access_tx_read_pio_memory_unc_err_cnt),
4713 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4714 			CNTR_NORMAL,
4715 			access_tx_read_sdma_memory_unc_err_cnt),
4716 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4717 			CNTR_NORMAL,
4718 			access_tx_sb_hdr_unc_err_cnt),
4719 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4720 			CNTR_NORMAL,
4721 			access_tx_credit_return_partiy_err_cnt),
4722 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4723 			0, 0, CNTR_NORMAL,
4724 			access_tx_launch_fifo8_unc_or_parity_err_cnt),
4725 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4726 			0, 0, CNTR_NORMAL,
4727 			access_tx_launch_fifo7_unc_or_parity_err_cnt),
4728 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4729 			0, 0, CNTR_NORMAL,
4730 			access_tx_launch_fifo6_unc_or_parity_err_cnt),
4731 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4732 			0, 0, CNTR_NORMAL,
4733 			access_tx_launch_fifo5_unc_or_parity_err_cnt),
4734 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4735 			0, 0, CNTR_NORMAL,
4736 			access_tx_launch_fifo4_unc_or_parity_err_cnt),
4737 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4738 			0, 0, CNTR_NORMAL,
4739 			access_tx_launch_fifo3_unc_or_parity_err_cnt),
4740 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4741 			0, 0, CNTR_NORMAL,
4742 			access_tx_launch_fifo2_unc_or_parity_err_cnt),
4743 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4744 			0, 0, CNTR_NORMAL,
4745 			access_tx_launch_fifo1_unc_or_parity_err_cnt),
4746 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4747 			0, 0, CNTR_NORMAL,
4748 			access_tx_launch_fifo0_unc_or_parity_err_cnt),
4749 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4750 			0, 0, CNTR_NORMAL,
4751 			access_tx_sdma15_disallowed_packet_err_cnt),
4752 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4753 			0, 0, CNTR_NORMAL,
4754 			access_tx_sdma14_disallowed_packet_err_cnt),
4755 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4756 			0, 0, CNTR_NORMAL,
4757 			access_tx_sdma13_disallowed_packet_err_cnt),
4758 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4759 			0, 0, CNTR_NORMAL,
4760 			access_tx_sdma12_disallowed_packet_err_cnt),
4761 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4762 			0, 0, CNTR_NORMAL,
4763 			access_tx_sdma11_disallowed_packet_err_cnt),
4764 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4765 			0, 0, CNTR_NORMAL,
4766 			access_tx_sdma10_disallowed_packet_err_cnt),
4767 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4768 			0, 0, CNTR_NORMAL,
4769 			access_tx_sdma9_disallowed_packet_err_cnt),
4770 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4771 			0, 0, CNTR_NORMAL,
4772 			access_tx_sdma8_disallowed_packet_err_cnt),
4773 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4774 			0, 0, CNTR_NORMAL,
4775 			access_tx_sdma7_disallowed_packet_err_cnt),
4776 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4777 			0, 0, CNTR_NORMAL,
4778 			access_tx_sdma6_disallowed_packet_err_cnt),
4779 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4780 			0, 0, CNTR_NORMAL,
4781 			access_tx_sdma5_disallowed_packet_err_cnt),
4782 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4783 			0, 0, CNTR_NORMAL,
4784 			access_tx_sdma4_disallowed_packet_err_cnt),
4785 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4786 			0, 0, CNTR_NORMAL,
4787 			access_tx_sdma3_disallowed_packet_err_cnt),
4788 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4789 			0, 0, CNTR_NORMAL,
4790 			access_tx_sdma2_disallowed_packet_err_cnt),
4791 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4792 			0, 0, CNTR_NORMAL,
4793 			access_tx_sdma1_disallowed_packet_err_cnt),
4794 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4795 			0, 0, CNTR_NORMAL,
4796 			access_tx_sdma0_disallowed_packet_err_cnt),
4797 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4798 			CNTR_NORMAL,
4799 			access_tx_config_parity_err_cnt),
4800 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4801 			CNTR_NORMAL,
4802 			access_tx_sbrd_ctl_csr_parity_err_cnt),
4803 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4804 			CNTR_NORMAL,
4805 			access_tx_launch_csr_parity_err_cnt),
4806 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4807 			CNTR_NORMAL,
4808 			access_tx_illegal_vl_err_cnt),
4809 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4810 			"TxSbrdCtlStateMachineParityErr", 0, 0,
4811 			CNTR_NORMAL,
4812 			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4813 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4814 			CNTR_NORMAL,
4815 			access_egress_reserved_10_err_cnt),
4816 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4817 			CNTR_NORMAL,
4818 			access_egress_reserved_9_err_cnt),
4819 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4820 			0, 0, CNTR_NORMAL,
4821 			access_tx_sdma_launch_intf_parity_err_cnt),
4822 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4823 			CNTR_NORMAL,
4824 			access_tx_pio_launch_intf_parity_err_cnt),
4825 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4826 			CNTR_NORMAL,
4827 			access_egress_reserved_6_err_cnt),
4828 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4829 			CNTR_NORMAL,
4830 			access_tx_incorrect_link_state_err_cnt),
4831 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4832 			CNTR_NORMAL,
4833 			access_tx_linkdown_err_cnt),
4834 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4835 			"EgressFifoUnderrunOrParityErr", 0, 0,
4836 			CNTR_NORMAL,
4837 			access_tx_egress_fifi_underrun_or_parity_err_cnt),
4838 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4839 			CNTR_NORMAL,
4840 			access_egress_reserved_2_err_cnt),
4841 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4842 			CNTR_NORMAL,
4843 			access_tx_pkt_integrity_mem_unc_err_cnt),
4844 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4845 			CNTR_NORMAL,
4846 			access_tx_pkt_integrity_mem_cor_err_cnt),
4847 /* SendErrStatus */
4848 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4849 			CNTR_NORMAL,
4850 			access_send_csr_write_bad_addr_err_cnt),
4851 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4852 			CNTR_NORMAL,
4853 			access_send_csr_read_bad_addr_err_cnt),
4854 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4855 			CNTR_NORMAL,
4856 			access_send_csr_parity_cnt),
4857 /* SendCtxtErrStatus */
4858 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4859 			CNTR_NORMAL,
4860 			access_pio_write_out_of_bounds_err_cnt),
4861 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4862 			CNTR_NORMAL,
4863 			access_pio_write_overflow_err_cnt),
4864 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4865 			0, 0, CNTR_NORMAL,
4866 			access_pio_write_crosses_boundary_err_cnt),
4867 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4868 			CNTR_NORMAL,
4869 			access_pio_disallowed_packet_err_cnt),
4870 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4871 			CNTR_NORMAL,
4872 			access_pio_inconsistent_sop_err_cnt),
4873 /* SendDmaEngErrStatus */
4874 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4875 			0, 0, CNTR_NORMAL,
4876 			access_sdma_header_request_fifo_cor_err_cnt),
4877 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4878 			CNTR_NORMAL,
4879 			access_sdma_header_storage_cor_err_cnt),
4880 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4881 			CNTR_NORMAL,
4882 			access_sdma_packet_tracking_cor_err_cnt),
4883 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4884 			CNTR_NORMAL,
4885 			access_sdma_assembly_cor_err_cnt),
4886 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4887 			CNTR_NORMAL,
4888 			access_sdma_desc_table_cor_err_cnt),
4889 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4890 			0, 0, CNTR_NORMAL,
4891 			access_sdma_header_request_fifo_unc_err_cnt),
4892 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4893 			CNTR_NORMAL,
4894 			access_sdma_header_storage_unc_err_cnt),
4895 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4896 			CNTR_NORMAL,
4897 			access_sdma_packet_tracking_unc_err_cnt),
4898 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4899 			CNTR_NORMAL,
4900 			access_sdma_assembly_unc_err_cnt),
4901 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4902 			CNTR_NORMAL,
4903 			access_sdma_desc_table_unc_err_cnt),
4904 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4905 			CNTR_NORMAL,
4906 			access_sdma_timeout_err_cnt),
4907 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4908 			CNTR_NORMAL,
4909 			access_sdma_header_length_err_cnt),
4910 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4911 			CNTR_NORMAL,
4912 			access_sdma_header_address_err_cnt),
4913 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4914 			CNTR_NORMAL,
4915 			access_sdma_header_select_err_cnt),
4916 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4917 			CNTR_NORMAL,
4918 			access_sdma_reserved_9_err_cnt),
4919 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4920 			CNTR_NORMAL,
4921 			access_sdma_packet_desc_overflow_err_cnt),
4922 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4923 			CNTR_NORMAL,
4924 			access_sdma_length_mismatch_err_cnt),
4925 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4926 			CNTR_NORMAL,
4927 			access_sdma_halt_err_cnt),
4928 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4929 			CNTR_NORMAL,
4930 			access_sdma_mem_read_err_cnt),
4931 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4932 			CNTR_NORMAL,
4933 			access_sdma_first_desc_err_cnt),
4934 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4935 			CNTR_NORMAL,
4936 			access_sdma_tail_out_of_bounds_err_cnt),
4937 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4938 			CNTR_NORMAL,
4939 			access_sdma_too_long_err_cnt),
4940 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4941 			CNTR_NORMAL,
4942 			access_sdma_gen_mismatch_err_cnt),
4943 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4944 			CNTR_NORMAL,
4945 			access_sdma_wrong_dw_err_cnt),
4946 };
4947 
4948 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4949 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4950 			CNTR_NORMAL),
4951 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4952 			CNTR_NORMAL),
4953 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4954 			CNTR_NORMAL),
4955 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4956 			CNTR_NORMAL),
4957 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4958 			CNTR_NORMAL),
4959 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4960 			CNTR_NORMAL),
4961 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4962 			CNTR_NORMAL),
4963 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4964 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4965 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4966 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4967 				      CNTR_SYNTH | CNTR_VL),
4968 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4969 				     CNTR_SYNTH | CNTR_VL),
4970 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4971 				      CNTR_SYNTH | CNTR_VL),
4972 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4973 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4974 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4975 			     access_sw_link_dn_cnt),
4976 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4977 			   access_sw_link_up_cnt),
4978 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4979 				 access_sw_unknown_frame_cnt),
4980 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4981 			     access_sw_xmit_discards),
4982 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4983 				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4984 				access_sw_xmit_discards),
4985 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4986 				 access_xmit_constraint_errs),
4987 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4988 				access_rcv_constraint_errs),
4989 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4990 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4991 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4992 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4993 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4994 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4995 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4996 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4997 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4998 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4999 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5000 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5001 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5002 			       access_sw_cpu_rc_acks),
5003 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5004 				access_sw_cpu_rc_qacks),
5005 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5006 				       access_sw_cpu_rc_delayed_comp),
5007 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5008 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5009 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5010 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5011 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5012 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5013 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5014 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5015 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5016 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5017 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5018 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5019 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5020 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5021 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5022 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5023 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5024 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5025 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5026 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5027 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5028 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5029 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5030 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5031 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5032 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5033 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5034 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5035 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5036 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5037 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5038 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5039 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5040 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5041 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5042 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5043 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5044 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5045 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5046 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5047 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5048 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5049 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5050 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5051 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5052 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5053 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5054 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5055 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5056 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5057 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5058 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5059 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5060 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5061 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5062 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5063 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5064 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5065 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5066 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5067 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5068 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5069 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5070 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5071 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5072 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5073 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5074 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5075 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5076 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5077 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5078 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5079 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5080 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5081 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5082 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5083 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5084 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5085 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5086 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5087 };
5088 
5089 /* ======================================================================== */
5090 
5091 /* return true if this is chip revision A */
5092 int is_ax(struct hfi1_devdata *dd)
5093 {
5094 	u8 chip_rev_minor =
5095 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5096 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5097 	return (chip_rev_minor & 0xf0) == 0;
5098 }
5099 
5100 /* return true if this is chip revision B */
5101 int is_bx(struct hfi1_devdata *dd)
5102 {
5103 	u8 chip_rev_minor =
5104 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5105 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5106 	return (chip_rev_minor & 0xF0) == 0x10;
5107 }
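
/*
 * Editor's illustrative sketch, not part of the driver: how the minor
 * chip revision field splits into a stepping (high nibble, matching
 * is_ax()/is_bx() above) and a step number (low nibble -- that meaning
 * is an assumption for the example, not taken from this file).
 */
static void __maybe_unused example_decode_chip_rev(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	dd_dev_info(dd, "stepping %c, step %u\n",
		    'A' + ((chip_rev_minor >> 4) & 0xf),
		    chip_rev_minor & 0xf);
}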
5108 
5109 /*
5110  * Append string s to buffer buf.  Arguments curp and lenp are the current
5111  * position and remaining length, respectively.
5112  *
5113  * return 0 on success, 1 on out of room
5114  */
5115 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5116 {
5117 	char *p = *curp;
5118 	int len = *lenp;
5119 	int result = 0; /* success */
5120 	char c;
5121 
5122 	/* add a comma, if not first in the buffer */
5123 	if (p != buf) {
5124 		if (len == 0) {
5125 			result = 1; /* out of room */
5126 			goto done;
5127 		}
5128 		*p++ = ',';
5129 		len--;
5130 	}
5131 
5132 	/* copy the string */
5133 	while ((c = *s++) != 0) {
5134 		if (len == 0) {
5135 			result = 1; /* out of room */
5136 			goto done;
5137 		}
5138 		*p++ = c;
5139 		len--;
5140 	}
5141 
5142 done:
5143 	/* write return values */
5144 	*curp = p;
5145 	*lenp = len;
5146 
5147 	return result;
5148 }
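
/*
 * Editor's illustrative sketch, not part of the driver: building "a,b"
 * with append_str() and detecting out-of-room.  Buffer sizes are
 * arbitrary and chosen only for the example.
 */
static void __maybe_unused example_append_str(void)
{
	char buf[4];
	char *p = buf;
	int len = sizeof(buf) - 1;	/* leave room for a final nul */

	append_str(buf, &p, &len, "a");		/* buf holds "a", len 2 */
	append_str(buf, &p, &len, "b");		/* buf holds "a,b", len 0 */
	if (append_str(buf, &p, &len, "c"))	/* returns 1: out of room */
		*p = '\0';
	pr_info("%s\n", buf);			/* prints "a,b" */
}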
5149 
5150 /*
5151  * Using the given flag table, print a comma separated string into
5152  * the buffer.  End in '*' if the buffer is too short.
5153  */
5154 static char *flag_string(char *buf, int buf_len, u64 flags,
5155 			 struct flag_table *table, int table_size)
5156 {
5157 	char extra[32];
5158 	char *p = buf;
5159 	int len = buf_len;
5160 	int no_room = 0;
5161 	int i;
5162 
5163 	/* make sure there are at least 2 bytes so we can form "*" */
5164 	if (len < 2)
5165 		return "";
5166 
5167 	len--;	/* leave room for a nul */
5168 	for (i = 0; i < table_size; i++) {
5169 		if (flags & table[i].flag) {
5170 			no_room = append_str(buf, &p, &len, table[i].str);
5171 			if (no_room)
5172 				break;
5173 			flags &= ~table[i].flag;
5174 		}
5175 	}
5176 
5177 	/* any undocumented bits left? */
5178 	if (!no_room && flags) {
5179 		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5180 		no_room = append_str(buf, &p, &len, extra);
5181 	}
5182 
5183 	/* add '*' if we ran out of room */
5184 	if (no_room) {
5185 		/* may need to back up to add space for a '*' */
5186 		if (len == 0)
5187 			--p;
5188 		*p++ = '*';
5189 	}
5190 
5191 	/* add final nul - space already allocated above */
5192 	*p = 0;
5193 	return buf;
5194 }
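
/*
 * Editor's illustrative sketch, not part of the driver: decoding a raw
 * error mask with flag_string().  The table and bit values are made up
 * for the example; the real tables (e.g. cce_err_status_flags) are
 * defined elsewhere in this file.
 */
static void __maybe_unused example_flag_string(void)
{
	struct flag_table demo_flags[] = {
		{ .flag = 0x1ull, .str = "DemoBit0" },
		{ .flag = 0x2ull, .str = "DemoBit1" },
	};
	char buf[64];

	/* bits 0 and 1 are named, bit 3 is not documented in the table */
	/* expected output: "DemoBit0,DemoBit1,bits 0x8" */
	pr_info("%s\n", flag_string(buf, sizeof(buf), 0xbull,
				    demo_flags, ARRAY_SIZE(demo_flags)));
}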
5195 
5196 /* first 8 CCE error interrupt source names */
5197 static const char * const cce_misc_names[] = {
5198 	"CceErrInt",		/* 0 */
5199 	"RxeErrInt",		/* 1 */
5200 	"MiscErrInt",		/* 2 */
5201 	"Reserved3",		/* 3 */
5202 	"PioErrInt",		/* 4 */
5203 	"SDmaErrInt",		/* 5 */
5204 	"EgressErrInt",		/* 6 */
5205 	"TxeErrInt"		/* 7 */
5206 };
5207 
5208 /*
5209  * Return the miscellaneous error interrupt name.
5210  */
5211 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5212 {
5213 	if (source < ARRAY_SIZE(cce_misc_names))
5214 		strncpy(buf, cce_misc_names[source], bsize);
5215 	else
5216 		snprintf(buf, bsize, "Reserved%u",
5217 			 source + IS_GENERAL_ERR_START);
5218 
5219 	return buf;
5220 }
5221 
5222 /*
5223  * Return the SDMA engine error interrupt name.
5224  */
5225 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5226 {
5227 	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5228 	return buf;
5229 }
5230 
5231 /*
5232  * Return the send context error interrupt name.
5233  */
5234 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5235 {
5236 	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5237 	return buf;
5238 }
5239 
5240 static const char * const various_names[] = {
5241 	"PbcInt",
5242 	"GpioAssertInt",
5243 	"Qsfp1Int",
5244 	"Qsfp2Int",
5245 	"TCritInt"
5246 };
5247 
5248 /*
5249  * Return the various interrupt name.
5250  */
5251 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5252 {
5253 	if (source < ARRAY_SIZE(various_names))
5254 		strncpy(buf, various_names[source], bsize);
5255 	else
5256 		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5257 	return buf;
5258 }
5259 
5260 /*
5261  * Return the DC interrupt name.
5262  */
5263 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5264 {
5265 	static const char * const dc_int_names[] = {
5266 		"common",
5267 		"lcb",
5268 		"8051",
5269 		"lbm"	/* local block merge */
5270 	};
5271 
5272 	if (source < ARRAY_SIZE(dc_int_names))
5273 		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5274 	else
5275 		snprintf(buf, bsize, "DCInt%u", source);
5276 	return buf;
5277 }
5278 
5279 static const char * const sdma_int_names[] = {
5280 	"SDmaInt",
5281 	"SdmaIdleInt",
5282 	"SdmaProgressInt",
5283 };
5284 
5285 /*
5286  * Return the SDMA engine interrupt name.
5287  */
5288 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5289 {
5290 	/* what interrupt */
5291 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5292 	/* which engine */
5293 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5294 
5295 	if (likely(what < 3))
5296 		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5297 	else
5298 		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5299 	return buf;
5300 }
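
/*
 * Editor's illustrative sketch, not part of the driver: assuming 16 SDMA
 * engines (TXE_NUM_SDMA_ENGINES), interrupt source 21 decomposes into
 * what = 21 / 16 = 1 (SdmaIdleInt) and which = 21 % 16 = 5, so the name
 * printed below is "SdmaIdleInt5".
 */
static void __maybe_unused example_sdma_eng_name(void)
{
	char buf[64];

	pr_info("%s\n", is_sdma_eng_name(buf, sizeof(buf), 21));
}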
5301 
5302 /*
5303  * Return the receive available interrupt name.
5304  */
5305 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5306 {
5307 	snprintf(buf, bsize, "RcvAvailInt%u", source);
5308 	return buf;
5309 }
5310 
5311 /*
5312  * Return the receive urgent interrupt name.
5313  */
5314 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5315 {
5316 	snprintf(buf, bsize, "RcvUrgentInt%u", source);
5317 	return buf;
5318 }
5319 
5320 /*
5321  * Return the send credit interrupt name.
5322  */
5323 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5324 {
5325 	snprintf(buf, bsize, "SendCreditInt%u", source);
5326 	return buf;
5327 }
5328 
5329 /*
5330  * Return the reserved interrupt name.
5331  */
5332 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5333 {
5334 	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5335 	return buf;
5336 }
5337 
5338 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5339 {
5340 	return flag_string(buf, buf_len, flags,
5341 			   cce_err_status_flags,
5342 			   ARRAY_SIZE(cce_err_status_flags));
5343 }
5344 
5345 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5346 {
5347 	return flag_string(buf, buf_len, flags,
5348 			   rxe_err_status_flags,
5349 			   ARRAY_SIZE(rxe_err_status_flags));
5350 }
5351 
5352 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5353 {
5354 	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5355 			   ARRAY_SIZE(misc_err_status_flags));
5356 }
5357 
5358 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5359 {
5360 	return flag_string(buf, buf_len, flags,
5361 			   pio_err_status_flags,
5362 			   ARRAY_SIZE(pio_err_status_flags));
5363 }
5364 
5365 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366 {
5367 	return flag_string(buf, buf_len, flags,
5368 			   sdma_err_status_flags,
5369 			   ARRAY_SIZE(sdma_err_status_flags));
5370 }
5371 
5372 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373 {
5374 	return flag_string(buf, buf_len, flags,
5375 			   egress_err_status_flags,
5376 			   ARRAY_SIZE(egress_err_status_flags));
5377 }
5378 
5379 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5380 {
5381 	return flag_string(buf, buf_len, flags,
5382 			   egress_err_info_flags,
5383 			   ARRAY_SIZE(egress_err_info_flags));
5384 }
5385 
5386 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5387 {
5388 	return flag_string(buf, buf_len, flags,
5389 			   send_err_status_flags,
5390 			   ARRAY_SIZE(send_err_status_flags));
5391 }
5392 
5393 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5394 {
5395 	char buf[96];
5396 	int i = 0;
5397 
5398 	/*
5399 	 * For most of these errors, there is nothing that can be done except
5400 	 * report or record it.
5401 	 */
5402 	dd_dev_info(dd, "CCE Error: %s\n",
5403 		    cce_err_status_string(buf, sizeof(buf), reg));
5404 
5405 	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5406 	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5407 		/* this error requires a manual drop into SPC freeze mode */
5408 		/* then a fix up */
5409 		start_freeze_handling(dd->pport, FREEZE_SELF);
5410 	}
5411 
5412 	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5413 		if (reg & (1ull << i)) {
5414 			incr_cntr64(&dd->cce_err_status_cnt[i]);
5415 			/* maintain a counter over all cce_err_status errors */
5416 			incr_cntr64(&dd->sw_cce_err_status_aggregate);
5417 		}
5418 	}
5419 }
5420 
5421 /*
5422  * Check counters for receive errors that do not have an interrupt
5423  * associated with them.
5424  */
5425 #define RCVERR_CHECK_TIME 10
5426 static void update_rcverr_timer(unsigned long opaque)
5427 {
5428 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5429 	struct hfi1_pportdata *ppd = dd->pport;
5430 	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5431 
5432 	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5433 	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5434 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5435 		set_link_down_reason(
5436 		ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5437 		OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5438 		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5439 	}
5440 	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5441 
5442 	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5443 }
5444 
5445 static int init_rcverr(struct hfi1_devdata *dd)
5446 {
5447 	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5448 	/* Assume the hardware counter has been reset */
5449 	dd->rcv_ovfl_cnt = 0;
5450 	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5451 }
5452 
5453 static void free_rcverr(struct hfi1_devdata *dd)
5454 {
5455 	if (dd->rcverr_timer.data)
5456 		del_timer_sync(&dd->rcverr_timer);
5457 	dd->rcverr_timer.data = 0;
5458 }
5459 
5460 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5461 {
5462 	char buf[96];
5463 	int i = 0;
5464 
5465 	dd_dev_info(dd, "Receive Error: %s\n",
5466 		    rxe_err_status_string(buf, sizeof(buf), reg));
5467 
5468 	if (reg & ALL_RXE_FREEZE_ERR) {
5469 		int flags = 0;
5470 
5471 		/*
5472 		 * Freeze mode recovery is disabled for the errors
5473 		 * in RXE_FREEZE_ABORT_MASK
5474 		 */
5475 		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5476 			flags = FREEZE_ABORT;
5477 
5478 		start_freeze_handling(dd->pport, flags);
5479 	}
5480 
5481 	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5482 		if (reg & (1ull << i))
5483 			incr_cntr64(&dd->rcv_err_status_cnt[i]);
5484 	}
5485 }
5486 
5487 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488 {
5489 	char buf[96];
5490 	int i = 0;
5491 
5492 	dd_dev_info(dd, "Misc Error: %s\n",
5493 		    misc_err_status_string(buf, sizeof(buf), reg));
5494 	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5495 		if (reg & (1ull << i))
5496 			incr_cntr64(&dd->misc_err_status_cnt[i]);
5497 	}
5498 }
5499 
5500 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5501 {
5502 	char buf[96];
5503 	int i = 0;
5504 
5505 	dd_dev_info(dd, "PIO Error: %s\n",
5506 		    pio_err_status_string(buf, sizeof(buf), reg));
5507 
5508 	if (reg & ALL_PIO_FREEZE_ERR)
5509 		start_freeze_handling(dd->pport, 0);
5510 
5511 	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5512 		if (reg & (1ull << i))
5513 			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5514 	}
5515 }
5516 
5517 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518 {
5519 	char buf[96];
5520 	int i = 0;
5521 
5522 	dd_dev_info(dd, "SDMA Error: %s\n",
5523 		    sdma_err_status_string(buf, sizeof(buf), reg));
5524 
5525 	if (reg & ALL_SDMA_FREEZE_ERR)
5526 		start_freeze_handling(dd->pport, 0);
5527 
5528 	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5529 		if (reg & (1ull << i))
5530 			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5531 	}
5532 }
5533 
5534 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5535 {
5536 	incr_cntr64(&ppd->port_xmit_discards);
5537 }
5538 
5539 static void count_port_inactive(struct hfi1_devdata *dd)
5540 {
5541 	__count_port_discards(dd->pport);
5542 }
5543 
5544 /*
5545  * We have had a "disallowed packet" error during egress. Determine the
5546  * integrity check which failed, and update relevant error counter, etc.
5547  *
5548  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5549  * bit of state per integrity check, and so we can miss the reason for an
5550  * egress error if more than one packet fails the same integrity check
5551  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5552  */
5553 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5554 					int vl)
5555 {
5556 	struct hfi1_pportdata *ppd = dd->pport;
5557 	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5558 	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5559 	char buf[96];
5560 
5561 	/* clear down all observed info as quickly as possible after read */
5562 	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5563 
5564 	dd_dev_info(dd,
5565 		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5566 		    info, egress_err_info_string(buf, sizeof(buf), info), src);
5567 
5568 	/* Eventually add other counters for each bit */
5569 	if (info & PORT_DISCARD_EGRESS_ERRS) {
5570 		int weight, i;
5571 
5572 		/*
5573 		 * Count all applicable bits as individual errors and
5574 		 * attribute them to the packet that triggered this handler.
5575 		 * This may not be completely accurate due to limitations
5576 		 * on the available hardware error information.  There is
5577 		 * a single information register and any number of error
5578 		 * packets may have occurred and contributed to it before
5579 		 * this routine is called.  This means that:
5580 		 * a) If multiple packets with the same error occur before
5581 		 *    this routine is called, earlier packets are missed.
5582 		 *    There is only a single bit for each error type.
5583 		 * b) Errors may not be attributed to the correct VL.
5584 		 *    The driver is attributing all bits in the info register
5585 		 *    to the packet that triggered this call, but bits
5586 		 *    could be an accumulation of different packets with
5587 		 *    different VLs.
5588 		 * c) A single error packet may have multiple counts attached
5589 		 *    to it.  There is no way for the driver to know if
5590 		 *    multiple bits set in the info register are due to a
5591 		 *    single packet or multiple packets.  The driver assumes
5592 		 *    multiple packets.
5593 		 */
5594 		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5595 		for (i = 0; i < weight; i++) {
5596 			__count_port_discards(ppd);
5597 			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5598 				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5599 			else if (vl == 15)
5600 				incr_cntr64(&ppd->port_xmit_discards_vl
5601 					    [C_VL_15]);
5602 		}
5603 	}
5604 }
5605 
5606 /*
5607  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5608  * register. Does it represent a 'port inactive' error?
5609  */
5610 static inline int port_inactive_err(u64 posn)
5611 {
5612 	return (posn >= SEES(TX_LINKDOWN) &&
5613 		posn <= SEES(TX_INCORRECT_LINK_STATE));
5614 }
5615 
5616 /*
5617  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5618  * register. Does it represent a 'disallowed packet' error?
5619  */
5620 static inline int disallowed_pkt_err(int posn)
5621 {
5622 	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5623 		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5624 }
5625 
5626 /*
5627  * Input value is a bit position of one of the SDMA engine disallowed
5628  * packet errors.  Return which engine.  Use of this must be guarded by
5629  * disallowed_pkt_err().
5630  */
5631 static inline int disallowed_pkt_engine(int posn)
5632 {
5633 	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5634 }
5635 
5636 /*
5637  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5638  * be done.
5639  */
5640 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5641 {
5642 	struct sdma_vl_map *m;
5643 	int vl;
5644 
5645 	/* range check */
5646 	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5647 		return -1;
5648 
5649 	rcu_read_lock();
5650 	m = rcu_dereference(dd->sdma_map);
5651 	vl = m->engine_to_vl[engine];
5652 	rcu_read_unlock();
5653 
5654 	return vl;
5655 }
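
/*
 * Editor's illustrative sketch, not part of the driver: mapping an
 * egress error status bit position back to the offending SDMA engine
 * and its VL, mirroring the logic used in handle_egress_err() below.
 */
static void __maybe_unused example_disallowed_pkt(struct hfi1_devdata *dd,
						  int posn)
{
	if (disallowed_pkt_err(posn)) {
		int engine = disallowed_pkt_engine(posn);
		int vl = engine_to_vl(dd, engine);

		dd_dev_info(dd, "bit %d -> SDMA engine %d -> VL %d\n",
			    posn, engine, vl);
	}
}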
5656 
5657 /*
5658  * Translate the send context (software index) into a VL.  Return -1 if the
5659  * translation cannot be done.
5660  */
5661 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5662 {
5663 	struct send_context_info *sci;
5664 	struct send_context *sc;
5665 	int i;
5666 
5667 	sci = &dd->send_contexts[sw_index];
5668 
5669 	/* there is no information for user (PSM) and ack contexts */
5670 	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5671 		return -1;
5672 
5673 	sc = sci->sc;
5674 	if (!sc)
5675 		return -1;
5676 	if (dd->vld[15].sc == sc)
5677 		return 15;
5678 	for (i = 0; i < num_vls; i++)
5679 		if (dd->vld[i].sc == sc)
5680 			return i;
5681 
5682 	return -1;
5683 }
5684 
5685 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5686 {
5687 	u64 reg_copy = reg, handled = 0;
5688 	char buf[96];
5689 	int i = 0;
5690 
5691 	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5692 		start_freeze_handling(dd->pport, 0);
5693 	else if (is_ax(dd) &&
5694 		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5695 		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5696 		start_freeze_handling(dd->pport, 0);
5697 
5698 	while (reg_copy) {
5699 		int posn = fls64(reg_copy);
5700 		/* fls64() returns a 1-based offset; we want it zero based */
5701 		int shift = posn - 1;
5702 		u64 mask = 1ULL << shift;
5703 
5704 		if (port_inactive_err(shift)) {
5705 			count_port_inactive(dd);
5706 			handled |= mask;
5707 		} else if (disallowed_pkt_err(shift)) {
5708 			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5709 
5710 			handle_send_egress_err_info(dd, vl);
5711 			handled |= mask;
5712 		}
5713 		reg_copy &= ~mask;
5714 	}
5715 
5716 	reg &= ~handled;
5717 
5718 	if (reg)
5719 		dd_dev_info(dd, "Egress Error: %s\n",
5720 			    egress_err_status_string(buf, sizeof(buf), reg));
5721 
5722 	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5723 		if (reg & (1ull << i))
5724 			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5725 	}
5726 }
5727 
5728 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5729 {
5730 	char buf[96];
5731 	int i = 0;
5732 
5733 	dd_dev_info(dd, "Send Error: %s\n",
5734 		    send_err_status_string(buf, sizeof(buf), reg));
5735 
5736 	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5737 		if (reg & (1ull << i))
5738 			incr_cntr64(&dd->send_err_status_cnt[i]);
5739 	}
5740 }
5741 
5742 /*
5743  * The maximum number of times the error clear down will loop before
5744  * blocking a repeating error.  This value is arbitrary.
5745  */
5746 #define MAX_CLEAR_COUNT 20
5747 
5748 /*
5749  * Clear and handle an error register.  All error interrupts are funneled
5750  * through here to have a central location to correctly handle single-
5751  * or multi-shot errors.
5752  *
5753  * For non per-context registers, call this routine with a context value
5754  * of 0 so the per-context offset is zero.
5755  *
5756  * If the handler loops too many times, assume that something is wrong
5757  * and can't be fixed, so mask the error bits.
5758  */
5759 static void interrupt_clear_down(struct hfi1_devdata *dd,
5760 				 u32 context,
5761 				 const struct err_reg_info *eri)
5762 {
5763 	u64 reg;
5764 	u32 count;
5765 
5766 	/* read in a loop until no more errors are seen */
5767 	count = 0;
5768 	while (1) {
5769 		reg = read_kctxt_csr(dd, context, eri->status);
5770 		if (reg == 0)
5771 			break;
5772 		write_kctxt_csr(dd, context, eri->clear, reg);
5773 		if (likely(eri->handler))
5774 			eri->handler(dd, context, reg);
5775 		count++;
5776 		if (count > MAX_CLEAR_COUNT) {
5777 			u64 mask;
5778 
5779 			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5780 				   eri->desc, reg);
5781 			/*
5782 			 * Read-modify-write so any other masked bits
5783 			 * remain masked.
5784 			 */
5785 			mask = read_kctxt_csr(dd, context, eri->mask);
5786 			mask &= ~reg;
5787 			write_kctxt_csr(dd, context, eri->mask, mask);
5788 			break;
5789 		}
5790 	}
5791 }
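
/*
 * Editor's illustrative sketch, not part of the driver: wiring an error
 * register into interrupt_clear_down().  The err_reg_info layout is
 * inferred from its use above, the CSR offsets are zero placeholders,
 * and handle_cce_err() is borrowed only because its signature matches;
 * the real tables (misc_errs[], various_err[], ...) live elsewhere in
 * this file.
 */
static void __maybe_unused example_clear_down(struct hfi1_devdata *dd)
{
	static const struct err_reg_info demo_eri = {
		.status  = 0,		/* placeholder status CSR offset */
		.clear   = 0,		/* placeholder clear CSR offset */
		.mask    = 0,		/* placeholder mask CSR offset */
		.handler = handle_cce_err,
		.desc    = "DemoErr",
	};

	/* context 0: treat this as a non per-context register */
	interrupt_clear_down(dd, 0, &demo_eri);
}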
5792 
5793 /*
5794  * CCE block "misc" interrupt.  Source is < 16.
5795  */
5796 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5797 {
5798 	const struct err_reg_info *eri = &misc_errs[source];
5799 
5800 	if (eri->handler) {
5801 		interrupt_clear_down(dd, 0, eri);
5802 	} else {
5803 		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5804 			   source);
5805 	}
5806 }
5807 
5808 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5809 {
5810 	return flag_string(buf, buf_len, flags,
5811 			   sc_err_status_flags,
5812 			   ARRAY_SIZE(sc_err_status_flags));
5813 }
5814 
5815 /*
5816  * Send context error interrupt.  Source (hw_context) is < 160.
5817  *
5818  * All send context errors cause the send context to halt.  The normal
5819  * clear-down mechanism cannot be used because we cannot clear the
5820  * error bits until several other long-running items are done first.
5821  * This is OK because with the context halted, nothing else is going
5822  * to happen on it anyway.
5823  */
5824 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5825 				unsigned int hw_context)
5826 {
5827 	struct send_context_info *sci;
5828 	struct send_context *sc;
5829 	char flags[96];
5830 	u64 status;
5831 	u32 sw_index;
5832 	int i = 0;
5833 
5834 	sw_index = dd->hw_to_sw[hw_context];
5835 	if (sw_index >= dd->num_send_contexts) {
5836 		dd_dev_err(dd,
5837 			   "out of range sw index %u for send context %u\n",
5838 			   sw_index, hw_context);
5839 		return;
5840 	}
5841 	sci = &dd->send_contexts[sw_index];
5842 	sc = sci->sc;
5843 	if (!sc) {
5844 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5845 			   sw_index, hw_context);
5846 		return;
5847 	}
5848 
5849 	/* tell the software that a halt has begun */
5850 	sc_stop(sc, SCF_HALTED);
5851 
5852 	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5853 
5854 	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5855 		    send_context_err_status_string(flags, sizeof(flags),
5856 						   status));
5857 
5858 	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5859 		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5860 
5861 	/*
5862 	 * Automatically restart halted kernel contexts out of interrupt
5863 	 * context.  User contexts must ask the driver to restart the context.
5864 	 */
5865 	if (sc->type != SC_USER)
5866 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5867 
5868 	/*
5869 	 * Update the counters for the corresponding status bits.
5870 	 * Note that these particular counters are aggregated over all
5871 	 * 160 contexts.
5872 	 */
5873 	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5874 		if (status & (1ull << i))
5875 			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5876 	}
5877 }
5878 
5879 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5880 				unsigned int source, u64 status)
5881 {
5882 	struct sdma_engine *sde;
5883 	int i = 0;
5884 
5885 	sde = &dd->per_sdma[source];
5886 #ifdef CONFIG_SDMA_VERBOSITY
5887 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5888 		   slashstrip(__FILE__), __LINE__, __func__);
5889 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5890 		   sde->this_idx, source, (unsigned long long)status);
5891 #endif
5892 	sde->err_cnt++;
5893 	sdma_engine_error(sde, status);
5894 
5895 	/*
5896 	 * Update the counters for the corresponding status bits.
5897 	 * Note that these particular counters are aggregated over
5898 	 * all 16 DMA engines.
5899 	 */
5900 	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5901 		if (status & (1ull << i))
5902 			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5903 	}
5904 }
5905 
5906 /*
5907  * CCE block SDMA error interrupt.  Source is < 16.
5908  */
5909 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5910 {
5911 #ifdef CONFIG_SDMA_VERBOSITY
5912 	struct sdma_engine *sde = &dd->per_sdma[source];
5913 
5914 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5915 		   slashstrip(__FILE__), __LINE__, __func__);
5916 	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5917 		   source);
5918 	sdma_dumpstate(sde);
5919 #endif
5920 	interrupt_clear_down(dd, source, &sdma_eng_err);
5921 }
5922 
5923 /*
5924  * CCE block "various" interrupt.  Source is < 8.
5925  */
5926 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5927 {
5928 	const struct err_reg_info *eri = &various_err[source];
5929 
5930 	/*
5931 	 * TCritInt cannot go through interrupt_clear_down()
5932 	 * because it is not a second tier interrupt. The handler
5933 	 * should be called directly.
5934 	 */
5935 	if (source == TCRIT_INT_SOURCE)
5936 		handle_temp_err(dd);
5937 	else if (eri->handler)
5938 		interrupt_clear_down(dd, 0, eri);
5939 	else
5940 		dd_dev_info(dd,
5941 			    "%s: Unimplemented/reserved interrupt %d\n",
5942 			    __func__, source);
5943 }
5944 
5945 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5946 {
5947 	/* src_ctx is always zero */
5948 	struct hfi1_pportdata *ppd = dd->pport;
5949 	unsigned long flags;
5950 	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5951 
5952 	if (reg & QSFP_HFI0_MODPRST_N) {
5953 		if (!qsfp_mod_present(ppd)) {
5954 			dd_dev_info(dd, "%s: QSFP module removed\n",
5955 				    __func__);
5956 
5957 			ppd->driver_link_ready = 0;
5958 			/*
5959 			 * Cable removed, reset all our information about the
5960 			 * cache and cable capabilities
5961 			 */
5962 
5963 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5964 			/*
5965 			 * We don't set cache_refresh_required here as we expect
5966 			 * an interrupt when a cable is inserted
5967 			 */
5968 			ppd->qsfp_info.cache_valid = 0;
5969 			ppd->qsfp_info.reset_needed = 0;
5970 			ppd->qsfp_info.limiting_active = 0;
5971 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5972 					       flags);
5973 			/* Invert the ModPresent pin now to detect plug-in */
5974 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5975 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5976 
5977 			if ((ppd->offline_disabled_reason >
5978 			  HFI1_ODR_MASK(
5979 			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
5980 			  (ppd->offline_disabled_reason ==
5981 			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5982 				ppd->offline_disabled_reason =
5983 				HFI1_ODR_MASK(
5984 				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
5985 
5986 			if (ppd->host_link_state == HLS_DN_POLL) {
5987 				/*
5988 				 * The link is still in POLL. This means
5989 				 * that the normal link down processing
5990 				 * will not happen. We have to do it here
5991 				 * before turning the DC off.
5992 				 */
5993 				queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5994 			}
5995 		} else {
5996 			dd_dev_info(dd, "%s: QSFP module inserted\n",
5997 				    __func__);
5998 
5999 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6000 			ppd->qsfp_info.cache_valid = 0;
6001 			ppd->qsfp_info.cache_refresh_required = 1;
6002 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6003 					       flags);
6004 
6005 			/*
6006 			 * Stop inversion of ModPresent pin to detect
6007 			 * removal of the cable
6008 			 */
6009 			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6010 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6011 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6012 
6013 			ppd->offline_disabled_reason =
6014 				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6015 		}
6016 	}
6017 
6018 	if (reg & QSFP_HFI0_INT_N) {
6019 		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6020 			    __func__);
6021 		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6022 		ppd->qsfp_info.check_interrupt_flags = 1;
6023 		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6024 	}
6025 
6026 	/* Schedule the QSFP work only if there is a cable attached. */
6027 	if (qsfp_mod_present(ppd))
6028 		queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6029 }
6030 
6031 static int request_host_lcb_access(struct hfi1_devdata *dd)
6032 {
6033 	int ret;
6034 
6035 	ret = do_8051_command(dd, HCMD_MISC,
6036 			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6037 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6038 	if (ret != HCMD_SUCCESS) {
6039 		dd_dev_err(dd, "%s: command failed with error %d\n",
6040 			   __func__, ret);
6041 	}
6042 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6043 }
6044 
6045 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6046 {
6047 	int ret;
6048 
6049 	ret = do_8051_command(dd, HCMD_MISC,
6050 			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6051 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6052 	if (ret != HCMD_SUCCESS) {
6053 		dd_dev_err(dd, "%s: command failed with error %d\n",
6054 			   __func__, ret);
6055 	}
6056 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6057 }
6058 
6059 /*
6060  * Set the LCB selector - allow host access.  The DCC selector always
6061  * points to the host.
6062  */
6063 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6064 {
6065 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6066 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6067 		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6068 }
6069 
6070 /*
6071  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6072  * points to the host.
6073  */
6074 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6075 {
6076 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6077 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6078 }
6079 
6080 /*
6081  * Acquire LCB access from the 8051.  If the host already has access,
6082  * just increment a counter.  Otherwise, inform the 8051 that the
6083  * host is taking access.
6084  *
6085  * Returns:
6086  *	0 on success
6087  *	-EBUSY if the 8051 has control and cannot be disturbed
6088  *	-errno if unable to acquire access from the 8051
6089  */
6090 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6091 {
6092 	struct hfi1_pportdata *ppd = dd->pport;
6093 	int ret = 0;
6094 
6095 	/*
6096 	 * Use the host link state lock so the operation of this routine
6097 	 * { link state check, selector change, count increment } can occur
6098 	 * as a unit against a link state change.  Otherwise there is a
6099 	 * race between the state change and the count increment.
6100 	 */
6101 	if (sleep_ok) {
6102 		mutex_lock(&ppd->hls_lock);
6103 	} else {
6104 		while (!mutex_trylock(&ppd->hls_lock))
6105 			udelay(1);
6106 	}
6107 
6108 	/* this access is valid only when the link is up */
6109 	if (ppd->host_link_state & HLS_DOWN) {
6110 		dd_dev_info(dd, "%s: link state %s not up\n",
6111 			    __func__, link_state_name(ppd->host_link_state));
6112 		ret = -EBUSY;
6113 		goto done;
6114 	}
6115 
6116 	if (dd->lcb_access_count == 0) {
6117 		ret = request_host_lcb_access(dd);
6118 		if (ret) {
6119 			dd_dev_err(dd,
6120 				   "%s: unable to acquire LCB access, err %d\n",
6121 				   __func__, ret);
6122 			goto done;
6123 		}
6124 		set_host_lcb_access(dd);
6125 	}
6126 	dd->lcb_access_count++;
6127 done:
6128 	mutex_unlock(&ppd->hls_lock);
6129 	return ret;
6130 }
6131 
6132 /*
6133  * Release LCB access by decrementing the use count.  If the count is moving
6134  * from 1 to 0, inform the 8051 that it has control back.
6135  *
6136  * Returns:
6137  *	0 on success
6138  *	-errno if unable to release access to the 8051
6139  */
6140 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6141 {
6142 	int ret = 0;
6143 
6144 	/*
6145 	 * Use the host link state lock because the acquire needed it.
6146 	 * Here, we only need to keep { selector change, count decrement }
6147 	 * as a unit.
6148 	 */
6149 	if (sleep_ok) {
6150 		mutex_lock(&dd->pport->hls_lock);
6151 	} else {
6152 		while (!mutex_trylock(&dd->pport->hls_lock))
6153 			udelay(1);
6154 	}
6155 
6156 	if (dd->lcb_access_count == 0) {
6157 		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6158 			   __func__);
6159 		goto done;
6160 	}
6161 
6162 	if (dd->lcb_access_count == 1) {
6163 		set_8051_lcb_access(dd);
6164 		ret = request_8051_lcb_access(dd);
6165 		if (ret) {
6166 			dd_dev_err(dd,
6167 				   "%s: unable to release LCB access, err %d\n",
6168 				   __func__, ret);
6169 			/* restore host access if the grant didn't work */
6170 			set_host_lcb_access(dd);
6171 			goto done;
6172 		}
6173 	}
6174 	dd->lcb_access_count--;
6175 done:
6176 	mutex_unlock(&dd->pport->hls_lock);
6177 	return ret;
6178 }
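
/*
 * Illustrative use of the acquire/release pair above (a sketch; the
 * surrounding caller and its error handling are assumed, not part of
 * this file):
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read or write LCB CSRs here ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Passing sleep_ok = 0 makes both routines busy-wait on the hls_lock
 * (mutex_trylock() plus udelay(1)) instead of sleeping.
 */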
6179 
6180 /*
6181  * Initialize LCB access variables and state.  Called during driver load,
6182  * after most of the initialization is finished.
6183  *
6184  * The DC default is LCB access on for the host.  The driver defaults to
6185  * leaving access to the 8051.  Assign access now - this constrains the call
6186  * to this routine to be after all LCB set-up is done.  In particular, after
6187  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6188  */
6189 static void init_lcb_access(struct hfi1_devdata *dd)
6190 {
6191 	dd->lcb_access_count = 0;
6192 }
6193 
6194 /*
6195  * Write a response back to a 8051 request.
6196  * Write a response back to an 8051 request.
6197 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6198 {
6199 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6200 		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6201 		  (u64)return_code <<
6202 		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6203 		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6204 }
6205 
6206 /*
6207  * Handle host requests from the 8051.
6208  */
6209 static void handle_8051_request(struct hfi1_pportdata *ppd)
6210 {
6211 	struct hfi1_devdata *dd = ppd->dd;
6212 	u64 reg;
6213 	u16 data = 0;
6214 	u8 type;
6215 
6216 	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6217 	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6218 		return;	/* no request */
6219 
6220 	/* zero out COMPLETED so the response is seen */
6221 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6222 
6223 	/* extract request details */
6224 	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6225 			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6226 	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6227 			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6228 
6229 	switch (type) {
6230 	case HREQ_LOAD_CONFIG:
6231 	case HREQ_SAVE_CONFIG:
6232 	case HREQ_READ_CONFIG:
6233 	case HREQ_SET_TX_EQ_ABS:
6234 	case HREQ_SET_TX_EQ_REL:
6235 	case HREQ_ENABLE:
6236 		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6237 			    type);
6238 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6239 		break;
6240 	case HREQ_CONFIG_DONE:
6241 		hreq_response(dd, HREQ_SUCCESS, 0);
6242 		break;
6243 
6244 	case HREQ_INTERFACE_TEST:
6245 		hreq_response(dd, HREQ_SUCCESS, data);
6246 		break;
6247 	default:
6248 		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6249 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6250 		break;
6251 	}
6252 }
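
/*
 * Summary of the request/response handshake implemented above, as read
 * from the code (no register behavior beyond what is used here is
 * assumed):
 *
 *	8051: sets REQ_NEW, REQ_TYPE, and REQ_DATA in DC_DC8051_CFG_EXT_DEV_1
 *	host: clears DC_DC8051_CFG_EXT_DEV_0 so the new response is visible
 *	host: acts on the request type
 *	host: replies via hreq_response(), which writes COMPLETED, the
 *	      return code, and the response data to DC_DC8051_CFG_EXT_DEV_0
 */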
6253 
6254 static void write_global_credit(struct hfi1_devdata *dd,
6255 				u8 vau, u16 total, u16 shared)
6256 {
6257 	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6258 		  ((u64)total <<
6259 		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6260 		  ((u64)shared <<
6261 		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6262 		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6263 }
6264 
6265 /*
6266  * Set up initial VL15 credits of the remote.  Assumes the rest of
6267  * the CM credit registers are zero from a previous global or credit reset.
6268  */
6269 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6270 {
6271 	/* leave shared count at zero for both global and VL15 */
6272 	write_global_credit(dd, vau, vl15buf, 0);
6273 
6274 	/* We may need some credits for another VL when sending packets
6275 	 * with the snoop interface. Dividing it down the middle for VL15
6276 	 * and VL0 should suffice.
6277 	 */
6278 	if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6279 		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6280 		    << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6281 		write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6282 		    << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6283 	} else {
6284 		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6285 			<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6286 	}
6287 }
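
/*
 * Worked example for the split above (value chosen for illustration):
 * with vl15buf = 0x40 in snoop mode, VL15 and VL0 each receive a
 * dedicated limit of 0x20 credits; otherwise all 0x40 credits go to the
 * VL15 dedicated limit.
 */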
6288 
6289 /*
6290  * Zero all credit details from the previous connection and
6291  * reset the CM manager's internal counters.
6292  */
6293 void reset_link_credits(struct hfi1_devdata *dd)
6294 {
6295 	int i;
6296 
6297 	/* remove all previous VL credit limits */
6298 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
6299 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6300 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6301 	write_global_credit(dd, 0, 0, 0);
6302 	/* reset the CM block */
6303 	pio_send_control(dd, PSC_CM_RESET);
6304 }
6305 
6306 /* convert a vCU to a CU */
6307 static u32 vcu_to_cu(u8 vcu)
6308 {
6309 	return 1 << vcu;
6310 }
6311 
6312 /* convert a CU to a vCU */
6313 static u8 cu_to_vcu(u32 cu)
6314 {
6315 	return ilog2(cu);
6316 }
6317 
6318 /* convert a vAU to an AU */
6319 static u32 vau_to_au(u8 vau)
6320 {
6321 	return 8 * (1 << vau);
6322 }
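
/*
 * Worked examples for the conversions above, straight from the formulas:
 *	vcu_to_cu():  vCU 0 -> 1 CU,  vCU 3 -> 8 CUs
 *	cu_to_vcu():  8 CUs -> vCU 3  (inverse, via ilog2)
 *	vau_to_au():  vAU 0 -> AU of 8,  vAU 1 -> 16,  vAU 3 -> 64
 */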
6323 
6324 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6325 {
6326 	ppd->sm_trap_qp = 0x0;
6327 	ppd->sa_qp = 0x1;
6328 }
6329 
6330 /*
6331  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6332  */
6333 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6334 {
6335 	u64 reg;
6336 
6337 	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
6338 	write_csr(dd, DC_LCB_CFG_RUN, 0);
6339 	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6340 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6341 		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6342 	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6343 	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6344 	reg = read_csr(dd, DCC_CFG_RESET);
6345 	write_csr(dd, DCC_CFG_RESET, reg |
6346 		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6347 		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6348 	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6349 	if (!abort) {
6350 		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6351 		write_csr(dd, DCC_CFG_RESET, reg);
6352 		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6353 	}
6354 }
6355 
6356 /*
6357  * This routine should be called after the link has been transitioned to
6358  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6359  * reset).
6360  *
6361  * The expectation is that the caller of this routine would have taken
6362  * care of properly transitioning the link into the correct state.
6363  */
6364 static void dc_shutdown(struct hfi1_devdata *dd)
6365 {
6366 	unsigned long flags;
6367 
6368 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6369 	if (dd->dc_shutdown) {
6370 		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6371 		return;
6372 	}
6373 	dd->dc_shutdown = 1;
6374 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6375 	/* Shutdown the LCB */
6376 	lcb_shutdown(dd, 1);
6377 	/*
6378 	 * Going to OFFLINE would have caused the 8051 to put the
6379 	 * SerDes into reset already. Just need to shut down the 8051
6380 	 * itself.
6381 	 */
6382 	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6383 }
6384 
6385 /*
6386  * Calling this after the DC has been brought out of reset should not
6387  * do any damage.
6388  */
6389 static void dc_start(struct hfi1_devdata *dd)
6390 {
6391 	unsigned long flags;
6392 	int ret;
6393 
6394 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6395 	if (!dd->dc_shutdown)
6396 		goto done;
6397 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398 	/* Take the 8051 out of reset */
6399 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6400 	/* Wait until 8051 is ready */
6401 	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6402 	if (ret) {
6403 		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6404 			   __func__);
6405 	}
6406 	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6407 	write_csr(dd, DCC_CFG_RESET, 0x10);
6408 	/* lcb_shutdown() with abort=1 does not restore these */
6409 	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6410 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6411 	dd->dc_shutdown = 0;
6412 done:
6413 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6414 }
6415 
6416 /*
6417  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6418  */
6419 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6420 {
6421 	u64 rx_radr, tx_radr;
6422 	u32 version;
6423 
6424 	if (dd->icode != ICODE_FPGA_EMULATION)
6425 		return;
6426 
6427 	/*
6428 	 * These LCB defaults on emulator _s are good, nothing to do here:
6429 	 *	LCB_CFG_TX_FIFOS_RADR
6430 	 *	LCB_CFG_RX_FIFOS_RADR
6431 	 *	LCB_CFG_LN_DCLK
6432 	 *	LCB_CFG_IGNORE_LOST_RCLK
6433 	 */
6434 	if (is_emulator_s(dd))
6435 		return;
6436 	/* else this is _p */
6437 
6438 	version = emulator_rev(dd);
6439 	if (!is_ax(dd))
6440 		version = 0x2d;	/* all B0 use 0x2d or higher settings */
6441 
6442 	if (version <= 0x12) {
6443 		/* release 0x12 and below */
6444 
6445 		/*
6446 		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6447 		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6448 		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6449 		 */
6450 		rx_radr =
6451 		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6452 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6453 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6454 		/*
6455 		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6456 		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6457 		 */
6458 		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6459 	} else if (version <= 0x18) {
6460 		/* release 0x13 up to 0x18 */
6461 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6462 		rx_radr =
6463 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6464 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6465 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6466 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6467 	} else if (version == 0x19) {
6468 		/* release 0x19 */
6469 		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6470 		rx_radr =
6471 		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6475 	} else if (version == 0x1a) {
6476 		/* release 0x1a */
6477 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6478 		rx_radr =
6479 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6480 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6481 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6482 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6483 		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6484 	} else {
6485 		/* release 0x1b and higher */
6486 		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6487 		rx_radr =
6488 		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6491 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6492 	}
6493 
6494 	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6495 	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6496 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6497 		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6498 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6499 }
6500 
6501 /*
6502  * Handle a SMA idle message
6503  *
6504  * This is a work-queue function outside of the interrupt.
6505  */
6506 void handle_sma_message(struct work_struct *work)
6507 {
6508 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6509 							sma_message_work);
6510 	struct hfi1_devdata *dd = ppd->dd;
6511 	u64 msg;
6512 	int ret;
6513 
6514 	/*
6515 	 * msg is bytes 1-4 of the 40-bit idle message - the command code
6516 	 * is stripped off
6517 	 */
6518 	ret = read_idle_sma(dd, &msg);
6519 	if (ret)
6520 		return;
6521 	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6522 	/*
6523 	 * React to the SMA message.  Byte[1] (0 for us) is the command.
6524 	 */
6525 	switch (msg & 0xff) {
6526 	case SMA_IDLE_ARM:
6527 		/*
6528 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6529 		 * State Transitions
6530 		 *
6531 		 * Only expected in INIT or ARMED, discard otherwise.
6532 		 */
6533 		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6534 			ppd->neighbor_normal = 1;
6535 		break;
6536 	case SMA_IDLE_ACTIVE:
6537 		/*
6538 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6539 		 * State Transitions
6540 		 *
6541 		 * Can activate the node.  Discard otherwise.
6542 		 */
6543 		if (ppd->host_link_state == HLS_UP_ARMED &&
6544 		    ppd->is_active_optimize_enabled) {
6545 			ppd->neighbor_normal = 1;
6546 			ret = set_link_state(ppd, HLS_UP_ACTIVE);
6547 			if (ret)
6548 				dd_dev_err(
6549 					dd,
6550 					"%s: received Active SMA idle message, couldn't set link to Active\n",
6551 					__func__);
6552 		}
6553 		break;
6554 	default:
6555 		dd_dev_err(dd,
6556 			   "%s: received unexpected SMA idle message 0x%llx\n",
6557 			   __func__, msg);
6558 		break;
6559 	}
6560 }
6561 
6562 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6563 {
6564 	u64 rcvctrl;
6565 	unsigned long flags;
6566 
6567 	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6568 	rcvctrl = read_csr(dd, RCV_CTRL);
6569 	rcvctrl |= add;
6570 	rcvctrl &= ~clear;
6571 	write_csr(dd, RCV_CTRL, rcvctrl);
6572 	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6573 }
6574 
6575 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6576 {
6577 	adjust_rcvctrl(dd, add, 0);
6578 }
6579 
6580 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6581 {
6582 	adjust_rcvctrl(dd, 0, clear);
6583 }
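
/*
 * Example of the helpers above as used later in this file: enabling the
 * receive port sets a single bit in RCV_CTRL under rcvctrl_lock,
 *
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 *
 * and clear_rcvctrl() with the same mask disables the port again.
 */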
6584 
6585 /*
6586  * Called from all interrupt handlers to start handling an SPC freeze.
6587  */
6588 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6589 {
6590 	struct hfi1_devdata *dd = ppd->dd;
6591 	struct send_context *sc;
6592 	int i;
6593 
6594 	if (flags & FREEZE_SELF)
6595 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6596 
6597 	/* enter frozen mode */
6598 	dd->flags |= HFI1_FROZEN;
6599 
6600 	/* notify all SDMA engines that they are going into a freeze */
6601 	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6602 
6603 	/* do halt pre-handling on all enabled send contexts */
6604 	for (i = 0; i < dd->num_send_contexts; i++) {
6605 		sc = dd->send_contexts[i].sc;
6606 		if (sc && (sc->flags & SCF_ENABLED))
6607 			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6608 	}
6609 
6610 	/* Send contexts are frozen. Notify user space */
6611 	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6612 
6613 	if (flags & FREEZE_ABORT) {
6614 		dd_dev_err(dd,
6615 			   "Aborted freeze recovery. Please REBOOT system\n");
6616 		return;
6617 	}
6618 	/* queue non-interrupt handler */
6619 	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6620 }
6621 
6622 /*
6623  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6624  * depending on the "freeze" parameter.
6625  *
6626  * No need to return an error if it times out; our only option
6627  * is to proceed anyway.
6628  */
6629 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6630 {
6631 	unsigned long timeout;
6632 	u64 reg;
6633 
6634 	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6635 	while (1) {
6636 		reg = read_csr(dd, CCE_STATUS);
6637 		if (freeze) {
6638 			/* waiting until all indicators are set */
6639 			if ((reg & ALL_FROZE) == ALL_FROZE)
6640 				return;	/* all done */
6641 		} else {
6642 			/* waiting until all indicators are clear */
6643 			if ((reg & ALL_FROZE) == 0)
6644 				return; /* all done */
6645 		}
6646 
6647 		if (time_after(jiffies, timeout)) {
6648 			dd_dev_err(dd,
6649 				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6650 				   freeze ? "" : "un", reg & ALL_FROZE,
6651 				   freeze ? ALL_FROZE : 0ull);
6652 			return;
6653 		}
6654 		usleep_range(80, 120);
6655 	}
6656 }
6657 
6658 /*
6659  * Do all freeze handling for the RXE block.
6660  */
6661 static void rxe_freeze(struct hfi1_devdata *dd)
6662 {
6663 	int i;
6664 
6665 	/* disable port */
6666 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6667 
6668 	/* disable all receive contexts */
6669 	for (i = 0; i < dd->num_rcv_contexts; i++)
6670 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6671 }
6672 
6673 /*
6674  * Unfreeze handling for the RXE block - kernel contexts only.
6675  * This will also enable the port.  User contexts will do unfreeze
6676  * handling on a per-context basis as they call into the driver.
6677  *
6678  */
6679 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6680 {
6681 	u32 rcvmask;
6682 	int i;
6683 
6684 	/* enable all kernel contexts */
6685 	for (i = 0; i < dd->n_krcv_queues; i++) {
6686 		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6687 		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6688 		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6689 			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6690 		hfi1_rcvctrl(dd, rcvmask, i);
6691 	}
6692 
6693 	/* enable port */
6694 	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6695 }
6696 
6697 /*
6698  * Non-interrupt SPC freeze handling.
6699  *
6700  * This is a work-queue function outside of the triggering interrupt.
6701  */
6702 void handle_freeze(struct work_struct *work)
6703 {
6704 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6705 								freeze_work);
6706 	struct hfi1_devdata *dd = ppd->dd;
6707 
6708 	/* wait for freeze indicators on all affected blocks */
6709 	wait_for_freeze_status(dd, 1);
6710 
6711 	/* SPC is now frozen */
6712 
6713 	/* do send PIO freeze steps */
6714 	pio_freeze(dd);
6715 
6716 	/* do send DMA freeze steps */
6717 	sdma_freeze(dd);
6718 
6719 	/* do send egress freeze steps - nothing to do */
6720 
6721 	/* do receive freeze steps */
6722 	rxe_freeze(dd);
6723 
6724 	/*
6725 	 * Unfreeze the hardware - clear the freeze, wait for each
6726 	 * block's frozen bit to clear, then clear the frozen flag.
6727 	 */
6728 	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6729 	wait_for_freeze_status(dd, 0);
6730 
6731 	if (is_ax(dd)) {
6732 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6733 		wait_for_freeze_status(dd, 1);
6734 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6735 		wait_for_freeze_status(dd, 0);
6736 	}
6737 
6738 	/* do send PIO unfreeze steps for kernel contexts */
6739 	pio_kernel_unfreeze(dd);
6740 
6741 	/* do send DMA unfreeze steps */
6742 	sdma_unfreeze(dd);
6743 
6744 	/* do send egress unfreeze steps - nothing to do */
6745 
6746 	/* do receive unfreeze steps for kernel contexts */
6747 	rxe_kernel_unfreeze(dd);
6748 
6749 	/*
6750 	 * The unfreeze procedure touches global device registers when
6751 	 * it disables and re-enables RXE. Mark the device unfrozen
6752 	 * after all that is done so other parts of the driver waiting
6753 	 * for the device to unfreeze don't do things out of order.
6754 	 *
6755 	 * The above implies that the meaning of HFI1_FROZEN flag is
6756 	 * "Device has gone into freeze mode and freeze mode handling
6757 	 * is still in progress."
6758 	 *
6759 	 * The flag will be removed when freeze mode processing has
6760 	 * completed.
6761 	 */
6762 	dd->flags &= ~HFI1_FROZEN;
6763 	wake_up(&dd->event_queue);
6764 
6765 	/* no longer frozen */
6766 }
6767 
6768 /*
6769  * Handle a link up interrupt from the 8051.
6770  *
6771  * This is a work-queue function outside of the interrupt.
6772  */
6773 void handle_link_up(struct work_struct *work)
6774 {
6775 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6776 						  link_up_work);
6777 	set_link_state(ppd, HLS_UP_INIT);
6778 
6779 	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6780 	read_ltp_rtt(ppd->dd);
6781 	/*
6782 	 * OPA specifies that certain counters are cleared on a transition
6783 	 * to link up, so do that.
6784 	 */
6785 	clear_linkup_counters(ppd->dd);
6786 	/*
6787 	 * And (re)set link up default values.
6788 	 */
6789 	set_linkup_defaults(ppd);
6790 
6791 	/* enforce link speed enabled */
6792 	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6793 		/* oops - current speed is not enabled, bounce */
6794 		dd_dev_err(ppd->dd,
6795 			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6796 			   ppd->link_speed_active, ppd->link_speed_enabled);
6797 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6798 				     OPA_LINKDOWN_REASON_SPEED_POLICY);
6799 		set_link_state(ppd, HLS_DN_OFFLINE);
6800 		tune_serdes(ppd);
6801 		start_link(ppd);
6802 	}
6803 }
6804 
6805 /*
6806  * Several pieces of LNI information were cached for SMA in ppd.
6807  * Reset these on link down
6808  */
6809 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6810 {
6811 	ppd->neighbor_guid = 0;
6812 	ppd->neighbor_port_number = 0;
6813 	ppd->neighbor_type = 0;
6814 	ppd->neighbor_fm_security = 0;
6815 }
6816 
6817 static const char * const link_down_reason_strs[] = {
6818 	[OPA_LINKDOWN_REASON_NONE] = "None",
6819 	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6820 	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6821 	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6822 	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6823 	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6824 	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6825 	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6826 	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6827 	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6828 	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6829 	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6830 	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6831 	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6832 	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6833 	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6834 	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6835 	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6836 	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6837 	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6838 	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6839 	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6840 	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6841 	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6842 	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6843 	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6844 	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6845 	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6846 	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6847 	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6848 	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6849 	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6850 	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6851 					"Excessive buffer overrun",
6852 	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6853 	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6854 	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6855 	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6856 	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6857 	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6858 	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6859 	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6860 					"Local media not installed",
6861 	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6862 	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6863 	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6864 					"End to end not installed",
6865 	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6866 	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6867 	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6868 	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6869 	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6870 	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6871 };
6872 
6873 /* return the neighbor link down reason string */
6874 static const char *link_down_reason_str(u8 reason)
6875 {
6876 	const char *str = NULL;
6877 
6878 	if (reason < ARRAY_SIZE(link_down_reason_strs))
6879 		str = link_down_reason_strs[reason];
6880 	if (!str)
6881 		str = "(invalid)";
6882 
6883 	return str;
6884 }
6885 
6886 /*
6887  * Handle a link down interrupt from the 8051.
6888  *
6889  * This is a work-queue function outside of the interrupt.
6890  */
6891 void handle_link_down(struct work_struct *work)
6892 {
6893 	u8 lcl_reason, neigh_reason = 0;
6894 	u8 link_down_reason;
6895 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6896 						  link_down_work);
6897 	int was_up;
6898 	static const char ldr_str[] = "Link down reason: ";
6899 
6900 	if ((ppd->host_link_state &
6901 	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6902 	     ppd->port_type == PORT_TYPE_FIXED)
6903 		ppd->offline_disabled_reason =
6904 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6905 
6906 	/* Go offline first, then deal with reading/writing through 8051 */
6907 	was_up = !!(ppd->host_link_state & HLS_UP);
6908 	set_link_state(ppd, HLS_DN_OFFLINE);
6909 
6910 	if (was_up) {
6911 		lcl_reason = 0;
6912 		/* link down reason is only valid if the link was up */
6913 		read_link_down_reason(ppd->dd, &link_down_reason);
6914 		switch (link_down_reason) {
6915 		case LDR_LINK_TRANSFER_ACTIVE_LOW:
6916 			/* the link went down, no idle message reason */
6917 			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6918 				    ldr_str);
6919 			break;
6920 		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6921 			/*
6922 			 * The neighbor reason is only valid if an idle message
6923 			 * was received for it.
6924 			 */
6925 			read_planned_down_reason_code(ppd->dd, &neigh_reason);
6926 			dd_dev_info(ppd->dd,
6927 				    "%sNeighbor link down message %d, %s\n",
6928 				    ldr_str, neigh_reason,
6929 				    link_down_reason_str(neigh_reason));
6930 			break;
6931 		case LDR_RECEIVED_HOST_OFFLINE_REQ:
6932 			dd_dev_info(ppd->dd,
6933 				    "%sHost requested link to go offline\n",
6934 				    ldr_str);
6935 			break;
6936 		default:
6937 			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6938 				    ldr_str, link_down_reason);
6939 			break;
6940 		}
6941 
6942 		/*
6943 		 * If no reason, assume peer-initiated but missed
6944 		 * LinkGoingDown idle flits.
6945 		 */
6946 		if (neigh_reason == 0)
6947 			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6948 	} else {
6949 		/* went down while polling or going up */
6950 		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6951 	}
6952 
6953 	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6954 
6955 	/* inform the SMA when the link transitions from up to down */
6956 	if (was_up && ppd->local_link_down_reason.sma == 0 &&
6957 	    ppd->neigh_link_down_reason.sma == 0) {
6958 		ppd->local_link_down_reason.sma =
6959 					ppd->local_link_down_reason.latest;
6960 		ppd->neigh_link_down_reason.sma =
6961 					ppd->neigh_link_down_reason.latest;
6962 	}
6963 
6964 	reset_neighbor_info(ppd);
6965 	if (ppd->mgmt_allowed)
6966 		remove_full_mgmt_pkey(ppd);
6967 
6968 	/* disable the port */
6969 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6970 
6971 	/*
6972 	 * If there is no cable attached, turn the DC off. Otherwise,
6973 	 * start the link bring up.
6974 	 */
6975 	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
6976 		dc_shutdown(ppd->dd);
6977 	} else {
6978 		tune_serdes(ppd);
6979 		start_link(ppd);
6980 	}
6981 }
6982 
6983 void handle_link_bounce(struct work_struct *work)
6984 {
6985 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6986 							link_bounce_work);
6987 
6988 	/*
6989 	 * Only do something if the link is currently up.
6990 	 */
6991 	if (ppd->host_link_state & HLS_UP) {
6992 		set_link_state(ppd, HLS_DN_OFFLINE);
6993 		tune_serdes(ppd);
6994 		start_link(ppd);
6995 	} else {
6996 		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6997 			    __func__, link_state_name(ppd->host_link_state));
6998 	}
6999 }
7000 
7001 /*
7002  * Mask conversion: Capability exchange to Port LTP.  The capability
7003  * exchange has an implicit 16b CRC that is mandatory.
7004  */
7005 static int cap_to_port_ltp(int cap)
7006 {
7007 	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7008 
7009 	if (cap & CAP_CRC_14B)
7010 		port_ltp |= PORT_LTP_CRC_MODE_14;
7011 	if (cap & CAP_CRC_48B)
7012 		port_ltp |= PORT_LTP_CRC_MODE_48;
7013 	if (cap & CAP_CRC_12B_16B_PER_LANE)
7014 		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7015 
7016 	return port_ltp;
7017 }
7018 
7019 /*
7020  * Convert an OPA Port LTP mask to capability mask
7021  */
7022 int port_ltp_to_cap(int port_ltp)
7023 {
7024 	int cap_mask = 0;
7025 
7026 	if (port_ltp & PORT_LTP_CRC_MODE_14)
7027 		cap_mask |= CAP_CRC_14B;
7028 	if (port_ltp & PORT_LTP_CRC_MODE_48)
7029 		cap_mask |= CAP_CRC_48B;
7030 	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7031 		cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7032 
7033 	return cap_mask;
7034 }
7035 
7036 /*
7037  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7038  */
7039 static int lcb_to_port_ltp(int lcb_crc)
7040 {
7041 	int port_ltp = 0;
7042 
7043 	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7044 		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7045 	else if (lcb_crc == LCB_CRC_48B)
7046 		port_ltp = PORT_LTP_CRC_MODE_48;
7047 	else if (lcb_crc == LCB_CRC_14B)
7048 		port_ltp = PORT_LTP_CRC_MODE_14;
7049 	else
7050 		port_ltp = PORT_LTP_CRC_MODE_16;
7051 
7052 	return port_ltp;
7053 }
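
/*
 * The conversion helpers above (together with the CRC mode selection in
 * handle_verify_cap() below) implement this mapping; the 16b mode is
 * always present on the Port LTP side because it is mandatory:
 *
 *	capability bit			LCB CRC mode			Port LTP bit
 *	CAP_CRC_14B			LCB_CRC_14B			PORT_LTP_CRC_MODE_14
 *	CAP_CRC_48B			LCB_CRC_48B			PORT_LTP_CRC_MODE_48
 *	CAP_CRC_12B_16B_PER_LANE	LCB_CRC_12B_16B_PER_LANE	PORT_LTP_CRC_MODE_PER_LANE
 *	(implicit)			LCB_CRC_16B			PORT_LTP_CRC_MODE_16
 */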
7054 
7055 /*
7056  * Our neighbor has indicated that we are allowed to act as a fabric
7057  * manager, so place the full management partition key in pkey array
7058  * position 2 (0-based; see OPAv1, section 20.2.2.6.8). Note
7059  * that we should already have the limited management partition key in
7060  * array element 1, and also that the port is not yet up when
7061  * add_full_mgmt_pkey() is invoked.
7062  */
7063 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7064 {
7065 	struct hfi1_devdata *dd = ppd->dd;
7066 
7067 	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7068 	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7069 		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7070 			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7071 	ppd->pkeys[2] = FULL_MGMT_P_KEY;
7072 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7073 }
7074 
7075 static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7076 {
7077 	ppd->pkeys[2] = 0;
7078 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079 }
7080 
7081 /*
7082  * Convert the given link width to the OPA link width bitmask.
7083  */
7084 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7085 {
7086 	switch (width) {
7087 	case 0:
7088 		/*
7089 		 * Simulator and quick linkup do not set the width.
7090 		 * Just set it to 4x without complaint.
7091 		 */
7092 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7093 			return OPA_LINK_WIDTH_4X;
7094 		return 0; /* no lanes up */
7095 	case 1: return OPA_LINK_WIDTH_1X;
7096 	case 2: return OPA_LINK_WIDTH_2X;
7097 	case 3: return OPA_LINK_WIDTH_3X;
7098 	default:
7099 		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7100 			    __func__, width);
7101 		/* fall through */
7102 	case 4: return OPA_LINK_WIDTH_4X;
7103 	}
7104 }
7105 
7106 /*
7107  * Do a population count on the bottom nibble.
7108  */
7109 static const u8 bit_counts[16] = {
7110 	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7111 };
7112 
7113 static inline u8 nibble_to_count(u8 nibble)
7114 {
7115 	return bit_counts[nibble & 0xf];
7116 }
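
/*
 * Example: an enable_lane nibble of 0xb (lanes 0, 1, and 3 active) gives
 * a count of 3, which link_width_to_bits() maps to OPA_LINK_WIDTH_3X.
 */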
7117 
7118 /*
7119  * Read the active lane information from the 8051 registers and return
7120  * their widths.
7121  *
7122  * Active lane information is found in these 8051 registers:
7123  *	enable_lane_tx
7124  *	enable_lane_rx
7125  */
7126 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7127 			    u16 *rx_width)
7128 {
7129 	u16 tx, rx;
7130 	u8 enable_lane_rx;
7131 	u8 enable_lane_tx;
7132 	u8 tx_polarity_inversion;
7133 	u8 rx_polarity_inversion;
7134 	u8 max_rate;
7135 
7136 	/* read the active lanes */
7137 	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7138 			 &rx_polarity_inversion, &max_rate);
7139 	read_local_lni(dd, &enable_lane_rx);
7140 
7141 	/* convert to counts */
7142 	tx = nibble_to_count(enable_lane_tx);
7143 	rx = nibble_to_count(enable_lane_rx);
7144 
7145 	/*
7146 	 * Set link_speed_active here, overriding what was set in
7147 	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7148 	 * set the max_rate field in handle_verify_cap until v0.19.
7149 	 */
7150 	if ((dd->icode == ICODE_RTL_SILICON) &&
7151 	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
7152 		/* max_rate: 0 = 12.5G, 1 = 25G */
7153 		switch (max_rate) {
7154 		case 0:
7155 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7156 			break;
7157 		default:
7158 			dd_dev_err(dd,
7159 				   "%s: unexpected max rate %d, using 25Gb\n",
7160 				   __func__, (int)max_rate);
7161 			/* fall through */
7162 		case 1:
7163 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7164 			break;
7165 		}
7166 	}
7167 
7168 	dd_dev_info(dd,
7169 		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7170 		    enable_lane_tx, tx, enable_lane_rx, rx);
7171 	*tx_width = link_width_to_bits(dd, tx);
7172 	*rx_width = link_width_to_bits(dd, rx);
7173 }
7174 
7175 /*
7176  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7177  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7178  * after link up.  I.e. look elsewhere for downgrade information.
7179  *
7180  * Bits are:
7181  *	+ bits [7:4] contain the number of active transmitters
7182  *	+ bits [3:0] contain the number of active receivers
7183  * These are numbers 1 through 4 and can be different values if the
7184  * link is asymmetric.
7185  *
7186  * verify_cap_local_fm_link_width[0] retains its original value.
7187  */
7188 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7189 			      u16 *rx_width)
7190 {
7191 	u16 widths, tx, rx;
7192 	u8 misc_bits, local_flags;
7193 	u16 active_tx, active_rx;
7194 
7195 	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7196 	tx = widths >> 12;
7197 	rx = (widths >> 8) & 0xf;
7198 
7199 	*tx_width = link_width_to_bits(dd, tx);
7200 	*rx_width = link_width_to_bits(dd, rx);
7201 
7202 	/* print the active widths */
7203 	get_link_widths(dd, &active_tx, &active_rx);
7204 }
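
/*
 * Worked example for the decode above (value chosen for illustration):
 * widths = 0x4300 yields tx = 0x4 and rx = 0x3, i.e. OPA_LINK_WIDTH_4X
 * transmit and OPA_LINK_WIDTH_3X receive for an asymmetric link.
 */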
7205 
7206 /*
7207  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7208  * hardware information when the link first comes up.
7209  *
7210  * The link width is not available until after VerifyCap.AllFramesReceived
7211  * (the trigger for handle_verify_cap), so this is outside that routine
7212  * and should be called when the 8051 signals linkup.
7213  */
7214 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7215 {
7216 	u16 tx_width, rx_width;
7217 
7218 	/* get end-of-LNI link widths */
7219 	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7220 
7221 	/* use tx_width as the link is supposed to be symmetric on link up */
7222 	ppd->link_width_active = tx_width;
7223 	/* link width downgrade active (LWD.A) starts out matching LW.A */
7224 	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7225 	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7226 	/* per OPA spec, on link up LWD.E resets to LWD.S */
7227 	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7228 	/* cache the active egress rate (units of 10^6 bits/sec) */
7229 	ppd->current_egress_rate = active_egress_rate(ppd);
7230 }
7231 
7232 /*
7233  * Handle a verify capabilities interrupt from the 8051.
7234  *
7235  * This is a work-queue function outside of the interrupt.
7236  */
7237 void handle_verify_cap(struct work_struct *work)
7238 {
7239 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7240 								link_vc_work);
7241 	struct hfi1_devdata *dd = ppd->dd;
7242 	u64 reg;
7243 	u8 power_management;
7244 	u8 continious;
7245 	u8 vcu;
7246 	u8 vau;
7247 	u8 z;
7248 	u16 vl15buf;
7249 	u16 link_widths;
7250 	u16 crc_mask;
7251 	u16 crc_val;
7252 	u16 device_id;
7253 	u16 active_tx, active_rx;
7254 	u8 partner_supported_crc;
7255 	u8 remote_tx_rate;
7256 	u8 device_rev;
7257 
7258 	set_link_state(ppd, HLS_VERIFY_CAP);
7259 
7260 	lcb_shutdown(dd, 0);
7261 	adjust_lcb_for_fpga_serdes(dd);
7262 
7263 	/*
7264 	 * These are now valid:
7265 	 *	remote VerifyCap fields in the general LNI config
7266 	 *	CSR DC8051_STS_REMOTE_GUID
7267 	 *	CSR DC8051_STS_REMOTE_NODE_TYPE
7268 	 *	CSR DC8051_STS_REMOTE_FM_SECURITY
7269 	 *	CSR DC8051_STS_REMOTE_PORT_NO
7270 	 */
7271 
7272 	read_vc_remote_phy(dd, &power_management, &continious);
7273 	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7274 			      &partner_supported_crc);
7275 	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7276 	read_remote_device_id(dd, &device_id, &device_rev);
7277 	/*
7278 	 * And the 'MgmtAllowed' information, which is exchanged during
7279 	 * LNI, is also be available at this point.
7280 	 * LNI, is also available at this point.
7281 	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7282 	/* print the active widths */
7283 	get_link_widths(dd, &active_tx, &active_rx);
7284 	dd_dev_info(dd,
7285 		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7286 		    (int)power_management, (int)continious);
7287 	dd_dev_info(dd,
7288 		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7289 		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7290 		    (int)partner_supported_crc);
7291 	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7292 		    (u32)remote_tx_rate, (u32)link_widths);
7293 	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7294 		    (u32)device_id, (u32)device_rev);
7295 	/*
7296 	 * The peer vAU value just read is the peer receiver value.  HFI does
7297 	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
7298 	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
7299 	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7300 	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7301 	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7302 	 * subject to the Z value exception.
7303 	 */
7304 	if (vau == 0)
7305 		vau = 1;
7306 	set_up_vl15(dd, vau, vl15buf);
7307 
7308 	/* set up the LCB CRC mode */
7309 	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7310 
7311 	/* order is important: use the lowest bit in common */
7312 	if (crc_mask & CAP_CRC_14B)
7313 		crc_val = LCB_CRC_14B;
7314 	else if (crc_mask & CAP_CRC_48B)
7315 		crc_val = LCB_CRC_48B;
7316 	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7317 		crc_val = LCB_CRC_12B_16B_PER_LANE;
7318 	else
7319 		crc_val = LCB_CRC_16B;
7320 
7321 	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7322 	write_csr(dd, DC_LCB_CFG_CRC_MODE,
7323 		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7324 
7325 	/* set (14b only) or clear sideband credit */
7326 	reg = read_csr(dd, SEND_CM_CTRL);
7327 	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7328 		write_csr(dd, SEND_CM_CTRL,
7329 			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7330 	} else {
7331 		write_csr(dd, SEND_CM_CTRL,
7332 			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7333 	}
7334 
7335 	ppd->link_speed_active = 0;	/* invalid value */
7336 	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7337 		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7338 		switch (remote_tx_rate) {
7339 		case 0:
7340 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7341 			break;
7342 		case 1:
7343 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7344 			break;
7345 		}
7346 	} else {
7347 		/* actual rate is highest bit of the ANDed rates */
7348 		u8 rate = remote_tx_rate & ppd->local_tx_rate;
7349 
7350 		if (rate & 2)
7351 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7352 		else if (rate & 1)
7353 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7354 	}
7355 	if (ppd->link_speed_active == 0) {
7356 		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7357 			   __func__, (int)remote_tx_rate);
7358 		ppd->link_speed_active = OPA_LINK_SPEED_25G;
7359 	}
7360 
7361 	/*
7362 	 * Cache the values of the supported, enabled, and active
7363 	 * LTP CRC modes to return in 'portinfo' queries. But the bit
7364 	 * flags that are returned in the portinfo query differ from
7365 	 * what's in the link_crc_mask, crc_sizes, and crc_val
7366 	 * variables. Convert these here.
7367 	 */
7368 	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7369 		/* supported crc modes */
7370 	ppd->port_ltp_crc_mode |=
7371 		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7372 		/* enabled crc modes */
7373 	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7374 		/* active crc mode */
7375 
7376 	/* set up the remote credit return table */
7377 	assign_remote_cm_au_table(dd, vcu);
7378 
7379 	/*
7380 	 * The LCB is reset on entry to handle_verify_cap(), so this must
7381 	 * be applied on every link up.
7382 	 *
7383 	 * Adjust LCB error kill enable to kill the link if
7384 	 * these RBUF errors are seen:
7385 	 *	REPLAY_BUF_MBE_SMASK
7386 	 *	FLIT_INPUT_BUF_MBE_SMASK
7387 	 */
7388 	if (is_ax(dd)) {			/* fixed in B0 */
7389 		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7390 		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7391 			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7392 		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7393 	}
7394 
7395 	/* pull LCB fifos out of reset - all fifo clocks must be stable */
7396 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7397 
7398 	/* give 8051 access to the LCB CSRs */
7399 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7400 	set_8051_lcb_access(dd);
7401 
7402 	ppd->neighbor_guid =
7403 		read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7404 	ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7405 					DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7406 	ppd->neighbor_type =
7407 		read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7408 		DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7409 	ppd->neighbor_fm_security =
7410 		read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7411 		DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7412 	dd_dev_info(dd,
7413 		    "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7414 		    ppd->neighbor_guid, ppd->neighbor_type,
7415 		    ppd->mgmt_allowed, ppd->neighbor_fm_security);
7416 	if (ppd->mgmt_allowed)
7417 		add_full_mgmt_pkey(ppd);
7418 
7419 	/* tell the 8051 to go to LinkUp */
7420 	set_link_state(ppd, HLS_GOING_UP);
7421 }
7422 
7423 /*
7424  * Apply the link width downgrade enabled policy against the current active
7425  * link widths.
7426  *
7427  * Called when the enabled policy changes or the active link widths change.
7428  */
7429 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7430 {
7431 	int do_bounce = 0;
7432 	int tries;
7433 	u16 lwde;
7434 	u16 tx, rx;
7435 
7436 	/* use the hls lock to avoid a race with actual link up */
7437 	tries = 0;
7438 retry:
7439 	mutex_lock(&ppd->hls_lock);
7440 	/* only apply if the link is up */
7441 	if (ppd->host_link_state & HLS_DOWN) {
7442 		/* still going up..wait and retry */
7443 		if (ppd->host_link_state & HLS_GOING_UP) {
7444 			if (++tries < 1000) {
7445 				mutex_unlock(&ppd->hls_lock);
7446 				usleep_range(100, 120); /* arbitrary */
7447 				goto retry;
7448 			}
7449 			dd_dev_err(ppd->dd,
7450 				   "%s: giving up waiting for link state change\n",
7451 				   __func__);
7452 		}
7453 		goto done;
7454 	}
7455 
7456 	lwde = ppd->link_width_downgrade_enabled;
7457 
7458 	if (refresh_widths) {
7459 		get_link_widths(ppd->dd, &tx, &rx);
7460 		ppd->link_width_downgrade_tx_active = tx;
7461 		ppd->link_width_downgrade_rx_active = rx;
7462 	}
7463 
7464 	if (ppd->link_width_downgrade_tx_active == 0 ||
7465 	    ppd->link_width_downgrade_rx_active == 0) {
7466 		/* the 8051 reported a dead link as a downgrade */
7467 		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7468 	} else if (lwde == 0) {
7469 		/* downgrade is disabled */
7470 
7471 		/* bounce if not at starting active width */
7472 		if ((ppd->link_width_active !=
7473 		     ppd->link_width_downgrade_tx_active) ||
7474 		    (ppd->link_width_active !=
7475 		     ppd->link_width_downgrade_rx_active)) {
7476 			dd_dev_err(ppd->dd,
7477 				   "Link downgrade is disabled and link has downgraded, downing link\n");
7478 			dd_dev_err(ppd->dd,
7479 				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7480 				   ppd->link_width_active,
7481 				   ppd->link_width_downgrade_tx_active,
7482 				   ppd->link_width_downgrade_rx_active);
7483 			do_bounce = 1;
7484 		}
7485 	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7486 		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7487 		/* Tx or Rx is outside the enabled policy */
7488 		dd_dev_err(ppd->dd,
7489 			   "Link is outside of downgrade allowed, downing link\n");
7490 		dd_dev_err(ppd->dd,
7491 			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7492 			   lwde, ppd->link_width_downgrade_tx_active,
7493 			   ppd->link_width_downgrade_rx_active);
7494 		do_bounce = 1;
7495 	}
7496 
7497 done:
7498 	mutex_unlock(&ppd->hls_lock);
7499 
7500 	if (do_bounce) {
7501 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7502 				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7503 		set_link_state(ppd, HLS_DN_OFFLINE);
7504 		tune_serdes(ppd);
7505 		start_link(ppd);
7506 	}
7507 }
7508 
7509 /*
7510  * Handle a link downgrade interrupt from the 8051.
7511  *
7512  * This is a work-queue function outside of the interrupt.
7513  */
7514 void handle_link_downgrade(struct work_struct *work)
7515 {
7516 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7517 							link_downgrade_work);
7518 
7519 	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7520 	apply_link_downgrade_policy(ppd, 1);
7521 }
7522 
7523 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7524 {
7525 	return flag_string(buf, buf_len, flags, dcc_err_flags,
7526 		ARRAY_SIZE(dcc_err_flags));
7527 }
7528 
7529 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7530 {
7531 	return flag_string(buf, buf_len, flags, lcb_err_flags,
7532 		ARRAY_SIZE(lcb_err_flags));
7533 }
7534 
7535 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7536 {
7537 	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7538 		ARRAY_SIZE(dc8051_err_flags));
7539 }
7540 
7541 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7542 {
7543 	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7544 		ARRAY_SIZE(dc8051_info_err_flags));
7545 }
7546 
7547 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7548 {
7549 	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7550 		ARRAY_SIZE(dc8051_info_host_msg_flags));
7551 }
7552 
7553 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7554 {
7555 	struct hfi1_pportdata *ppd = dd->pport;
7556 	u64 info, err, host_msg;
7557 	int queue_link_down = 0;
7558 	char buf[96];
7559 
7560 	/* look at the flags */
7561 	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7562 		/* 8051 information set by firmware */
7563 		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7564 		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7565 		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7566 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7567 		host_msg = (info >>
7568 			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7569 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7570 
7571 		/*
7572 		 * Handle error flags.
7573 		 */
7574 		if (err & FAILED_LNI) {
7575 			/*
7576 			 * LNI error indications are cleared by the 8051
7577 			 * only when starting polling.  Only pay attention
7578 			 * to them when in the states that occur during
7579 			 * LNI.
7580 			 */
7581 			if (ppd->host_link_state
7582 			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7583 				queue_link_down = 1;
7584 				dd_dev_info(dd, "Link error: %s\n",
7585 					    dc8051_info_err_string(buf,
7586 								   sizeof(buf),
7587 								   err &
7588 								   FAILED_LNI));
7589 			}
7590 			err &= ~(u64)FAILED_LNI;
7591 		}
7592 		/* unknown frames can happen during LNI, just count */
7593 		if (err & UNKNOWN_FRAME) {
7594 			ppd->unknown_frame_count++;
7595 			err &= ~(u64)UNKNOWN_FRAME;
7596 		}
7597 		if (err) {
7598 			/* report remaining errors, but do not do anything */
7599 			dd_dev_err(dd, "8051 info error: %s\n",
7600 				   dc8051_info_err_string(buf, sizeof(buf),
7601 							  err));
7602 		}
7603 
7604 		/*
7605 		 * Handle host message flags.
7606 		 */
7607 		if (host_msg & HOST_REQ_DONE) {
7608 			/*
7609 			 * Presently, the driver does a busy wait for
7610 			 * host requests to complete.  This is only an
7611 			 * informational message.
7612 			 * NOTE: The 8051 clears the host message
7613 			 * information *on the next 8051 command*.
7614 			 * Therefore, when linkup is achieved,
7615 			 * this flag will still be set.
7616 			 */
7617 			host_msg &= ~(u64)HOST_REQ_DONE;
7618 		}
7619 		if (host_msg & BC_SMA_MSG) {
7620 			queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7621 			host_msg &= ~(u64)BC_SMA_MSG;
7622 		}
7623 		if (host_msg & LINKUP_ACHIEVED) {
7624 			dd_dev_info(dd, "8051: Link up\n");
7625 			queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7626 			host_msg &= ~(u64)LINKUP_ACHIEVED;
7627 		}
7628 		if (host_msg & EXT_DEVICE_CFG_REQ) {
7629 			handle_8051_request(ppd);
7630 			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7631 		}
7632 		if (host_msg & VERIFY_CAP_FRAME) {
7633 			queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7634 			host_msg &= ~(u64)VERIFY_CAP_FRAME;
7635 		}
7636 		if (host_msg & LINK_GOING_DOWN) {
7637 			const char *extra = "";
7638 			/* no downgrade action needed if going down */
7639 			if (host_msg & LINK_WIDTH_DOWNGRADED) {
7640 				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7641 				extra = " (ignoring downgrade)";
7642 			}
7643 			dd_dev_info(dd, "8051: Link down%s\n", extra);
7644 			queue_link_down = 1;
7645 			host_msg &= ~(u64)LINK_GOING_DOWN;
7646 		}
7647 		if (host_msg & LINK_WIDTH_DOWNGRADED) {
7648 			queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7649 			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7650 		}
7651 		if (host_msg) {
7652 			/* report remaining messages, but do not do anything */
7653 			dd_dev_info(dd, "8051 info host message: %s\n",
7654 				    dc8051_info_host_msg_string(buf,
7655 								sizeof(buf),
7656 								host_msg));
7657 		}
7658 
7659 		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7660 	}
7661 	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7662 		/*
7663 		 * Lost the 8051 heartbeat.  If this happens, we
7664 		 * receive constant interrupts about it.  Disable
7665 		 * the interrupt after the first.
7666 		 */
7667 		dd_dev_err(dd, "Lost 8051 heartbeat\n");
7668 		write_csr(dd, DC_DC8051_ERR_EN,
7669 			  read_csr(dd, DC_DC8051_ERR_EN) &
7670 			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7671 
7672 		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7673 	}
7674 	if (reg) {
7675 		/* report the error, but do not do anything */
7676 		dd_dev_err(dd, "8051 error: %s\n",
7677 			   dc8051_err_string(buf, sizeof(buf), reg));
7678 	}
7679 
7680 	if (queue_link_down) {
7681 		/*
7682 		 * if the link is already going down or disabled, do not
7683 		 * queue another
7684 		 */
7685 		if ((ppd->host_link_state &
7686 		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7687 		    ppd->link_enabled == 0) {
7688 			dd_dev_info(dd, "%s: not queuing link down\n",
7689 				    __func__);
7690 		} else {
7691 			queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7692 		}
7693 	}
7694 }
7695 
7696 static const char * const fm_config_txt[] = {
7697 [0] =
7698 	"BadHeadDist: Distance violation between two head flits",
7699 [1] =
7700 	"BadTailDist: Distance violation between two tail flits",
7701 [2] =
7702 	"BadCtrlDist: Distance violation between two credit control flits",
7703 [3] =
7704 	"BadCrdAck: Credits return for unsupported VL",
7705 [4] =
7706 	"UnsupportedVLMarker: Received VL Marker",
7707 [5] =
7708 	"BadPreempt: Exceeded the preemption nesting level",
7709 [6] =
7710 	"BadControlFlit: Received unsupported control flit",
7711 /* no 7 */
7712 [8] =
7713 	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7714 };
7715 
7716 static const char * const port_rcv_txt[] = {
7717 [1] =
7718 	"BadPktLen: Illegal PktLen",
7719 [2] =
7720 	"PktLenTooLong: Packet longer than PktLen",
7721 [3] =
7722 	"PktLenTooShort: Packet shorter than PktLen",
7723 [4] =
7724 	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7725 [5] =
7726 	"BadDLID: Illegal DLID (0, doesn't match HFI)",
7727 [6] =
7728 	"BadL2: Illegal L2 opcode",
7729 [7] =
7730 	"BadSC: Unsupported SC",
7731 [9] =
7732 	"BadRC: Illegal RC",
7733 [11] =
7734 	"PreemptError: Preempting with same VL",
7735 [12] =
7736 	"PreemptVL15: Preempting a VL15 packet",
7737 };
7738 
7739 #define OPA_LDR_FMCONFIG_OFFSET 16
7740 #define OPA_LDR_PORTRCV_OFFSET 0
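/*
 * Handle a DCC error interrupt.  Record uncorrectable, FM config, and
 * port receive error information, report the decoded reason, and bounce
 * the link if the FM's PortErrorAction mask asks for it.
 */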
7741 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7742 {
7743 	u64 info, hdr0, hdr1;
7744 	const char *extra;
7745 	char buf[96];
7746 	struct hfi1_pportdata *ppd = dd->pport;
7747 	u8 lcl_reason = 0;
7748 	int do_bounce = 0;
7749 
7750 	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7751 		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7752 			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7753 			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7754 			/* set status bit */
7755 			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7756 		}
7757 		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7758 	}
7759 
7760 	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7761 		struct hfi1_pportdata *ppd = dd->pport;
7762 		/* this counter saturates at (2^32) - 1 */
7763 		if (ppd->link_downed < (u32)UINT_MAX)
7764 			ppd->link_downed++;
7765 		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7766 	}
7767 
7768 	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7769 		u8 reason_valid = 1;
7770 
7771 		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7772 		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7773 			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7774 			/* set status bit */
7775 			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7776 		}
7777 		switch (info) {
7778 		case 0:
7779 		case 1:
7780 		case 2:
7781 		case 3:
7782 		case 4:
7783 		case 5:
7784 		case 6:
7785 			extra = fm_config_txt[info];
7786 			break;
7787 		case 8:
7788 			extra = fm_config_txt[info];
7789 			if (ppd->port_error_action &
7790 			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7791 				do_bounce = 1;
7792 				/*
7793 				 * lcl_reason cannot be derived from info
7794 				 * for this error
7795 				 */
7796 				lcl_reason =
7797 				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7798 			}
7799 			break;
7800 		default:
7801 			reason_valid = 0;
7802 			snprintf(buf, sizeof(buf), "reserved%lld", info);
7803 			extra = buf;
7804 			break;
7805 		}
7806 
7807 		if (reason_valid && !do_bounce) {
7808 			do_bounce = ppd->port_error_action &
7809 					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7810 			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7811 		}
7812 
7813 		/* just report this */
7814 		dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7815 		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7816 	}
7817 
7818 	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7819 		u8 reason_valid = 1;
7820 
7821 		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7822 		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7823 		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7824 		if (!(dd->err_info_rcvport.status_and_code &
7825 		      OPA_EI_STATUS_SMASK)) {
7826 			dd->err_info_rcvport.status_and_code =
7827 				info & OPA_EI_CODE_SMASK;
7828 			/* set status bit */
7829 			dd->err_info_rcvport.status_and_code |=
7830 				OPA_EI_STATUS_SMASK;
7831 			/*
7832 			 * save first 2 flits in the packet that caused
7833 			 * the error
7834 			 */
7835 			dd->err_info_rcvport.packet_flit1 = hdr0;
7836 			dd->err_info_rcvport.packet_flit2 = hdr1;
7837 		}
7838 		switch (info) {
7839 		case 1:
7840 		case 2:
7841 		case 3:
7842 		case 4:
7843 		case 5:
7844 		case 6:
7845 		case 7:
7846 		case 9:
7847 		case 11:
7848 		case 12:
7849 			extra = port_rcv_txt[info];
7850 			break;
7851 		default:
7852 			reason_valid = 0;
7853 			snprintf(buf, sizeof(buf), "reserved%lld", info);
7854 			extra = buf;
7855 			break;
7856 		}
7857 
7858 		if (reason_valid && !do_bounce) {
7859 			do_bounce = ppd->port_error_action &
7860 					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
7861 			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7862 		}
7863 
7864 		/* just report this */
7865 		dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7866 		dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7867 			    hdr0, hdr1);
7868 
7869 		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7870 	}
7871 
7872 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7873 		/* informative only */
7874 		dd_dev_info(dd, "8051 access to LCB blocked\n");
7875 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7876 	}
7877 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7878 		/* informative only */
7879 		dd_dev_info(dd, "host access to LCB blocked\n");
7880 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7881 	}
7882 
7883 	/* report any remaining errors */
7884 	if (reg)
7885 		dd_dev_info(dd, "DCC Error: %s\n",
7886 			    dcc_err_string(buf, sizeof(buf), reg));
7887 
7888 	if (lcl_reason == 0)
7889 		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7890 
7891 	if (do_bounce) {
7892 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7893 		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7894 		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7895 	}
7896 }
7897 
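/* Report an LCB error interrupt.  No recovery action is taken here. */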
7898 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7899 {
7900 	char buf[96];
7901 
7902 	dd_dev_info(dd, "LCB Error: %s\n",
7903 		    lcb_err_string(buf, sizeof(buf), reg));
7904 }
7905 
7906 /*
7907  * CCE block DC interrupt.  Source is < 8.
7908  */
7909 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7910 {
7911 	const struct err_reg_info *eri = &dc_errs[source];
7912 
7913 	if (eri->handler) {
7914 		interrupt_clear_down(dd, 0, eri);
7915 	} else if (source == 3 /* dc_lbm_int */) {
7916 		/*
7917 		 * This indicates that a parity error has occurred on the
7918 		 * address/control lines presented to the LBM.  The error
7919 		 * is a single pulse, there is no associated error flag,
7920 		 * and it is non-maskable.  This is because if a parity
7921 		 * error occurs on the request, the request is dropped.
7922 		 * This should never occur, but it is nice to know if it
7923 		 * ever does.
7924 		 */
7925 		dd_dev_err(dd, "Parity error in DC LBM block\n");
7926 	} else {
7927 		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7928 	}
7929 }
7930 
7931 /*
7932  * TX block send credit interrupt.  Source is < 160.
7933  */
7934 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7935 {
7936 	sc_group_release_update(dd, source);
7937 }
7938 
7939 /*
7940  * TX block SDMA interrupt.  Source is < 48.
7941  *
7942  * SDMA interrupts are grouped by type:
7943  *
7944  *	 0 -  N-1 = SDma
7945  *	 N - 2N-1 = SDmaProgress
7946  *	2N - 3N-1 = SDmaIdle
7947  */
7948 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7949 {
7950 	/* what interrupt */
7951 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7952 	/* which engine */
7953 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7954 
7955 #ifdef CONFIG_SDMA_VERBOSITY
7956 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7957 		   slashstrip(__FILE__), __LINE__, __func__);
7958 	sdma_dumpstate(&dd->per_sdma[which]);
7959 #endif
7960 
7961 	if (likely(what < 3 && which < dd->num_sdma)) {
7962 		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7963 	} else {
7964 		/* should not happen */
7965 		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7966 	}
7967 }
7968 
7969 /*
7970  * RX block receive available interrupt.  Source is < 160.
7971  */
7972 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7973 {
7974 	struct hfi1_ctxtdata *rcd;
7975 	char *err_detail;
7976 
7977 	if (likely(source < dd->num_rcv_contexts)) {
7978 		rcd = dd->rcd[source];
7979 		if (rcd) {
7980 			if (source < dd->first_user_ctxt)
7981 				rcd->do_interrupt(rcd, 0);
7982 			else
7983 				handle_user_interrupt(rcd);
7984 			return;	/* OK */
7985 		}
7986 		/* received an interrupt, but no rcd */
7987 		err_detail = "dataless";
7988 	} else {
7989 		/* received an interrupt, but are not using that context */
7990 		err_detail = "out of range";
7991 	}
7992 	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7993 		   err_detail, source);
7994 }
7995 
7996 /*
7997  * RX block receive urgent interrupt.  Source is < 160.
7998  */
7999 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8000 {
8001 	struct hfi1_ctxtdata *rcd;
8002 	char *err_detail;
8003 
8004 	if (likely(source < dd->num_rcv_contexts)) {
8005 		rcd = dd->rcd[source];
8006 		if (rcd) {
8007 			/* only pay attention to user urgent interrupts */
8008 			if (source >= dd->first_user_ctxt)
8009 				handle_user_interrupt(rcd);
8010 			return;	/* OK */
8011 		}
8012 		/* received an interrupt, but no rcd */
8013 		err_detail = "dataless";
8014 	} else {
8015 		/* received an interrupt, but are not using that context */
8016 		err_detail = "out of range";
8017 	}
8018 	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8019 		   err_detail, source);
8020 }
8021 
8022 /*
8023  * Reserved range interrupt.  Should not be called in normal operation.
8024  */
8025 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8026 {
8027 	char name[64];
8028 
8029 	dd_dev_err(dd, "unexpected %s interrupt\n",
8030 		   is_reserved_name(name, sizeof(name), source));
8031 }
8032 
8033 static const struct is_table is_table[] = {
8034 /*
8035  * start		 end
8036  *				name func		interrupt func
8037  */
8038 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8039 				is_misc_err_name,	is_misc_err_int },
8040 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8041 				is_sdma_eng_err_name,	is_sdma_eng_err_int },
8042 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8043 				is_sendctxt_err_name,	is_sendctxt_err_int },
8044 { IS_SDMA_START,	     IS_SDMA_END,
8045 				is_sdma_eng_name,	is_sdma_eng_int },
8046 { IS_VARIOUS_START,	     IS_VARIOUS_END,
8047 				is_various_name,	is_various_int },
8048 { IS_DC_START,	     IS_DC_END,
8049 				is_dc_name,		is_dc_int },
8050 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8051 				is_rcv_avail_name,	is_rcv_avail_int },
8052 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8053 				is_rcv_urgent_name,	is_rcv_urgent_int },
8054 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8055 				is_send_credit_name,	is_send_credit_int},
8056 { IS_RESERVED_START,     IS_RESERVED_END,
8057 				is_reserved_name,	is_reserved_int},
8058 };
8059 
8060 /*
8061  * Interrupt source interrupt - called when the given source has an interrupt.
8062  * Source is a bit index into an array of 64-bit integers.
8063  */
8064 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8065 {
8066 	const struct is_table *entry;
8067 
8068 	/* avoids a double compare by walking the table in-order */
8069 	for (entry = &is_table[0]; entry->is_name; entry++) {
8070 		if (source < entry->end) {
8071 			trace_hfi1_interrupt(dd, entry, source);
8072 			entry->is_int(dd, source - entry->start);
8073 			return;
8074 		}
8075 	}
8076 	/* fell off the end */
8077 	dd_dev_err(dd, "invalid interrupt source %u\n", source);
8078 }
8079 
8080 /*
8081  * General interrupt handler.  This is able to correctly handle
8082  * all interrupts in case INTx is used.
8083  */
8084 static irqreturn_t general_interrupt(int irq, void *data)
8085 {
8086 	struct hfi1_devdata *dd = data;
8087 	u64 regs[CCE_NUM_INT_CSRS];
8088 	u32 bit;
8089 	int i;
8090 
8091 	this_cpu_inc(*dd->int_counter);
8092 
8093 	/* phase 1: scan and clear all handled interrupts */
8094 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8095 		if (dd->gi_mask[i] == 0) {
8096 			regs[i] = 0;	/* used later */
8097 			continue;
8098 		}
8099 		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8100 				dd->gi_mask[i];
8101 		/* only clear if anything is set */
8102 		if (regs[i])
8103 			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8104 	}
8105 
8106 	/* phase 2: call the appropriate handler */
8107 	for_each_set_bit(bit, (unsigned long *)&regs[0],
8108 			 CCE_NUM_INT_CSRS * 64) {
8109 		is_interrupt(dd, bit);
8110 	}
8111 
8112 	return IRQ_HANDLED;
8113 }
8114 
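/*
 * Per-engine SDMA interrupt handler.  Read and clear this engine's
 * interrupt status bits, then pass them to the SDMA engine code.
 */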
8115 static irqreturn_t sdma_interrupt(int irq, void *data)
8116 {
8117 	struct sdma_engine *sde = data;
8118 	struct hfi1_devdata *dd = sde->dd;
8119 	u64 status;
8120 
8121 #ifdef CONFIG_SDMA_VERBOSITY
8122 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8123 		   slashstrip(__FILE__), __LINE__, __func__);
8124 	sdma_dumpstate(sde);
8125 #endif
8126 
8127 	this_cpu_inc(*dd->int_counter);
8128 
8129 	/* This read_csr is really bad in the hot path */
8130 	status = read_csr(dd,
8131 			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8132 			  & sde->imask;
8133 	if (likely(status)) {
8134 		/* clear the interrupt(s) */
8135 		write_csr(dd,
8136 			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8137 			  status);
8138 
8139 		/* handle the interrupt(s) */
8140 		sdma_engine_interrupt(sde, status);
8141 	} else
8142 		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8143 			   sde->this_idx);
8144 
8145 	return IRQ_HANDLED;
8146 }
8147 
8148 /*
8149  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8150  * to ensure that the write completed.  This does NOT guarantee that
8151  * queued DMA writes to memory from the chip are pushed.
8152  */
8153 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8154 {
8155 	struct hfi1_devdata *dd = rcd->dd;
8156 	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8157 
8158 	mmiowb();	/* make sure everything before is written */
8159 	write_csr(dd, addr, rcd->imask);
8160 	/* force the above write on the chip and get a value back */
8161 	(void)read_csr(dd, addr);
8162 }
8163 
8164 /* force the receive interrupt */
8165 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8166 {
8167 	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8168 }
8169 
8170 /*
8171  * Return non-zero if a packet is present.
8172  *
8173  * This routine is called when rechecking for packets after the RcvAvail
8174  * interrupt has been cleared down.  First, do a quick check of memory for
8175  * a packet present.  If not found, use an expensive CSR read of the context
8176  * tail to determine the actual tail.  The CSR read is necessary because there
8177  * is no method to push pending DMAs to memory other than an interrupt and we
8178  * are trying to determine if we need to force an interrupt.
8179  */
8180 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8181 {
8182 	u32 tail;
8183 	int present;
8184 
8185 	if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8186 		present = (rcd->seq_cnt ==
8187 				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8188 	else /* is RDMA rtail */
8189 		present = (rcd->head != get_rcvhdrtail(rcd));
8190 
8191 	if (present)
8192 		return 1;
8193 
8194 	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8195 	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8196 	return rcd->head != tail;
8197 }
8198 
8199 /*
8200  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8201  * This routine will try to handle packets immediately (latency), but if
8202  * it finds too many, it will invoke the thread handler (bandwidth).  The
8203  * chip receive interrupt is *not* cleared down until this or the thread (if
8204  * invoked) is finished.  The intent is to avoid extra interrupts while we
8205  * are processing packets anyway.
8206  */
8207 static irqreturn_t receive_context_interrupt(int irq, void *data)
8208 {
8209 	struct hfi1_ctxtdata *rcd = data;
8210 	struct hfi1_devdata *dd = rcd->dd;
8211 	int disposition;
8212 	int present;
8213 
8214 	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8215 	this_cpu_inc(*dd->int_counter);
8216 	aspm_ctx_disable(rcd);
8217 
8218 	/* receive interrupt remains blocked while processing packets */
8219 	disposition = rcd->do_interrupt(rcd, 0);
8220 
8221 	/*
8222 	 * Too many packets were seen while processing packets in this
8223 	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
8224 	 * remains blocked.
8225 	 */
8226 	if (disposition == RCV_PKT_LIMIT)
8227 		return IRQ_WAKE_THREAD;
8228 
8229 	/*
8230 	 * The packet processor detected no more packets.  Clear the receive
8231 	 * interrupt and recheck for a packet that may have arrived
8232 	 * after the previous check and interrupt clear.  If a packet arrived,
8233 	 * force another interrupt.
8234 	 */
8235 	clear_recv_intr(rcd);
8236 	present = check_packet_present(rcd);
8237 	if (present)
8238 		force_recv_intr(rcd);
8239 
8240 	return IRQ_HANDLED;
8241 }
8242 
8243 /*
8244  * Receive packet thread handler.  This expects to be invoked with the
8245  * receive interrupt still blocked.
8246  */
8247 static irqreturn_t receive_context_thread(int irq, void *data)
8248 {
8249 	struct hfi1_ctxtdata *rcd = data;
8250 	int present;
8251 
8252 	/* receive interrupt is still blocked from the IRQ handler */
8253 	(void)rcd->do_interrupt(rcd, 1);
8254 
8255 	/*
8256 	 * The packet processor will only return if it detected no more
8257 	 * packets.  Hold IRQs here so we can safely clear the interrupt and
8258 	 * recheck for a packet that may have arrived after the previous
8259 	 * check and the interrupt clear.  If a packet arrived, force another
8260 	 * interrupt.
8261 	 */
8262 	local_irq_disable();
8263 	clear_recv_intr(rcd);
8264 	present = check_packet_present(rcd);
8265 	if (present)
8266 		force_recv_intr(rcd);
8267 	local_irq_enable();
8268 
8269 	return IRQ_HANDLED;
8270 }
8271 
8272 /* ========================================================================= */
8273 
8274 u32 read_physical_state(struct hfi1_devdata *dd)
8275 {
8276 	u64 reg;
8277 
8278 	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8279 	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8280 				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
8281 }
8282 
8283 u32 read_logical_state(struct hfi1_devdata *dd)
8284 {
8285 	u64 reg;
8286 
8287 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8288 	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8289 				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8290 }
8291 
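/* Set the logical link state field of DCC_CFG_PORT_CONFIG. */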
8292 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8293 {
8294 	u64 reg;
8295 
8296 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8297 	/* clear current state, set new state */
8298 	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8299 	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8300 	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8301 }
8302 
8303 /*
8304  * Use the 8051 to read an LCB CSR.
8305  */
8306 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8307 {
8308 	u32 regno;
8309 	int ret;
8310 
8311 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8312 		if (acquire_lcb_access(dd, 0) == 0) {
8313 			*data = read_csr(dd, addr);
8314 			release_lcb_access(dd, 0);
8315 			return 0;
8316 		}
8317 		return -EBUSY;
8318 	}
8319 
8320 	/* register is an index of LCB registers: (offset - base) / 8 */
8321 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8322 	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8323 	if (ret != HCMD_SUCCESS)
8324 		return -EBUSY;
8325 	return 0;
8326 }
8327 
8328 /*
8329  * Read an LCB CSR.  Access may not be in host control, so check.
8330  * Return 0 on success, -EBUSY on failure.
8331  */
8332 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8333 {
8334 	struct hfi1_pportdata *ppd = dd->pport;
8335 
8336 	/* if up, go through the 8051 for the value */
8337 	if (ppd->host_link_state & HLS_UP)
8338 		return read_lcb_via_8051(dd, addr, data);
8339 	/* if going up or down, no access */
8340 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8341 		return -EBUSY;
8342 	/* otherwise, host has access */
8343 	*data = read_csr(dd, addr);
8344 	return 0;
8345 }
8346 
8347 /*
8348  * Use the 8051 to write a LCB CSR.
8349  * Use the 8051 to write an LCB CSR.
8350 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8351 {
8352 	u32 regno;
8353 	int ret;
8354 
8355 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8356 	    (dd->dc8051_ver < dc8051_ver(0, 20))) {
8357 		if (acquire_lcb_access(dd, 0) == 0) {
8358 			write_csr(dd, addr, data);
8359 			release_lcb_access(dd, 0);
8360 			return 0;
8361 		}
8362 		return -EBUSY;
8363 	}
8364 
8365 	/* register is an index of LCB registers: (offset - base) / 8 */
8366 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8367 	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8368 	if (ret != HCMD_SUCCESS)
8369 		return -EBUSY;
8370 	return 0;
8371 }
8372 
8373 /*
8374  * Write an LCB CSR.  Access may not be in host control, so check.
8375  * Return 0 on success, -EBUSY on failure.
8376  */
8377 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8378 {
8379 	struct hfi1_pportdata *ppd = dd->pport;
8380 
8381 	/* if up, go through the 8051 for the value */
8382 	if (ppd->host_link_state & HLS_UP)
8383 		return write_lcb_via_8051(dd, addr, data);
8384 	/* if going up or down, no access */
8385 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8386 		return -EBUSY;
8387 	/* otherwise, host has access */
8388 	write_csr(dd, addr, data);
8389 	return 0;
8390 }
8391 
8392 /*
8393  * Returns:
8394  *	< 0 = Linux error, not able to get access
8395  *	> 0 = 8051 command RETURN_CODE
8396  */
8397 static int do_8051_command(
8398 	struct hfi1_devdata *dd,
8399 	u32 type,
8400 	u64 in_data,
8401 	u64 *out_data)
8402 {
8403 	u64 reg, completed;
8404 	int return_code;
8405 	unsigned long flags;
8406 	unsigned long timeout;
8407 
8408 	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8409 
8410 	/*
8411 	 * Alternative to holding the lock for a long time:
8412 	 * - keep busy wait - have other users bounce off
8413 	 */
8414 	spin_lock_irqsave(&dd->dc8051_lock, flags);
8415 
8416 	/* We can't send any commands to the 8051 if it's in reset */
8417 	if (dd->dc_shutdown) {
8418 		return_code = -ENODEV;
8419 		goto fail;
8420 	}
8421 
8422 	/*
8423 	 * If an 8051 host command timed out previously, then the 8051 is
8424 	 * stuck.
8425 	 *
8426 	 * On first timeout, attempt to reset and restart the entire DC
8427 	 * block (including 8051). (Is this too big of a hammer?)
8428 	 *
8429 	 * If the 8051 times out a second time, the reset did not bring it
8430 	 * back to healthy life. In that case, fail any subsequent commands.
8431 	 */
8432 	if (dd->dc8051_timed_out) {
8433 		if (dd->dc8051_timed_out > 1) {
8434 			dd_dev_err(dd,
8435 				   "Previous 8051 host command timed out, skipping command %u\n",
8436 				   type);
8437 			return_code = -ENXIO;
8438 			goto fail;
8439 		}
8440 		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8441 		dc_shutdown(dd);
8442 		dc_start(dd);
8443 		spin_lock_irqsave(&dd->dc8051_lock, flags);
8444 	}
8445 
8446 	/*
8447 	 * If there is no timeout, then the 8051 command interface is
8448 	 * waiting for a command.
8449 	 */
8450 
8451 	/*
8452 	 * When writing an LCB CSR, out_data contains the full value
8453 	 * to be written, while in_data contains the relative LCB
8454 	 * address in 7:0.  Do the work here, rather than the caller,
8455 	 * of distributing the write data to where it needs to go:
8456 	 *
8457 	 * Write data
8458 	 *   39:00 -> in_data[47:8]
8459 	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8460 	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8461 	 */
8462 	if (type == HCMD_WRITE_LCB_CSR) {
8463 		in_data |= ((*out_data) & 0xffffffffffull) << 8;
8464 		reg = ((((*out_data) >> 40) & 0xff) <<
8465 				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8466 		      | ((((*out_data) >> 48) & 0xffff) <<
8467 				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8468 		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8469 	}
8470 
8471 	/*
8472 	 * Do two writes: the first to stabilize the type and req_data, the
8473 	 * second to activate.
8474 	 */
8475 	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8476 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8477 		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8478 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8479 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8480 	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8481 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8482 
8483 	/* wait for completion, alternate: interrupt */
8484 	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8485 	while (1) {
8486 		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8487 		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8488 		if (completed)
8489 			break;
8490 		if (time_after(jiffies, timeout)) {
8491 			dd->dc8051_timed_out++;
8492 			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8493 			if (out_data)
8494 				*out_data = 0;
8495 			return_code = -ETIMEDOUT;
8496 			goto fail;
8497 		}
8498 		udelay(2);
8499 	}
8500 
8501 	if (out_data) {
8502 		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8503 				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8504 		if (type == HCMD_READ_LCB_CSR) {
8505 			/* top 16 bits are in a different register */
8506 			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8507 				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8508 				<< (48
8509 				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8510 		}
8511 	}
8512 	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8513 				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8514 	dd->dc8051_timed_out = 0;
8515 	/*
8516 	 * Clear command for next user.
8517 	 */
8518 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8519 
8520 fail:
8521 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8522 
8523 	return return_code;
8524 }
8525 
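/* Ask the 8051 to move the physical link to the given state. */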
8526 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8527 {
8528 	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8529 }
8530 
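/*
 * Write a 32-bit configuration value for the given field and lane to the
 * 8051 via the LOAD_CONFIG_DATA host command.  Failures are logged here;
 * the 8051 return code (or a negative errno) is returned.
 */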
8531 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8532 		     u8 lane_id, u32 config_data)
8533 {
8534 	u64 data;
8535 	int ret;
8536 
8537 	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8538 		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8539 		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
8540 	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8541 	if (ret != HCMD_SUCCESS) {
8542 		dd_dev_err(dd,
8543 			   "load 8051 config: field id %d, lane %d, err %d\n",
8544 			   (int)field_id, (int)lane_id, ret);
8545 	}
8546 	return ret;
8547 }
8548 
8549 /*
8550  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8551  * set the result, even on error.
8552  * Return 0 on success, -errno on failure
8553  */
8554 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8555 		     u32 *result)
8556 {
8557 	u64 big_data;
8558 	u32 addr;
8559 	int ret;
8560 
8561 	/* address start depends on the lane_id */
8562 	if (lane_id < 4)
8563 		addr = (4 * NUM_GENERAL_FIELDS)
8564 			+ (lane_id * 4 * NUM_LANE_FIELDS);
8565 	else
8566 		addr = 0;
8567 	addr += field_id * 4;
8568 
8569 	/* read is in 8-byte chunks, hardware will truncate the address down */
8570 	ret = read_8051_data(dd, addr, 8, &big_data);
8571 
8572 	if (ret == 0) {
8573 		/* extract the 4 bytes we want */
8574 		if (addr & 0x4)
8575 			*result = (u32)(big_data >> 32);
8576 		else
8577 			*result = (u32)big_data;
8578 	} else {
8579 		*result = 0;
8580 		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8581 			   __func__, lane_id, field_id);
8582 	}
8583 
8584 	return ret;
8585 }
8586 
8587 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8588 			      u8 continuous)
8589 {
8590 	u32 frame;
8591 
8592 	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8593 		| power_management << POWER_MANAGEMENT_SHIFT;
8594 	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8595 				GENERAL_CONFIG, frame);
8596 }
8597 
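/*
 * Pack the local fabric verify-capability parameters (vau, z, vcu,
 * vl15buf, and the CRC sizes) into a single frame and write it to the
 * 8051.
 */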
8598 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8599 				 u16 vl15buf, u8 crc_sizes)
8600 {
8601 	u32 frame;
8602 
8603 	frame = (u32)vau << VAU_SHIFT
8604 		| (u32)z << Z_SHIFT
8605 		| (u32)vcu << VCU_SHIFT
8606 		| (u32)vl15buf << VL15BUF_SHIFT
8607 		| (u32)crc_sizes << CRC_SIZES_SHIFT;
8608 	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8609 				GENERAL_CONFIG, frame);
8610 }
8611 
8612 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8613 				     u8 *flag_bits, u16 *link_widths)
8614 {
8615 	u32 frame;
8616 
8617 	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8618 			 &frame);
8619 	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8620 	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8621 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8622 }
8623 
8624 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8625 				     u8 misc_bits,
8626 				     u8 flag_bits,
8627 				     u16 link_widths)
8628 {
8629 	u32 frame;
8630 
8631 	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8632 		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8633 		| (u32)link_widths << LINK_WIDTH_SHIFT;
8634 	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8635 		     frame);
8636 }
8637 
8638 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8639 				 u8 device_rev)
8640 {
8641 	u32 frame;
8642 
8643 	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8644 		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8645 	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8646 }
8647 
8648 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8649 				  u8 *device_rev)
8650 {
8651 	u32 frame;
8652 
8653 	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8654 	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8655 	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8656 			& REMOTE_DEVICE_REV_MASK;
8657 }
8658 
8659 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8660 {
8661 	u32 frame;
8662 
8663 	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8664 	*ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8665 	*ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8666 }
8667 
8668 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8669 			       u8 *continuous)
8670 {
8671 	u32 frame;
8672 
8673 	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8674 	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8675 					& POWER_MANAGEMENT_MASK;
8676 	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8677 					& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8678 }
8679 
8680 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8681 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8682 {
8683 	u32 frame;
8684 
8685 	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8686 	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
8687 	*z = (frame >> Z_SHIFT) & Z_MASK;
8688 	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8689 	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8690 	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8691 }
8692 
8693 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8694 				      u8 *remote_tx_rate,
8695 				      u16 *link_widths)
8696 {
8697 	u32 frame;
8698 
8699 	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8700 			 &frame);
8701 	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8702 				& REMOTE_TX_RATE_MASK;
8703 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8704 }
8705 
8706 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8707 {
8708 	u32 frame;
8709 
8710 	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8711 	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8712 }
8713 
8714 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8715 {
8716 	u32 frame;
8717 
8718 	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8719 	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8720 }
8721 
8722 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8723 {
8724 	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8725 }
8726 
8727 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8728 {
8729 	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8730 }
8731 
8732 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8733 {
8734 	u32 frame;
8735 	int ret;
8736 
8737 	*link_quality = 0;
8738 	if (dd->pport->host_link_state & HLS_UP) {
8739 		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8740 				       &frame);
8741 		if (ret == 0)
8742 			*link_quality = (frame >> LINK_QUALITY_SHIFT)
8743 						& LINK_QUALITY_MASK;
8744 	}
8745 }
8746 
8747 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8748 {
8749 	u32 frame;
8750 
8751 	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8752 	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8753 }
8754 
8755 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8756 {
8757 	u32 frame;
8758 
8759 	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8760 	*ldr = (frame & 0xff);
8761 }
8762 
8763 static int read_tx_settings(struct hfi1_devdata *dd,
8764 			    u8 *enable_lane_tx,
8765 			    u8 *tx_polarity_inversion,
8766 			    u8 *rx_polarity_inversion,
8767 			    u8 *max_rate)
8768 {
8769 	u32 frame;
8770 	int ret;
8771 
8772 	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8773 	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8774 				& ENABLE_LANE_TX_MASK;
8775 	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8776 				& TX_POLARITY_INVERSION_MASK;
8777 	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8778 				& RX_POLARITY_INVERSION_MASK;
8779 	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8780 	return ret;
8781 }
8782 
8783 static int write_tx_settings(struct hfi1_devdata *dd,
8784 			     u8 enable_lane_tx,
8785 			     u8 tx_polarity_inversion,
8786 			     u8 rx_polarity_inversion,
8787 			     u8 max_rate)
8788 {
8789 	u32 frame;
8790 
8791 	/* no need to mask, all variable sizes match field widths */
8792 	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8793 		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8794 		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8795 		| max_rate << MAX_RATE_SHIFT;
8796 	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8797 }
8798 
8799 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8800 {
8801 	u32 frame, version, prod_id;
8802 	int ret, lane;
8803 
8804 	/* 4 lanes */
8805 	for (lane = 0; lane < 4; lane++) {
8806 		ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8807 		if (ret) {
8808 			dd_dev_err(dd,
8809 				   "Unable to read lane %d firmware details\n",
8810 				   lane);
8811 			continue;
8812 		}
8813 		version = (frame >> SPICO_ROM_VERSION_SHIFT)
8814 					& SPICO_ROM_VERSION_MASK;
8815 		prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8816 					& SPICO_ROM_PROD_ID_MASK;
8817 		dd_dev_info(dd,
8818 			    "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8819 			    lane, version, prod_id);
8820 	}
8821 }
8822 
8823 /*
8824  * Read an idle LCB message.
8825  *
8826  * Returns 0 on success, -EINVAL on error
8827  */
8828 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8829 {
8830 	int ret;
8831 
8832 	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8833 	if (ret != HCMD_SUCCESS) {
8834 		dd_dev_err(dd, "read idle message: type %d, err %d\n",
8835 			   (u32)type, ret);
8836 		return -EINVAL;
8837 	}
8838 	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8839 	/* return only the payload as we already know the type */
8840 	*data_out >>= IDLE_PAYLOAD_SHIFT;
8841 	return 0;
8842 }
8843 
8844 /*
8845  * Read an idle SMA message.  To be done in response to a notification from
8846  * the 8051.
8847  *
8848  * Returns 0 on success, -EINVAL on error
8849  */
8850 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8851 {
8852 	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8853 				 data);
8854 }
8855 
8856 /*
8857  * Send an idle LCB message.
8858  *
8859  * Returns 0 on success, -EINVAL on error
8860  */
8861 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8862 {
8863 	int ret;
8864 
8865 	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8866 	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8867 	if (ret != HCMD_SUCCESS) {
8868 		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8869 			   data, ret);
8870 		return -EINVAL;
8871 	}
8872 	return 0;
8873 }
8874 
8875 /*
8876  * Send an idle SMA message.
8877  *
8878  * Returns 0 on success, -EINVAL on error
8879  */
8880 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8881 {
8882 	u64 data;
8883 
8884 	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8885 		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8886 	return send_idle_message(dd, data);
8887 }
8888 
8889 /*
8890  * Initialize the LCB then do a quick link up.  This may or may not be
8891  * in loopback.
8892  *
8893  * return 0 on success, -errno on error
8894  */
8895 static int do_quick_linkup(struct hfi1_devdata *dd)
8896 {
8897 	u64 reg;
8898 	unsigned long timeout;
8899 	int ret;
8900 
8901 	lcb_shutdown(dd, 0);
8902 
8903 	if (loopback) {
8904 		/* LCB_CFG_LOOPBACK.VAL = 2 */
8905 		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
8906 		write_csr(dd, DC_LCB_CFG_LOOPBACK,
8907 			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8908 		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8909 	}
8910 
8911 	/* start the LCBs */
8912 	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8913 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8914 
8915 	/* simulator only loopback steps */
8916 	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8917 		/* LCB_CFG_RUN.EN = 1 */
8918 		write_csr(dd, DC_LCB_CFG_RUN,
8919 			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8920 
8921 		/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8922 		timeout = jiffies + msecs_to_jiffies(10);
8923 		while (1) {
8924 			reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8925 			if (reg)
8926 				break;
8927 			if (time_after(jiffies, timeout)) {
8928 				dd_dev_err(dd,
8929 					   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8930 				return -ETIMEDOUT;
8931 			}
8932 			udelay(2);
8933 		}
8934 
8935 		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8936 			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8937 	}
8938 
8939 	if (!loopback) {
8940 		/*
8941 		 * When doing quick linkup and not in loopback, both
8942 		 * sides must be done with LCB set-up before either
8943 		 * starts the quick linkup.  Put a delay here so that
8944 		 * both sides can be started and have a chance to be
8945 		 * done with LCB set up before resuming.
8946 		 */
8947 		dd_dev_err(dd,
8948 			   "Pausing for peer to be finished with LCB set up\n");
8949 		msleep(5000);
8950 		dd_dev_err(dd, "Continuing with quick linkup\n");
8951 	}
8952 
8953 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8954 	set_8051_lcb_access(dd);
8955 
8956 	/*
8957 	 * State "quick" LinkUp request sets the physical link state to
8958 	 * LinkUp without a verify capability sequence.
8959 	 * This state is in simulator v37 and later.
8960 	 */
8961 	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8962 	if (ret != HCMD_SUCCESS) {
8963 		dd_dev_err(dd,
8964 			   "%s: set physical link state to quick LinkUp failed with return %d\n",
8965 			   __func__, ret);
8966 
8967 		set_host_lcb_access(dd);
8968 		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8969 
8970 		if (ret >= 0)
8971 			ret = -EINVAL;
8972 		return ret;
8973 	}
8974 
8975 	return 0; /* success */
8976 }
8977 
8978 /*
8979  * Set the SerDes to internal loopback mode.
8980  * Returns 0 on success, -errno on error.
8981  */
8982 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8983 {
8984 	int ret;
8985 
8986 	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8987 	if (ret == HCMD_SUCCESS)
8988 		return 0;
8989 	dd_dev_err(dd,
8990 		   "Set physical link state to SerDes Loopback failed with return %d\n",
8991 		   ret);
8992 	if (ret >= 0)
8993 		ret = -EINVAL;
8994 	return ret;
8995 }
8996 
8997 /*
8998  * Do all special steps to set up loopback.
8999  */
9000 static int init_loopback(struct hfi1_devdata *dd)
9001 {
9002 	dd_dev_info(dd, "Entering loopback mode\n");
9003 
9004 	/* all loopbacks should disable self GUID check */
9005 	write_csr(dd, DC_DC8051_CFG_MODE,
9006 		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9007 
9008 	/*
9009 	 * The simulator has only one loopback option - LCB.  Switch
9010 	 * to that option, which includes quick link up.
9011 	 *
9012 	 * Accept all valid loopback values.
9013 	 */
9014 	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9015 	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9016 	     loopback == LOOPBACK_CABLE)) {
9017 		loopback = LOOPBACK_LCB;
9018 		quick_linkup = 1;
9019 		return 0;
9020 	}
9021 
9022 	/* handle serdes loopback */
9023 	if (loopback == LOOPBACK_SERDES) {
9024 		/* internal serdes loopback needs quick linkup on RTL */
9025 		if (dd->icode == ICODE_RTL_SILICON)
9026 			quick_linkup = 1;
9027 		return set_serdes_loopback_mode(dd);
9028 	}
9029 
9030 	/* LCB loopback - handled at poll time */
9031 	if (loopback == LOOPBACK_LCB) {
9032 		quick_linkup = 1; /* LCB is always quick linkup */
9033 
9034 		/* not supported in emulation due to emulation RTL changes */
9035 		if (dd->icode == ICODE_FPGA_EMULATION) {
9036 			dd_dev_err(dd,
9037 				   "LCB loopback not supported in emulation\n");
9038 			return -EINVAL;
9039 		}
9040 		return 0;
9041 	}
9042 
9043 	/* external cable loopback requires no extra steps */
9044 	if (loopback == LOOPBACK_CABLE)
9045 		return 0;
9046 
9047 	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9048 	return -EINVAL;
9049 }
9050 
9051 /*
9052  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9053  * used in the Verify Capability link width attribute.
9054  */
9055 static u16 opa_to_vc_link_widths(u16 opa_widths)
9056 {
9057 	int i;
9058 	u16 result = 0;
9059 
9060 	static const struct link_bits {
9061 		u16 from;
9062 		u16 to;
9063 	} opa_link_xlate[] = {
9064 		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9065 		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9066 		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9067 		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9068 	};
9069 
9070 	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9071 		if (opa_widths & opa_link_xlate[i].from)
9072 			result |= opa_link_xlate[i].to;
9073 	}
9074 	return result;
9075 }
9076 
9077 /*
9078  * Set link attributes before moving to polling.
9079  */
9080 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9081 {
9082 	struct hfi1_devdata *dd = ppd->dd;
9083 	u8 enable_lane_tx;
9084 	u8 tx_polarity_inversion;
9085 	u8 rx_polarity_inversion;
9086 	int ret;
9087 
9088 	/* reset our fabric serdes to clear any lingering problems */
9089 	fabric_serdes_reset(dd);
9090 
9091 	/* set the local tx rate - need to read-modify-write */
9092 	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9093 			       &rx_polarity_inversion, &ppd->local_tx_rate);
9094 	if (ret)
9095 		goto set_local_link_attributes_fail;
9096 
9097 	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9098 		/* set the tx rate to the fastest enabled */
9099 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9100 			ppd->local_tx_rate = 1;
9101 		else
9102 			ppd->local_tx_rate = 0;
9103 	} else {
9104 		/* set the tx rate to all enabled */
9105 		ppd->local_tx_rate = 0;
9106 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9107 			ppd->local_tx_rate |= 2;
9108 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9109 			ppd->local_tx_rate |= 1;
9110 	}
9111 
9112 	enable_lane_tx = 0xF; /* enable all four lanes */
9113 	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9114 				rx_polarity_inversion, ppd->local_tx_rate);
9115 	if (ret != HCMD_SUCCESS)
9116 		goto set_local_link_attributes_fail;
9117 
9118 	/*
9119 	 * DC supports continuous updates.
9120 	 */
9121 	ret = write_vc_local_phy(dd,
9122 				 0 /* no power management */,
9123 				 1 /* continuous updates */);
9124 	if (ret != HCMD_SUCCESS)
9125 		goto set_local_link_attributes_fail;
9126 
9127 	/* z=1 in the next call: AU of 0 is not supported by the hardware */
9128 	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9129 				    ppd->port_crc_mode_enabled);
9130 	if (ret != HCMD_SUCCESS)
9131 		goto set_local_link_attributes_fail;
9132 
9133 	ret = write_vc_local_link_width(dd, 0, 0,
9134 					opa_to_vc_link_widths(
9135 						ppd->link_width_enabled));
9136 	if (ret != HCMD_SUCCESS)
9137 		goto set_local_link_attributes_fail;
9138 
9139 	/* let peer know who we are */
9140 	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9141 	if (ret == HCMD_SUCCESS)
9142 		return 0;
9143 
9144 set_local_link_attributes_fail:
9145 	dd_dev_err(dd,
9146 		   "Failed to set local link attributes, return 0x%x\n",
9147 		   ret);
9148 	return ret;
9149 }
9150 
9151 /*
9152  * Call this to start the link.
9153  * Do not do anything if the link is disabled.
9154  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9155  */
9156 int start_link(struct hfi1_pportdata *ppd)
9157 {
9158 	if (!ppd->link_enabled) {
9159 		dd_dev_info(ppd->dd,
9160 			    "%s: stopping link start because link is disabled\n",
9161 			    __func__);
9162 		return 0;
9163 	}
9164 	if (!ppd->driver_link_ready) {
9165 		dd_dev_info(ppd->dd,
9166 			    "%s: stopping link start because driver is not ready\n",
9167 			    __func__);
9168 		return 0;
9169 	}
9170 
9171 	return set_link_state(ppd, HLS_DN_POLL);
9172 }
9173 
9174 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9175 {
9176 	struct hfi1_devdata *dd = ppd->dd;
9177 	u64 mask;
9178 	unsigned long timeout;
9179 
9180 	/*
9181 	 * Check for QSFP interrupt for t_init (SFF 8679)
9182 	 */
9183 	timeout = jiffies + msecs_to_jiffies(2000);
9184 	while (1) {
9185 		mask = read_csr(dd, dd->hfi1_id ?
9186 				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9187 		if (!(mask & QSFP_HFI0_INT_N)) {
9188 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9189 				  ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9190 			break;
9191 		}
9192 		if (time_after(jiffies, timeout)) {
9193 			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9194 				    __func__);
9195 			break;
9196 		}
9197 		udelay(2);
9198 	}
9199 }
9200 
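/* Enable or disable the QSFP INT_N interrupt source for this port. */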
9201 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9202 {
9203 	struct hfi1_devdata *dd = ppd->dd;
9204 	u64 mask;
9205 
9206 	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9207 	if (enable)
9208 		mask |= (u64)QSFP_HFI0_INT_N;
9209 	else
9210 		mask &= ~(u64)QSFP_HFI0_INT_N;
9211 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9212 }
9213 
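/*
 * Reset the QSFP module by pulsing RESET_N low, then wait for the module
 * to finish initialization before re-enabling its interrupt.
 */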
9214 void reset_qsfp(struct hfi1_pportdata *ppd)
9215 {
9216 	struct hfi1_devdata *dd = ppd->dd;
9217 	u64 mask, qsfp_mask;
9218 
9219 	/* Disable INT_N from triggering QSFP interrupts */
9220 	set_qsfp_int_n(ppd, 0);
9221 
9222 	/* Reset the QSFP */
9223 	mask = (u64)QSFP_HFI0_RESET_N;
9224 
9225 	qsfp_mask = read_csr(dd,
9226 			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9227 	qsfp_mask &= ~mask;
9228 	write_csr(dd,
9229 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9230 
9231 	udelay(10);
9232 
9233 	qsfp_mask |= mask;
9234 	write_csr(dd,
9235 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9236 
9237 	wait_for_qsfp_init(ppd);
9238 
9239 	/*
9240 	 * Allow INT_N to trigger the QSFP interrupt to watch
9241 	 * for alarms and warnings
9242 	 */
9243 	set_qsfp_int_n(ppd, 1);
9244 }
9245 
9246 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9247 					u8 *qsfp_interrupt_status)
9248 {
9249 	struct hfi1_devdata *dd = ppd->dd;
9250 
9251 	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9252 	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9253 		dd_dev_info(dd, "%s: QSFP cable on fire\n",
9254 			    __func__);
9255 
9256 	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9257 	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9258 		dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9259 			    __func__);
9260 
9261 	/*
9262 	 * The remaining alarms/warnings don't matter if the link is down.
9263 	 */
9264 	if (ppd->host_link_state & HLS_DOWN)
9265 		return 0;
9266 
9267 	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9268 	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9269 		dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9270 			    __func__);
9271 
9272 	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9273 	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9274 		dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9275 			    __func__);
9276 
9277 	/* Byte 2 is vendor specific */
9278 
9279 	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9280 	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9281 		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9282 			    __func__);
9283 
9284 	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9285 	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9286 		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9287 			    __func__);
9288 
9289 	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9290 	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9291 		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9292 			    __func__);
9293 
9294 	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9295 	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9296 		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9297 			    __func__);
9298 
9299 	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9300 	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9301 		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9302 			    __func__);
9303 
9304 	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9305 	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9306 		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9307 			    __func__);
9308 
9309 	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9310 	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9311 		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9312 			    __func__);
9313 
9314 	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9315 	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9316 		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9317 			    __func__);
9318 
9319 	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9320 	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9321 		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9322 			    __func__);
9323 
9324 	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9325 	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9326 		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9327 			    __func__);
9328 
9329 	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9330 	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9331 		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9332 			    __func__);
9333 
9334 	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9335 	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9336 		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9337 			    __func__);
9338 
9339 	/* Bytes 9-10 and 11-12 are reserved */
9340 	/* Bytes 13-15 are vendor specific */
9341 
9342 	return 0;
9343 }
9344 
9345 /* This routine will only be scheduled if the QSFP module-present signal is asserted */
9346 void qsfp_event(struct work_struct *work)
9347 {
9348 	struct qsfp_data *qd;
9349 	struct hfi1_pportdata *ppd;
9350 	struct hfi1_devdata *dd;
9351 
9352 	qd = container_of(work, struct qsfp_data, qsfp_work);
9353 	ppd = qd->ppd;
9354 	dd = ppd->dd;
9355 
9356 	/* Sanity check */
9357 	if (!qsfp_mod_present(ppd))
9358 		return;
9359 
9360 	/*
9361 	 * Turn DC back on after cable has been re-inserted. Up until
9362 	 * now, the DC has been in reset to save power.
9363 	 */
9364 	dc_start(dd);
9365 
9366 	if (qd->cache_refresh_required) {
9367 		set_qsfp_int_n(ppd, 0);
9368 
9369 		wait_for_qsfp_init(ppd);
9370 
9371 		/*
9372 		 * Allow INT_N to trigger the QSFP interrupt to watch
9373 		 * for alarms and warnings
9374 		 */
9375 		set_qsfp_int_n(ppd, 1);
9376 
9377 		tune_serdes(ppd);
9378 
9379 		start_link(ppd);
9380 	}
9381 
9382 	if (qd->check_interrupt_flags) {
9383 		u8 qsfp_interrupt_status[16] = {0,};
9384 
9385 		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9386 				  &qsfp_interrupt_status[0], 16) != 16) {
9387 			dd_dev_info(dd,
9388 				    "%s: Failed to read status of QSFP module\n",
9389 				    __func__);
9390 		} else {
9391 			unsigned long flags;
9392 
9393 			handle_qsfp_error_conditions(
9394 					ppd, qsfp_interrupt_status);
9395 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9396 			ppd->qsfp_info.check_interrupt_flags = 0;
9397 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9398 					       flags);
9399 		}
9400 	}
9401 }
9402 
9403 static void init_qsfp_int(struct hfi1_devdata *dd)
9404 {
9405 	struct hfi1_pportdata *ppd = dd->pport;
9406 	u64 qsfp_mask, cce_int_mask;
9407 	const int qsfp1_int_smask = QSFP1_INT % 64;
9408 	const int qsfp2_int_smask = QSFP2_INT % 64;
9409 
9410 	/*
9411 	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9412 	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9413 	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9414 	 * the index of the appropriate CSR in the CCEIntMask CSR array
9415 	 */
9416 	cce_int_mask = read_csr(dd, CCE_INT_MASK +
9417 				(8 * (QSFP1_INT / 64)));
9418 	if (dd->hfi1_id) {
9419 		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9420 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9421 			  cce_int_mask);
9422 	} else {
9423 		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9424 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9425 			  cce_int_mask);
9426 	}
9427 
9428 	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9429 	/* Clear current status to avoid spurious interrupts */
9430 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9431 		  qsfp_mask);
9432 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9433 		  qsfp_mask);
9434 
9435 	set_qsfp_int_n(ppd, 0);
9436 
9437 	/* Handle active low nature of INT_N and MODPRST_N pins */
9438 	if (qsfp_mod_present(ppd))
9439 		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9440 	write_csr(dd,
9441 		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9442 		  qsfp_mask);
9443 }
9444 
9445 /*
9446  * Do a one-time initialize of the LCB block.
9447  */
9448 static void init_lcb(struct hfi1_devdata *dd)
9449 {
9450 	/* simulator does not correctly handle LCB cclk loopback, skip */
9451 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9452 		return;
9453 
9454 	/* the DC has been reset earlier in the driver load */
9455 
9456 	/* set LCB for cclk loopback on the port */
9457 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9458 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9459 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9460 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9461 	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9462 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9463 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9464 }
9465 
9466 int bringup_serdes(struct hfi1_pportdata *ppd)
9467 {
9468 	struct hfi1_devdata *dd = ppd->dd;
9469 	u64 guid;
9470 	int ret;
9471 
9472 	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9473 		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9474 
9475 	guid = ppd->guid;
9476 	if (!guid) {
9477 		if (dd->base_guid)
9478 			guid = dd->base_guid + ppd->port - 1;
9479 		ppd->guid = guid;
9480 	}
9481 
9482 	/* Set linkinit_reason on power up per OPA spec */
9483 	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9484 
9485 	/* one-time init of the LCB */
9486 	init_lcb(dd);
9487 
9488 	if (loopback) {
9489 		ret = init_loopback(dd);
9490 		if (ret < 0)
9491 			return ret;
9492 	}
9493 
9494 	get_port_type(ppd);
9495 	if (ppd->port_type == PORT_TYPE_QSFP) {
9496 		set_qsfp_int_n(ppd, 0);
9497 		wait_for_qsfp_init(ppd);
9498 		set_qsfp_int_n(ppd, 1);
9499 	}
9500 
9501 	/*
9502 	 * Tune the SerDes to a ballpark setting for
9503 	 * optimal signal and bit error rate
9504 	 * Needs to be done before starting the link
9505 	 */
9506 	tune_serdes(ppd);
9507 
9508 	return start_link(ppd);
9509 }
9510 
9511 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9512 {
9513 	struct hfi1_devdata *dd = ppd->dd;
9514 
9515 	/*
9516 	 * Shut down the link and keep it down.   First turn off that the
9517 	 * Shut down the link and keep it down.  First clear the flag that
9518 	 * says the driver wants to allow the link to be up (driver_link_ready).
9519 	 * (link_enabled).  Cancel any pending restart.  And finally
9520 	 * go offline.
9521 	 */
9522 	ppd->driver_link_ready = 0;
9523 	ppd->link_enabled = 0;
9524 
9525 	ppd->offline_disabled_reason =
9526 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9527 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9528 			     OPA_LINKDOWN_REASON_SMA_DISABLED);
9529 	set_link_state(ppd, HLS_DN_OFFLINE);
9530 
9531 	/* disable the port */
9532 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9533 }
9534 
9535 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9536 {
9537 	struct hfi1_pportdata *ppd;
9538 	int i;
9539 
9540 	ppd = (struct hfi1_pportdata *)(dd + 1);
9541 	for (i = 0; i < dd->num_pports; i++, ppd++) {
9542 		ppd->ibport_data.rvp.rc_acks = NULL;
9543 		ppd->ibport_data.rvp.rc_qacks = NULL;
9544 		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9545 		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9546 		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9547 		if (!ppd->ibport_data.rvp.rc_acks ||
9548 		    !ppd->ibport_data.rvp.rc_delayed_comp ||
9549 		    !ppd->ibport_data.rvp.rc_qacks)
9550 			return -ENOMEM;
9551 	}
9552 
9553 	return 0;
9554 }
9555 
9556 static const char * const pt_names[] = {
9557 	"expected",
9558 	"eager",
9559 	"invalid"
9560 };
9561 
9562 static const char *pt_name(u32 type)
9563 {
9564 	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9565 }
9566 
9567 /*
9568  * index is the index into the receive array
9569  */
9570 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9571 		  u32 type, unsigned long pa, u16 order)
9572 {
9573 	u64 reg;
9574 	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9575 			      (dd->kregbase + RCV_ARRAY));
9576 
9577 	if (!(dd->flags & HFI1_PRESENT))
9578 		goto done;
9579 
9580 	if (type == PT_INVALID) {
9581 		pa = 0;
9582 	} else if (type > PT_INVALID) {
9583 		dd_dev_err(dd,
9584 			   "unexpected receive array type %u for index %u, not handled\n",
9585 			   type, index);
9586 		goto done;
9587 	}
9588 
9589 	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9590 		  pt_name(type), index, pa, (unsigned long)order);
9591 
9592 #define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
9593 	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9594 		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9595 		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9596 					<< RCV_ARRAY_RT_ADDR_SHIFT;
9597 	writeq(reg, base + (index * 8));
9598 
9599 	if (type == PT_EAGER)
9600 		/*
9601 		 * Eager entries are written one-by-one so we have to push them
9602 		 * after we write the entry.
9603 		 */
9604 		flush_wc();
9605 done:
9606 	return;
9607 }
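
/*
 * Worked example of the RcvArray encoding above, with an illustrative
 * physical address: a buffer at pa 0x12345000 contributes an address
 * field of 0x12345000 >> RT_ADDR_SHIFT = 0x12345 (the entry stores the
 * address in 4KB units), and the entry for index 7 is written at byte
 * offset 7 * 8 = 56 from the start of the receive array.
 */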
9608 
9609 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9610 {
9611 	struct hfi1_devdata *dd = rcd->dd;
9612 	u32 i;
9613 
9614 	/* this could be optimized */
9615 	for (i = rcd->eager_base; i < rcd->eager_base +
9616 		     rcd->egrbufs.alloced; i++)
9617 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9618 
9619 	for (i = rcd->expected_base;
9620 			i < rcd->expected_base + rcd->expected_count; i++)
9621 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9622 }
9623 
9624 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9625 			struct hfi1_ctxt_info *kinfo)
9626 {
9627 	kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9628 		HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9629 	return 0;
9630 }
9631 
9632 struct hfi1_message_header *hfi1_get_msgheader(
9633 				struct hfi1_devdata *dd, __le32 *rhf_addr)
9634 {
9635 	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9636 
9637 	return (struct hfi1_message_header *)
9638 		(rhf_addr - dd->rhf_offset + offset);
9639 }
9640 
9641 static const char * const ib_cfg_name_strings[] = {
9642 	"HFI1_IB_CFG_LIDLMC",
9643 	"HFI1_IB_CFG_LWID_DG_ENB",
9644 	"HFI1_IB_CFG_LWID_ENB",
9645 	"HFI1_IB_CFG_LWID",
9646 	"HFI1_IB_CFG_SPD_ENB",
9647 	"HFI1_IB_CFG_SPD",
9648 	"HFI1_IB_CFG_RXPOL_ENB",
9649 	"HFI1_IB_CFG_LREV_ENB",
9650 	"HFI1_IB_CFG_LINKLATENCY",
9651 	"HFI1_IB_CFG_HRTBT",
9652 	"HFI1_IB_CFG_OP_VLS",
9653 	"HFI1_IB_CFG_VL_HIGH_CAP",
9654 	"HFI1_IB_CFG_VL_LOW_CAP",
9655 	"HFI1_IB_CFG_OVERRUN_THRESH",
9656 	"HFI1_IB_CFG_PHYERR_THRESH",
9657 	"HFI1_IB_CFG_LINKDEFAULT",
9658 	"HFI1_IB_CFG_PKEYS",
9659 	"HFI1_IB_CFG_MTU",
9660 	"HFI1_IB_CFG_LSTATE",
9661 	"HFI1_IB_CFG_VL_HIGH_LIMIT",
9662 	"HFI1_IB_CFG_PMA_TICKS",
9663 	"HFI1_IB_CFG_PORT"
9664 };
9665 
9666 static const char *ib_cfg_name(int which)
9667 {
9668 	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9669 		return "invalid";
9670 	return ib_cfg_name_strings[which];
9671 }
9672 
9673 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9674 {
9675 	struct hfi1_devdata *dd = ppd->dd;
9676 	int val = 0;
9677 
9678 	switch (which) {
9679 	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9680 		val = ppd->link_width_enabled;
9681 		break;
9682 	case HFI1_IB_CFG_LWID: /* currently active Link-width */
9683 		val = ppd->link_width_active;
9684 		break;
9685 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9686 		val = ppd->link_speed_enabled;
9687 		break;
9688 	case HFI1_IB_CFG_SPD: /* current Link speed */
9689 		val = ppd->link_speed_active;
9690 		break;
9691 
9692 	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9693 	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9694 	case HFI1_IB_CFG_LINKLATENCY:
9695 		goto unimplemented;
9696 
9697 	case HFI1_IB_CFG_OP_VLS:
9698 		val = ppd->vls_operational;
9699 		break;
9700 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9701 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9702 		break;
9703 	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9704 		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9705 		break;
9706 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9707 		val = ppd->overrun_threshold;
9708 		break;
9709 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9710 		val = ppd->phy_error_threshold;
9711 		break;
9712 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9713 		val = dd->link_default;
9714 		break;
9715 
9716 	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9717 	case HFI1_IB_CFG_PMA_TICKS:
9718 	default:
9719 unimplemented:
9720 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9721 			dd_dev_info(
9722 				dd,
9723 				"%s: which %s: not implemented\n",
9724 				__func__,
9725 				ib_cfg_name(which));
9726 		break;
9727 	}
9728 
9729 	return val;
9730 }
9731 
9732 /*
9733  * The largest MAD packet size.
9734  */
9735 #define MAX_MAD_PACKET 2048
9736 
9737 /*
9738  * Return the maximum header bytes that can go on the _wire_
9739  * for this device. This count includes the ICRC which is
9740  * not part of the packet held in memory but is appended
9741  * by the HW.
9742  * This is dependent on the device's receive header entry size.
9743  * HFI allows this to be set per-receive context, but the
9744  * driver presently enforces a global value.
9745  */
9746 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9747 {
9748 	/*
9749 	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9750 	 * the Receive Header Entry Size minus the PBC (or RHF) size
9751 	 * plus one DW for the ICRC appended by HW.
9752 	 *
9753 	 * dd->rcd[0].rcvhdrqentsize is in DW.
9754 	 * We use rcd[0] as all contexts will have the same value. Also,
9755 	 * the first kernel context would have been allocated by now so
9756 	 * we are guaranteed a valid value.
9757 	 */
9758 	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9759 }
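
/*
 * Worked example for the calculation above, assuming an illustrative
 * rcvhdrqentsize of 32 DW: (32 - 2 + 1) << 2 = 124 bytes of header
 * (including the HW-appended ICRC) may appear on the wire.
 */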
9760 
9761 /*
9762  * Set Send Length
9763  * @ppd - per port data
9764  *
9765  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9766  * registers compare against LRH.PktLen, so use the max bytes included
9767  * in the LRH.
9768  *
9769  * This routine changes all VL values except VL15, which it maintains at
9770  * the same value.
9771  */
9772 static void set_send_length(struct hfi1_pportdata *ppd)
9773 {
9774 	struct hfi1_devdata *dd = ppd->dd;
9775 	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9776 	u32 maxvlmtu = dd->vld[15].mtu;
9777 	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9778 			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9779 		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9780 	int i;
9781 	u32 thres;
9782 
9783 	for (i = 0; i < ppd->vls_supported; i++) {
9784 		if (dd->vld[i].mtu > maxvlmtu)
9785 			maxvlmtu = dd->vld[i].mtu;
9786 		if (i <= 3)
9787 			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9788 				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9789 				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9790 		else
9791 			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9792 				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9793 				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9794 	}
9795 	write_csr(dd, SEND_LEN_CHECK0, len1);
9796 	write_csr(dd, SEND_LEN_CHECK1, len2);
9797 	/* adjust kernel credit return thresholds based on new MTUs */
9798 	/* all kernel receive contexts have the same hdrqentsize */
9799 	for (i = 0; i < ppd->vls_supported; i++) {
9800 		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9801 			    sc_mtu_to_threshold(dd->vld[i].sc,
9802 						dd->vld[i].mtu,
9803 						dd->rcd[0]->rcvhdrqentsize));
9804 		sc_set_cr_threshold(dd->vld[i].sc, thres);
9805 	}
9806 	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9807 		    sc_mtu_to_threshold(dd->vld[15].sc,
9808 					dd->vld[15].mtu,
9809 					dd->rcd[0]->rcvhdrqentsize));
9810 	sc_set_cr_threshold(dd->vld[15].sc, thres);
9811 
9812 	/* Adjust maximum MTU for the port in DC */
9813 	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9814 		(ilog2(maxvlmtu >> 8) + 1);
9815 	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9816 	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9817 	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9818 		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9819 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9820 }
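
/*
 * Worked example of the MTU cap encoding above, with illustrative MTU
 * values: an 8192-byte MTU encodes as ilog2(8192 >> 8) + 1 =
 * ilog2(32) + 1 = 6, while 10240 uses the dedicated
 * DCC_CFG_PORT_MTU_CAP_10240 encoding.  The (mtu + max_hb) >> 2 terms
 * convert a byte count into the 4-byte-word units used by LRH.PktLen.
 */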
9821 
9822 static void set_lidlmc(struct hfi1_pportdata *ppd)
9823 {
9824 	int i;
9825 	u64 sreg = 0;
9826 	struct hfi1_devdata *dd = ppd->dd;
9827 	u32 mask = ~((1U << ppd->lmc) - 1);
9828 	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9829 
9830 	if (dd->hfi1_snoop.mode_flag)
9831 		dd_dev_info(dd, "Set lid/lmc while snooping");
9832 
9833 	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9834 		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9835 	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9836 			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9837 	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9838 			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9839 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9840 
9841 	/*
9842 	 * Iterate over all the send contexts and set their SLID check
9843 	 */
9844 	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9845 			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9846 	       (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9847 			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9848 
9849 	for (i = 0; i < dd->chip_send_contexts; i++) {
9850 		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9851 			  i, (u32)sreg);
9852 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9853 	}
9854 
9855 	/* Now we have to do the same thing for the sdma engines */
9856 	sdma_update_lmc(dd, mask, ppd->lid);
9857 }
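
/*
 * Worked example of the LMC masking above, with illustrative values:
 * lmc = 2 gives mask = ~((1U << 2) - 1) = 0xfffffffc, so a port LID of
 * 0x1000 covers LIDs 0x1000 through 0x1003; the SLID check value is
 * (lid & mask) = 0x1000, while the mask itself is programmed into the
 * SLID and DLID mask fields.
 */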
9858 
9859 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9860 {
9861 	unsigned long timeout;
9862 	u32 curr_state;
9863 
9864 	timeout = jiffies + msecs_to_jiffies(msecs);
9865 	while (1) {
9866 		curr_state = read_physical_state(dd);
9867 		if (curr_state == state)
9868 			break;
9869 		if (time_after(jiffies, timeout)) {
9870 			dd_dev_err(dd,
9871 				   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9872 				   state, curr_state);
9873 			return -ETIMEDOUT;
9874 		}
9875 		usleep_range(1950, 2050); /* sleep 2ms-ish */
9876 	}
9877 
9878 	return 0;
9879 }
9880 
9881 /*
9882  * Helper for set_link_state().  Do not call except from that routine.
9883  * Expects ppd->hls_mutex to be held.
9884  *
9885  * @rem_reason value to be sent to the neighbor
9886  *
9887  * LinkDownReasons only set if transition succeeds.
9888  */
9889 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9890 {
9891 	struct hfi1_devdata *dd = ppd->dd;
9892 	u32 pstate, previous_state;
9893 	u32 last_local_state;
9894 	u32 last_remote_state;
9895 	int ret;
9896 	int do_transition;
9897 	int do_wait;
9898 
9899 	previous_state = ppd->host_link_state;
9900 	ppd->host_link_state = HLS_GOING_OFFLINE;
9901 	pstate = read_physical_state(dd);
9902 	if (pstate == PLS_OFFLINE) {
9903 		do_transition = 0;	/* in right state */
9904 		do_wait = 0;		/* ...no need to wait */
9905 	} else if ((pstate & 0xff) == PLS_OFFLINE) {
9906 		do_transition = 0;	/* in an offline transient state */
9907 		do_wait = 1;		/* ...wait for it to settle */
9908 	} else {
9909 		do_transition = 1;	/* need to move to offline */
9910 		do_wait = 1;		/* ...will need to wait */
9911 	}
9912 
9913 	if (do_transition) {
9914 		ret = set_physical_link_state(dd,
9915 					      (rem_reason << 8) | PLS_OFFLINE);
9916 
9917 		if (ret != HCMD_SUCCESS) {
9918 			dd_dev_err(dd,
9919 				   "Failed to transition to Offline link state, return %d\n",
9920 				   ret);
9921 			return -EINVAL;
9922 		}
9923 		if (ppd->offline_disabled_reason ==
9924 				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9925 			ppd->offline_disabled_reason =
9926 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9927 	}
9928 
9929 	if (do_wait) {
9930 		/* it can take a while for the link to go down */
9931 		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9932 		if (ret < 0)
9933 			return ret;
9934 	}
9935 
9936 	/* make sure the logical state is also down */
9937 	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9938 
9939 	/*
9940 	 * Now in charge of LCB - must be after the physical state is
9941 	 * offline.quiet and before host_link_state is changed.
9942 	 */
9943 	set_host_lcb_access(dd);
9944 	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9945 	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9946 
9947 	if (ppd->port_type == PORT_TYPE_QSFP &&
9948 	    ppd->qsfp_info.limiting_active &&
9949 	    qsfp_mod_present(ppd)) {
9950 		int ret;
9951 
9952 		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9953 		if (ret == 0) {
9954 			set_qsfp_tx(ppd, 0);
9955 			release_chip_resource(dd, qsfp_resource(dd));
9956 		} else {
9957 			/* not fatal, but should warn */
9958 			dd_dev_err(dd,
9959 				   "Unable to acquire lock to turn off QSFP TX\n");
9960 		}
9961 	}
9962 
9963 	/*
9964 	 * The LNI has a mandatory wait time after the physical state
9965 	 * moves to Offline.Quiet.  The wait time may be different
9966 	 * depending on how the link went down.  The 8051 firmware
9967 	 * will observe the needed wait time and only move to ready
9968 	 * when that is completed.  The largest of the quiet timeouts
9969 	 * is 6s, so wait that long and then at least 0.5s more for
9970 	 * other transitions, and another 0.5s for a buffer.
9971 	 */
9972 	ret = wait_fm_ready(dd, 7000);
9973 	if (ret) {
9974 		dd_dev_err(dd,
9975 			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9976 		/* state is really offline, so make it so */
9977 		ppd->host_link_state = HLS_DN_OFFLINE;
9978 		return ret;
9979 	}
9980 
9981 	/*
9982 	 * The state is now offline and the 8051 is ready to accept host
9983 	 * requests.
9984 	 *	- change our state
9985 	 *	- notify others if we were previously in a linkup state
9986 	 */
9987 	ppd->host_link_state = HLS_DN_OFFLINE;
9988 	if (previous_state & HLS_UP) {
9989 		/* went down while link was up */
9990 		handle_linkup_change(dd, 0);
9991 	} else if (previous_state
9992 			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9993 		/* went down while attempting link up */
9994 		/* byte 1 of last_*_state is the failure reason */
9995 		read_last_local_state(dd, &last_local_state);
9996 		read_last_remote_state(dd, &last_remote_state);
9997 		dd_dev_err(dd,
9998 			   "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9999 			   last_local_state, last_remote_state);
10000 	}
10001 
10002 	/* the active link width (downgrade) is 0 on link down */
10003 	ppd->link_width_active = 0;
10004 	ppd->link_width_downgrade_tx_active = 0;
10005 	ppd->link_width_downgrade_rx_active = 0;
10006 	ppd->current_egress_rate = 0;
10007 	return 0;
10008 }
10009 
10010 /* return the link state name */
10011 static const char *link_state_name(u32 state)
10012 {
10013 	const char *name;
10014 	int n = ilog2(state);
10015 	static const char * const names[] = {
10016 		[__HLS_UP_INIT_BP]	 = "INIT",
10017 		[__HLS_UP_ARMED_BP]	 = "ARMED",
10018 		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
10019 		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
10020 		[__HLS_DN_POLL_BP]	 = "POLL",
10021 		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
10022 		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
10023 		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
10024 		[__HLS_GOING_UP_BP]	 = "GOING_UP",
10025 		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10026 		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10027 	};
10028 
10029 	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10030 	return name ? name : "unknown";
10031 }
10032 
10033 /* return the link state reason name */
10034 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10035 {
10036 	if (state == HLS_UP_INIT) {
10037 		switch (ppd->linkinit_reason) {
10038 		case OPA_LINKINIT_REASON_LINKUP:
10039 			return "(LINKUP)";
10040 		case OPA_LINKINIT_REASON_FLAPPING:
10041 			return "(FLAPPING)";
10042 		case OPA_LINKINIT_OUTSIDE_POLICY:
10043 			return "(OUTSIDE_POLICY)";
10044 		case OPA_LINKINIT_QUARANTINED:
10045 			return "(QUARANTINED)";
10046 		case OPA_LINKINIT_INSUFIC_CAPABILITY:
10047 			return "(INSUFIC_CAPABILITY)";
10048 		default:
10049 			break;
10050 		}
10051 	}
10052 	return "";
10053 }
10054 
10055 /*
10056  * driver_physical_state - convert the driver's notion of a port's
10057  * state (an HLS_*) into a physical state (an {IB,OPA}_PORTPHYSSTATE_*).
10058  * Return -1 (converted to a u32) to indicate error.
10059  */
10060 u32 driver_physical_state(struct hfi1_pportdata *ppd)
10061 {
10062 	switch (ppd->host_link_state) {
10063 	case HLS_UP_INIT:
10064 	case HLS_UP_ARMED:
10065 	case HLS_UP_ACTIVE:
10066 		return IB_PORTPHYSSTATE_LINKUP;
10067 	case HLS_DN_POLL:
10068 		return IB_PORTPHYSSTATE_POLLING;
10069 	case HLS_DN_DISABLE:
10070 		return IB_PORTPHYSSTATE_DISABLED;
10071 	case HLS_DN_OFFLINE:
10072 		return OPA_PORTPHYSSTATE_OFFLINE;
10073 	case HLS_VERIFY_CAP:
10074 		return IB_PORTPHYSSTATE_POLLING;
10075 	case HLS_GOING_UP:
10076 		return IB_PORTPHYSSTATE_POLLING;
10077 	case HLS_GOING_OFFLINE:
10078 		return OPA_PORTPHYSSTATE_OFFLINE;
10079 	case HLS_LINK_COOLDOWN:
10080 		return OPA_PORTPHYSSTATE_OFFLINE;
10081 	case HLS_DN_DOWNDEF:
10082 	default:
10083 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10084 			   ppd->host_link_state);
10085 		return -1;
10086 	}
10087 }
10088 
10089 /*
10090  * driver_logical_state - convert the driver's notion of a port's
10091  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10092  * (converted to a u32) to indicate error.
10093  */
10094 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10095 {
10096 	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10097 		return IB_PORT_DOWN;
10098 
10099 	switch (ppd->host_link_state & HLS_UP) {
10100 	case HLS_UP_INIT:
10101 		return IB_PORT_INIT;
10102 	case HLS_UP_ARMED:
10103 		return IB_PORT_ARMED;
10104 	case HLS_UP_ACTIVE:
10105 		return IB_PORT_ACTIVE;
10106 	default:
10107 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10108 			   ppd->host_link_state);
10109 		return -1;
10110 	}
10111 }
10112 
10113 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10114 			  u8 neigh_reason, u8 rem_reason)
10115 {
10116 	if (ppd->local_link_down_reason.latest == 0 &&
10117 	    ppd->neigh_link_down_reason.latest == 0) {
10118 		ppd->local_link_down_reason.latest = lcl_reason;
10119 		ppd->neigh_link_down_reason.latest = neigh_reason;
10120 		ppd->remote_link_down_reason = rem_reason;
10121 	}
10122 }
10123 
10124 /*
10125  * Change the physical and/or logical link state.
10126  *
10127  * Do not call this routine while inside an interrupt.  It contains
10128  * calls to routines that can take multiple seconds to finish.
10129  *
10130  * Returns 0 on success, -errno on failure.
10131  */
10132 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10133 {
10134 	struct hfi1_devdata *dd = ppd->dd;
10135 	struct ib_event event = {.device = NULL};
10136 	int ret1, ret = 0;
10137 	int orig_new_state, poll_bounce;
10138 
10139 	mutex_lock(&ppd->hls_lock);
10140 
10141 	orig_new_state = state;
10142 	if (state == HLS_DN_DOWNDEF)
10143 		state = dd->link_default;
10144 
10145 	/* interpret poll -> poll as a link bounce */
10146 	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10147 		      state == HLS_DN_POLL;
10148 
10149 	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10150 		    link_state_name(ppd->host_link_state),
10151 		    link_state_name(orig_new_state),
10152 		    poll_bounce ? "(bounce) " : "",
10153 		    link_state_reason_name(ppd, state));
10154 
10155 	/*
10156 	 * If we're going to a (HLS_*) link state that implies the logical
10157 	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10158 	 * reset is_sm_config_started to 0.
10159 	 */
10160 	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10161 		ppd->is_sm_config_started = 0;
10162 
10163 	/*
10164 	 * Do nothing if the states match.  Let a poll -> poll link bounce
10165 	 * go through.
10166 	 */
10167 	if (ppd->host_link_state == state && !poll_bounce)
10168 		goto done;
10169 
10170 	switch (state) {
10171 	case HLS_UP_INIT:
10172 		if (ppd->host_link_state == HLS_DN_POLL &&
10173 		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10174 			/*
10175 			 * Quick link up jumps from polling to here.
10176 			 *
10177 			 * Whether in normal or loopback mode, the
10178 			 * simulator jumps from polling to link up.
10179 			 * Accept that here.
10180 			 */
10181 			/* OK */
10182 		} else if (ppd->host_link_state != HLS_GOING_UP) {
10183 			goto unexpected;
10184 		}
10185 
10186 		ppd->host_link_state = HLS_UP_INIT;
10187 		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10188 		if (ret) {
10189 			/* logical state didn't change, stay at going_up */
10190 			ppd->host_link_state = HLS_GOING_UP;
10191 			dd_dev_err(dd,
10192 				   "%s: logical state did not change to INIT\n",
10193 				   __func__);
10194 		} else {
10195 			/* clear old transient LINKINIT_REASON code */
10196 			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10197 				ppd->linkinit_reason =
10198 					OPA_LINKINIT_REASON_LINKUP;
10199 
10200 			/* enable the port */
10201 			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10202 
10203 			handle_linkup_change(dd, 1);
10204 		}
10205 		break;
10206 	case HLS_UP_ARMED:
10207 		if (ppd->host_link_state != HLS_UP_INIT)
10208 			goto unexpected;
10209 
10210 		ppd->host_link_state = HLS_UP_ARMED;
10211 		set_logical_state(dd, LSTATE_ARMED);
10212 		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10213 		if (ret) {
10214 			/* logical state didn't change, stay at init */
10215 			ppd->host_link_state = HLS_UP_INIT;
10216 			dd_dev_err(dd,
10217 				   "%s: logical state did not change to ARMED\n",
10218 				   __func__);
10219 		}
10220 		/*
10221 		 * The simulator does not currently implement SMA messages,
10222 		 * so neighbor_normal is not set.  Set it here when we first
10223 		 * move to Armed.
10224 		 */
10225 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10226 			ppd->neighbor_normal = 1;
10227 		break;
10228 	case HLS_UP_ACTIVE:
10229 		if (ppd->host_link_state != HLS_UP_ARMED)
10230 			goto unexpected;
10231 
10232 		ppd->host_link_state = HLS_UP_ACTIVE;
10233 		set_logical_state(dd, LSTATE_ACTIVE);
10234 		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10235 		if (ret) {
10236 			/* logical state didn't change, stay at armed */
10237 			ppd->host_link_state = HLS_UP_ARMED;
10238 			dd_dev_err(dd,
10239 				   "%s: logical state did not change to ACTIVE\n",
10240 				   __func__);
10241 		} else {
10242 			/* tell all engines to go running */
10243 			sdma_all_running(dd);
10244 
10245 			/* Signal the IB layer that the port has gone active */
10246 			event.device = &dd->verbs_dev.rdi.ibdev;
10247 			event.element.port_num = ppd->port;
10248 			event.event = IB_EVENT_PORT_ACTIVE;
10249 		}
10250 		break;
10251 	case HLS_DN_POLL:
10252 		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10253 		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10254 		    dd->dc_shutdown)
10255 			dc_start(dd);
10256 		/* Hand LED control to the DC */
10257 		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10258 
10259 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10260 			u8 tmp = ppd->link_enabled;
10261 
10262 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10263 			if (ret) {
10264 				ppd->link_enabled = tmp;
10265 				break;
10266 			}
10267 			ppd->remote_link_down_reason = 0;
10268 
10269 			if (ppd->driver_link_ready)
10270 				ppd->link_enabled = 1;
10271 		}
10272 
10273 		set_all_slowpath(ppd->dd);
10274 		ret = set_local_link_attributes(ppd);
10275 		if (ret)
10276 			break;
10277 
10278 		ppd->port_error_action = 0;
10279 		ppd->host_link_state = HLS_DN_POLL;
10280 
10281 		if (quick_linkup) {
10282 			/* quick linkup does not go into polling */
10283 			ret = do_quick_linkup(dd);
10284 		} else {
10285 			ret1 = set_physical_link_state(dd, PLS_POLLING);
10286 			if (ret1 != HCMD_SUCCESS) {
10287 				dd_dev_err(dd,
10288 					   "Failed to transition to Polling link state, return 0x%x\n",
10289 					   ret1);
10290 				ret = -EINVAL;
10291 			}
10292 		}
10293 		ppd->offline_disabled_reason =
10294 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10295 		/*
10296 		 * If an error occurred above, go back to offline.  The
10297 		 * caller may reschedule another attempt.
10298 		 */
10299 		if (ret)
10300 			goto_offline(ppd, 0);
10301 		break;
10302 	case HLS_DN_DISABLE:
10303 		/* link is disabled */
10304 		ppd->link_enabled = 0;
10305 
10306 		/* allow any state to transition to disabled */
10307 
10308 		/* must transition to offline first */
10309 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10310 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10311 			if (ret)
10312 				break;
10313 			ppd->remote_link_down_reason = 0;
10314 		}
10315 
10316 		ret1 = set_physical_link_state(dd, PLS_DISABLED);
10317 		if (ret1 != HCMD_SUCCESS) {
10318 			dd_dev_err(dd,
10319 				   "Failed to transition to Disabled link state, return 0x%x\n",
10320 				   ret1);
10321 			ret = -EINVAL;
10322 			break;
10323 		}
10324 		ppd->host_link_state = HLS_DN_DISABLE;
10325 		dc_shutdown(dd);
10326 		break;
10327 	case HLS_DN_OFFLINE:
10328 		if (ppd->host_link_state == HLS_DN_DISABLE)
10329 			dc_start(dd);
10330 
10331 		/* allow any state to transition to offline */
10332 		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10333 		if (!ret)
10334 			ppd->remote_link_down_reason = 0;
10335 		break;
10336 	case HLS_VERIFY_CAP:
10337 		if (ppd->host_link_state != HLS_DN_POLL)
10338 			goto unexpected;
10339 		ppd->host_link_state = HLS_VERIFY_CAP;
10340 		break;
10341 	case HLS_GOING_UP:
10342 		if (ppd->host_link_state != HLS_VERIFY_CAP)
10343 			goto unexpected;
10344 
10345 		ret1 = set_physical_link_state(dd, PLS_LINKUP);
10346 		if (ret1 != HCMD_SUCCESS) {
10347 			dd_dev_err(dd,
10348 				   "Failed to transition to link up state, return 0x%x\n",
10349 				   ret1);
10350 			ret = -EINVAL;
10351 			break;
10352 		}
10353 		ppd->host_link_state = HLS_GOING_UP;
10354 		break;
10355 
10356 	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
10357 	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
10358 	default:
10359 		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10360 			    __func__, state);
10361 		ret = -EINVAL;
10362 		break;
10363 	}
10364 
10365 	goto done;
10366 
10367 unexpected:
10368 	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10369 		   __func__, link_state_name(ppd->host_link_state),
10370 		   link_state_name(state));
10371 	ret = -EINVAL;
10372 
10373 done:
10374 	mutex_unlock(&ppd->hls_lock);
10375 
10376 	if (event.device)
10377 		ib_dispatch_event(&event);
10378 
10379 	return ret;
10380 }
10381 
10382 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10383 {
10384 	u64 reg;
10385 	int ret = 0;
10386 
10387 	switch (which) {
10388 	case HFI1_IB_CFG_LIDLMC:
10389 		set_lidlmc(ppd);
10390 		break;
10391 	case HFI1_IB_CFG_VL_HIGH_LIMIT:
10392 		/*
10393 		 * The VL Arbitrator high limit is sent in units of 4k
10394 		 * bytes, while HFI stores it in units of 64 bytes.
10395 		 */
10396 		val *= 4096 / 64;
10397 		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10398 			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10399 		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10400 		break;
10401 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10402 		/* HFI only supports POLL as the default link down state */
10403 		if (val != HLS_DN_POLL)
10404 			ret = -EINVAL;
10405 		break;
10406 	case HFI1_IB_CFG_OP_VLS:
10407 		if (ppd->vls_operational != val) {
10408 			ppd->vls_operational = val;
10409 			if (!ppd->port)
10410 				ret = -EINVAL;
10411 		}
10412 		break;
10413 	/*
10414 	 * For link width, link width downgrade, and speed enable, always AND
10415 	 * the setting with what is actually supported.  This has two benefits.
10416 	 * First, enabled can't have unsupported values, no matter what the
10417 	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10418 	 * "fill in with your supported value" have all the bits in the
10419 	 * field set, so simply ANDing with supported has the desired result.
10420 	 */
10421 	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10422 		ppd->link_width_enabled = val & ppd->link_width_supported;
10423 		break;
10424 	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10425 		ppd->link_width_downgrade_enabled =
10426 				val & ppd->link_width_downgrade_supported;
10427 		break;
10428 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10429 		ppd->link_speed_enabled = val & ppd->link_speed_supported;
10430 		break;
10431 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10432 		/*
10433 		 * HFI does not follow IB specs; save this value
10434 		 * so we can report it if asked.
10435 		 */
10436 		ppd->overrun_threshold = val;
10437 		break;
10438 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10439 		/*
10440 		 * HFI does not follow IB specs; save this value
10441 		 * so we can report it if asked.
10442 		 */
10443 		ppd->phy_error_threshold = val;
10444 		break;
10445 
10446 	case HFI1_IB_CFG_MTU:
10447 		set_send_length(ppd);
10448 		break;
10449 
10450 	case HFI1_IB_CFG_PKEYS:
10451 		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10452 			set_partition_keys(ppd);
10453 		break;
10454 
10455 	default:
10456 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10457 			dd_dev_info(ppd->dd,
10458 				    "%s: which %s, val 0x%x: not implemented\n",
10459 				    __func__, ib_cfg_name(which), val);
10460 		break;
10461 	}
10462 	return ret;
10463 }
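
/*
 * Worked example for the HFI1_IB_CFG_VL_HIGH_LIMIT case above, with an
 * illustrative limit: a value of 2 (in units of 4k bytes) is scaled by
 * 4096 / 64 = 64, so 2 * 64 = 128 is written into the
 * SEND_HIGH_PRIORITY_LIMIT LIMIT field (which counts 64-byte units).
 */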
10464 
10465 /* begin functions related to vl arbitration table caching */
10466 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10467 {
10468 	int i;
10469 
10470 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10471 			VL_ARB_LOW_PRIO_TABLE_SIZE);
10472 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10473 			VL_ARB_HIGH_PRIO_TABLE_SIZE);
10474 
10475 	/*
10476 	 * Note that we always return values directly from the
10477 	 * 'vl_arb_cache' (and do no CSR reads) in response to a
10478 	 * 'Get(VLArbTable)'. This is obviously correct after a
10479 	 * 'Set(VLArbTable)', since the cache will then be up to
10480 	 * date. But it's also correct prior to any 'Set(VLArbTable)'
10481 	 * since then both the cache, and the relevant h/w registers
10482 	 * will be zeroed.
10483 	 */
10484 
10485 	for (i = 0; i < MAX_PRIO_TABLE; i++)
10486 		spin_lock_init(&ppd->vl_arb_cache[i].lock);
10487 }
10488 
10489 /*
10490  * vl_arb_lock_cache
10491  *
10492  * All other vl_arb_* functions should be called only after locking
10493  * the cache.
10494  */
10495 static inline struct vl_arb_cache *
10496 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10497 {
10498 	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10499 		return NULL;
10500 	spin_lock(&ppd->vl_arb_cache[idx].lock);
10501 	return &ppd->vl_arb_cache[idx];
10502 }
10503 
10504 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10505 {
10506 	spin_unlock(&ppd->vl_arb_cache[idx].lock);
10507 }
10508 
10509 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10510 			     struct ib_vl_weight_elem *vl)
10511 {
10512 	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10513 }
10514 
10515 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10516 			     struct ib_vl_weight_elem *vl)
10517 {
10518 	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10519 }
10520 
10521 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10522 			      struct ib_vl_weight_elem *vl)
10523 {
10524 	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10525 }
10526 
10527 /* end functions related to vl arbitration table caching */
10528 
10529 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10530 			  u32 size, struct ib_vl_weight_elem *vl)
10531 {
10532 	struct hfi1_devdata *dd = ppd->dd;
10533 	u64 reg;
10534 	unsigned int i, is_up = 0;
10535 	int drain, ret = 0;
10536 
10537 	mutex_lock(&ppd->hls_lock);
10538 
10539 	if (ppd->host_link_state & HLS_UP)
10540 		is_up = 1;
10541 
10542 	drain = !is_ax(dd) && is_up;
10543 
10544 	if (drain)
10545 		/*
10546 		 * Before adjusting VL arbitration weights, empty per-VL
10547 		 * FIFOs, otherwise a packet whose VL weight is being
10548 		 * set to 0 could get stuck in a FIFO with no chance to
10549 		 * egress.
10550 		 */
10551 		ret = stop_drain_data_vls(dd);
10552 
10553 	if (ret) {
10554 		dd_dev_err(
10555 			dd,
10556 			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10557 			__func__);
10558 		goto err;
10559 	}
10560 
10561 	for (i = 0; i < size; i++, vl++) {
10562 		/*
10563 		 * NOTE: The low priority shift and mask are used here, but
10564 		 * they are the same for both the low and high registers.
10565 		 */
10566 		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10567 				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10568 		      | (((u64)vl->weight
10569 				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10570 				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10571 		write_csr(dd, target + (i * 8), reg);
10572 	}
10573 	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10574 
10575 	if (drain)
10576 		open_fill_data_vls(dd); /* reopen all VLs */
10577 
10578 err:
10579 	mutex_unlock(&ppd->hls_lock);
10580 
10581 	return ret;
10582 }
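
/*
 * Worked example of the VL arbitration entry packing above, with
 * illustrative values: a table entry of {vl = 2, weight = 10} packs as
 * (2 << SEND_LOW_PRIORITY_LIST_VL_SHIFT) |
 * (10 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT) and is written to
 * target + (i * 8) for table slot i.
 */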
10583 
10584 /*
10585  * Read one credit merge VL register.
10586  */
10587 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10588 			   struct vl_limit *vll)
10589 {
10590 	u64 reg = read_csr(dd, csr);
10591 
10592 	vll->dedicated = cpu_to_be16(
10593 		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10594 		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10595 	vll->shared = cpu_to_be16(
10596 		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10597 		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10598 }
10599 
10600 /*
10601  * Read the current credit merge limits.
10602  */
10603 static int get_buffer_control(struct hfi1_devdata *dd,
10604 			      struct buffer_control *bc, u16 *overall_limit)
10605 {
10606 	u64 reg;
10607 	int i;
10608 
10609 	/* not all entries are filled in */
10610 	memset(bc, 0, sizeof(*bc));
10611 
10612 	/* OPA and HFI have a 1-1 mapping */
10613 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
10614 		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10615 
10616 	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10617 	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10618 
10619 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10620 	bc->overall_shared_limit = cpu_to_be16(
10621 		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10622 		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10623 	if (overall_limit)
10624 		*overall_limit = (reg
10625 			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10626 			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10627 	return sizeof(struct buffer_control);
10628 }
10629 
10630 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10631 {
10632 	u64 reg;
10633 	int i;
10634 
10635 	/* each register contains 16 SC->VLnt mappings, 4 bits each */
10636 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10637 	for (i = 0; i < sizeof(u64); i++) {
10638 		u8 byte = *(((u8 *)&reg) + i);
10639 
10640 		dp->vlnt[2 * i] = byte & 0xf;
10641 		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10642 	}
10643 
10644 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10645 	for (i = 0; i < sizeof(u64); i++) {
10646 		u8 byte = *(((u8 *)&reg) + i);
10647 
10648 		dp->vlnt[16 + (2 * i)] = byte & 0xf;
10649 		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10650 	}
10651 	return sizeof(struct sc2vlnt);
10652 }
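
/*
 * Worked example of the SC->VLnt unpacking above, with an illustrative
 * register value: if the low byte of DCC_CFG_SC_VL_TABLE_15_0 reads
 * 0x21, then SC0 maps to VLnt 1 (low nibble) and SC1 maps to VLnt 2
 * (high nibble).
 */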
10653 
10654 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10655 			      struct ib_vl_weight_elem *vl)
10656 {
10657 	unsigned int i;
10658 
10659 	for (i = 0; i < nelems; i++, vl++) {
10660 		vl->vl = 0xf;
10661 		vl->weight = 0;
10662 	}
10663 }
10664 
10665 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10666 {
10667 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10668 		  DC_SC_VL_VAL(15_0,
10669 			       0, dp->vlnt[0] & 0xf,
10670 			       1, dp->vlnt[1] & 0xf,
10671 			       2, dp->vlnt[2] & 0xf,
10672 			       3, dp->vlnt[3] & 0xf,
10673 			       4, dp->vlnt[4] & 0xf,
10674 			       5, dp->vlnt[5] & 0xf,
10675 			       6, dp->vlnt[6] & 0xf,
10676 			       7, dp->vlnt[7] & 0xf,
10677 			       8, dp->vlnt[8] & 0xf,
10678 			       9, dp->vlnt[9] & 0xf,
10679 			       10, dp->vlnt[10] & 0xf,
10680 			       11, dp->vlnt[11] & 0xf,
10681 			       12, dp->vlnt[12] & 0xf,
10682 			       13, dp->vlnt[13] & 0xf,
10683 			       14, dp->vlnt[14] & 0xf,
10684 			       15, dp->vlnt[15] & 0xf));
10685 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10686 		  DC_SC_VL_VAL(31_16,
10687 			       16, dp->vlnt[16] & 0xf,
10688 			       17, dp->vlnt[17] & 0xf,
10689 			       18, dp->vlnt[18] & 0xf,
10690 			       19, dp->vlnt[19] & 0xf,
10691 			       20, dp->vlnt[20] & 0xf,
10692 			       21, dp->vlnt[21] & 0xf,
10693 			       22, dp->vlnt[22] & 0xf,
10694 			       23, dp->vlnt[23] & 0xf,
10695 			       24, dp->vlnt[24] & 0xf,
10696 			       25, dp->vlnt[25] & 0xf,
10697 			       26, dp->vlnt[26] & 0xf,
10698 			       27, dp->vlnt[27] & 0xf,
10699 			       28, dp->vlnt[28] & 0xf,
10700 			       29, dp->vlnt[29] & 0xf,
10701 			       30, dp->vlnt[30] & 0xf,
10702 			       31, dp->vlnt[31] & 0xf));
10703 }
10704 
10705 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10706 			u16 limit)
10707 {
10708 	if (limit != 0)
10709 		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10710 			    what, (int)limit, idx);
10711 }
10712 
10713 /* change only the shared limit portion of SendCmGlobalCredit */
10714 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10715 {
10716 	u64 reg;
10717 
10718 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10719 	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10720 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10721 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10722 }
10723 
10724 /* change only the total credit limit portion of SendCmGlobalCredit */
10725 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10726 {
10727 	u64 reg;
10728 
10729 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10730 	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10731 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10732 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10733 }
10734 
10735 /* set the given per-VL shared limit */
10736 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10737 {
10738 	u64 reg;
10739 	u32 addr;
10740 
10741 	if (vl < TXE_NUM_DATA_VL)
10742 		addr = SEND_CM_CREDIT_VL + (8 * vl);
10743 	else
10744 		addr = SEND_CM_CREDIT_VL15;
10745 
10746 	reg = read_csr(dd, addr);
10747 	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10748 	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10749 	write_csr(dd, addr, reg);
10750 }
10751 
10752 /* set the given per-VL dedicated limit */
10753 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10754 {
10755 	u64 reg;
10756 	u32 addr;
10757 
10758 	if (vl < TXE_NUM_DATA_VL)
10759 		addr = SEND_CM_CREDIT_VL + (8 * vl);
10760 	else
10761 		addr = SEND_CM_CREDIT_VL15;
10762 
10763 	reg = read_csr(dd, addr);
10764 	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10765 	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10766 	write_csr(dd, addr, reg);
10767 }
10768 
10769 /* spin until the given per-VL status mask bits clear */
10770 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10771 				     const char *which)
10772 {
10773 	unsigned long timeout;
10774 	u64 reg;
10775 
10776 	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10777 	while (1) {
10778 		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10779 
10780 		if (reg == 0)
10781 			return;	/* success */
10782 		if (time_after(jiffies, timeout))
10783 			break;		/* timed out */
10784 		udelay(1);
10785 	}
10786 
10787 	dd_dev_err(dd,
10788 		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10789 		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10790 	/*
10791 	 * If this occurs, it is likely there was a credit loss on the link.
10792 	 * The only recovery from that is a link bounce.
10793 	 */
10794 	dd_dev_err(dd,
10795 		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10796 }
10797 
10798 /*
10799  * The number of credits on the VLs may be changed while everything
10800  * is "live", but the following algorithm must be followed due to
10801  * how the hardware is actually implemented.  In particular,
10802  * Return_Credit_Status[] is the only correct status check.
10803  *
10804  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10805  *     set Global_Shared_Credit_Limit = 0
10806  *     use_all_vl = 1
10807  * mask0 = all VLs that are changing either dedicated or shared limits
10808  * set Shared_Limit[mask0] = 0
10809  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10810  * if (changing any dedicated limit)
10811  *     mask1 = all VLs that are lowering dedicated limits
10812  *     lower Dedicated_Limit[mask1]
10813  *     spin until Return_Credit_Status[mask1] == 0
10814  *     raise Dedicated_Limits
10815  * raise Shared_Limits
10816  * raise Global_Shared_Credit_Limit
10817  *
10818  * lower = if the new limit is lower, set the limit to the new value
10819  * raise = if the new limit is higher than the current value (may be changed
10820  *	earlier in the algorithm), set the new limit to the new value
10821  */
10822 int set_buffer_control(struct hfi1_pportdata *ppd,
10823 		       struct buffer_control *new_bc)
10824 {
10825 	struct hfi1_devdata *dd = ppd->dd;
10826 	u64 changing_mask, ld_mask, stat_mask;
10827 	int change_count;
10828 	int i, use_all_mask;
10829 	int this_shared_changing;
10830 	int vl_count = 0, ret;
10831 	/*
10832 	 * A0: add the variable any_shared_limit_changing below and in the
10833 	 * algorithm above.  If removing A0 support, it can be removed.
10834 	 */
10835 	int any_shared_limit_changing;
10836 	struct buffer_control cur_bc;
10837 	u8 changing[OPA_MAX_VLS];
10838 	u8 lowering_dedicated[OPA_MAX_VLS];
10839 	u16 cur_total;
10840 	u32 new_total = 0;
10841 	const u64 all_mask =
10842 	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10843 	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10844 	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10845 	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10846 	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10847 	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10848 	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10849 	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10850 	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10851 
10852 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10853 #define NUM_USABLE_VLS 16	/* look at VL15 and less */
10854 
10855 	/* find the new total credits, do sanity check on unused VLs */
10856 	for (i = 0; i < OPA_MAX_VLS; i++) {
10857 		if (valid_vl(i)) {
10858 			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10859 			continue;
10860 		}
10861 		nonzero_msg(dd, i, "dedicated",
10862 			    be16_to_cpu(new_bc->vl[i].dedicated));
10863 		nonzero_msg(dd, i, "shared",
10864 			    be16_to_cpu(new_bc->vl[i].shared));
10865 		new_bc->vl[i].dedicated = 0;
10866 		new_bc->vl[i].shared = 0;
10867 	}
10868 	new_total += be16_to_cpu(new_bc->overall_shared_limit);
10869 
10870 	/* fetch the current values */
10871 	get_buffer_control(dd, &cur_bc, &cur_total);
10872 
10873 	/*
10874 	 * Create the masks we will use.
10875 	 */
10876 	memset(changing, 0, sizeof(changing));
10877 	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10878 	/*
10879 	 * NOTE: Assumes that the individual VL bits are adjacent and in
10880 	 * increasing order
10881 	 */
10882 	stat_mask =
10883 		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10884 	changing_mask = 0;
10885 	ld_mask = 0;
10886 	change_count = 0;
10887 	any_shared_limit_changing = 0;
10888 	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10889 		if (!valid_vl(i))
10890 			continue;
10891 		this_shared_changing = new_bc->vl[i].shared
10892 						!= cur_bc.vl[i].shared;
10893 		if (this_shared_changing)
10894 			any_shared_limit_changing = 1;
10895 		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10896 		    this_shared_changing) {
10897 			changing[i] = 1;
10898 			changing_mask |= stat_mask;
10899 			change_count++;
10900 		}
10901 		if (be16_to_cpu(new_bc->vl[i].dedicated) <
10902 					be16_to_cpu(cur_bc.vl[i].dedicated)) {
10903 			lowering_dedicated[i] = 1;
10904 			ld_mask |= stat_mask;
10905 		}
10906 	}
10907 
10908 	/* bracket the credit change with a total adjustment */
10909 	if (new_total > cur_total)
10910 		set_global_limit(dd, new_total);
10911 
10912 	/*
10913 	 * Start the credit change algorithm.
10914 	 */
10915 	use_all_mask = 0;
10916 	if ((be16_to_cpu(new_bc->overall_shared_limit) <
10917 	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
10918 	    (is_ax(dd) && any_shared_limit_changing)) {
10919 		set_global_shared(dd, 0);
10920 		cur_bc.overall_shared_limit = 0;
10921 		use_all_mask = 1;
10922 	}
10923 
10924 	for (i = 0; i < NUM_USABLE_VLS; i++) {
10925 		if (!valid_vl(i))
10926 			continue;
10927 
10928 		if (changing[i]) {
10929 			set_vl_shared(dd, i, 0);
10930 			cur_bc.vl[i].shared = 0;
10931 		}
10932 	}
10933 
10934 	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10935 				 "shared");
10936 
10937 	if (change_count > 0) {
10938 		for (i = 0; i < NUM_USABLE_VLS; i++) {
10939 			if (!valid_vl(i))
10940 				continue;
10941 
10942 			if (lowering_dedicated[i]) {
10943 				set_vl_dedicated(dd, i,
10944 						 be16_to_cpu(new_bc->
10945 							     vl[i].dedicated));
10946 				cur_bc.vl[i].dedicated =
10947 						new_bc->vl[i].dedicated;
10948 			}
10949 		}
10950 
10951 		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10952 
10953 		/* now raise all dedicated that are going up */
10954 		for (i = 0; i < NUM_USABLE_VLS; i++) {
10955 			if (!valid_vl(i))
10956 				continue;
10957 
10958 			if (be16_to_cpu(new_bc->vl[i].dedicated) >
10959 					be16_to_cpu(cur_bc.vl[i].dedicated))
10960 				set_vl_dedicated(dd, i,
10961 						 be16_to_cpu(new_bc->
10962 							     vl[i].dedicated));
10963 		}
10964 	}
10965 
10966 	/* next raise all shared that are going up */
10967 	for (i = 0; i < NUM_USABLE_VLS; i++) {
10968 		if (!valid_vl(i))
10969 			continue;
10970 
10971 		if (be16_to_cpu(new_bc->vl[i].shared) >
10972 				be16_to_cpu(cur_bc.vl[i].shared))
10973 			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10974 	}
10975 
10976 	/* finally raise the global shared */
10977 	if (be16_to_cpu(new_bc->overall_shared_limit) >
10978 	    be16_to_cpu(cur_bc.overall_shared_limit))
10979 		set_global_shared(dd,
10980 				  be16_to_cpu(new_bc->overall_shared_limit));
10981 
10982 	/* bracket the credit change with a total adjustment */
10983 	if (new_total < cur_total)
10984 		set_global_limit(dd, new_total);
10985 
10986 	/*
10987 	 * Determine the actual number of operational VLs using the number of
10988 	 * dedicated and shared credits for each VL.
10989 	 */
10990 	if (change_count > 0) {
10991 		for (i = 0; i < TXE_NUM_DATA_VL; i++)
10992 			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10993 			    be16_to_cpu(new_bc->vl[i].shared) > 0)
10994 				vl_count++;
10995 		ppd->actual_vls_operational = vl_count;
10996 		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10997 				    ppd->actual_vls_operational :
10998 				    ppd->vls_operational,
10999 				    NULL);
11000 		if (ret == 0)
11001 			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11002 					   ppd->actual_vls_operational :
11003 					   ppd->vls_operational, NULL);
11004 		if (ret)
11005 			return ret;
11006 	}
11007 	return 0;
11008 }
11009 
11010 /*
11011  * Read the given fabric manager table. Return the size of the
11012  * table (in bytes) on success, and a negative error code on
11013  * failure.
11014  */
11015 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11016 
11017 {
11018 	int size;
11019 	struct vl_arb_cache *vlc;
11020 
11021 	switch (which) {
11022 	case FM_TBL_VL_HIGH_ARB:
11023 		size = 256;
11024 		/*
11025 		 * OPA specifies 128 elements (of 2 bytes each), though
11026 		 * HFI supports only 16 elements in h/w.
11027 		 */
11028 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11029 		vl_arb_get_cache(vlc, t);
11030 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11031 		break;
11032 	case FM_TBL_VL_LOW_ARB:
11033 		size = 256;
11034 		/*
11035 		 * OPA specifies 128 elements (of 2 bytes each), though
11036 		 * HFI supports only 16 elements in h/w.
11037 		 */
11038 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11039 		vl_arb_get_cache(vlc, t);
11040 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11041 		break;
11042 	case FM_TBL_BUFFER_CONTROL:
11043 		size = get_buffer_control(ppd->dd, t, NULL);
11044 		break;
11045 	case FM_TBL_SC2VLNT:
11046 		size = get_sc2vlnt(ppd->dd, t);
11047 		break;
11048 	case FM_TBL_VL_PREEMPT_ELEMS:
11049 		size = 256;
11050 		/* OPA specifies 128 elements, of 2 bytes each */
11051 		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11052 		break;
11053 	case FM_TBL_VL_PREEMPT_MATRIX:
11054 		size = 256;
11055 		/*
11056 		 * OPA specifies that this is the same size as the VL
11057 		 * arbitration tables (i.e., 256 bytes).
11058 		 */
11059 		break;
11060 	default:
11061 		return -EINVAL;
11062 	}
11063 	return size;
11064 }
11065 
11066 /*
11067  * Write the given fabric manager table.
11068  */
11069 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11070 {
11071 	int ret = 0;
11072 	struct vl_arb_cache *vlc;
11073 
11074 	switch (which) {
11075 	case FM_TBL_VL_HIGH_ARB:
11076 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11077 		if (vl_arb_match_cache(vlc, t)) {
11078 			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11079 			break;
11080 		}
11081 		vl_arb_set_cache(vlc, t);
11082 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11083 		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11084 				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11085 		break;
11086 	case FM_TBL_VL_LOW_ARB:
11087 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11088 		if (vl_arb_match_cache(vlc, t)) {
11089 			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11090 			break;
11091 		}
11092 		vl_arb_set_cache(vlc, t);
11093 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11094 		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11095 				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11096 		break;
11097 	case FM_TBL_BUFFER_CONTROL:
11098 		ret = set_buffer_control(ppd, t);
11099 		break;
11100 	case FM_TBL_SC2VLNT:
11101 		set_sc2vlnt(ppd->dd, t);
11102 		break;
11103 	default:
11104 		ret = -EINVAL;
11105 	}
11106 	return ret;
11107 }
11108 
11109 /*
11110  * Disable all data VLs.
11111  *
11112  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11113  */
11114 static int disable_data_vls(struct hfi1_devdata *dd)
11115 {
11116 	if (is_ax(dd))
11117 		return 1;
11118 
11119 	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11120 
11121 	return 0;
11122 }
11123 
11124 /*
11125  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11126  * Just re-enables all data VLs (the "fill" part happens
11127  * automatically - the name was chosen for symmetry with
11128  * stop_drain_data_vls()).
11129  *
11130  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11131  */
11132 int open_fill_data_vls(struct hfi1_devdata *dd)
11133 {
11134 	if (is_ax(dd))
11135 		return 1;
11136 
11137 	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11138 
11139 	return 0;
11140 }
11141 
11142 /*
11143  * drain_data_vls() - assumes that disable_data_vls() has been called;
11144  * waits for the occupancy of the per-VL FIFOs (for all contexts) and of
11145  * the SDMA engines to drop to 0.
11146  */
11147 static void drain_data_vls(struct hfi1_devdata *dd)
11148 {
11149 	sc_wait(dd);
11150 	sdma_wait(dd);
11151 	pause_for_credit_return(dd);
11152 }
11153 
11154 /*
11155  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11156  *
11157  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11158  * meant to be used like this:
11159  *
11160  * stop_drain_data_vls(dd);
11161  * // do things with per-VL resources
11162  * open_fill_data_vls(dd);
11163  */
11164 int stop_drain_data_vls(struct hfi1_devdata *dd)
11165 {
11166 	int ret;
11167 
11168 	ret = disable_data_vls(dd);
11169 	if (ret == 0)
11170 		drain_data_vls(dd);
11171 
11172 	return ret;
11173 }
11174 
11175 /*
11176  * Convert a nanosecond time to a cclock count.  No matter how slow
11177  * the cclock, a non-zero ns will always have a non-zero result.
11178  */
11179 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11180 {
11181 	u32 cclocks;
11182 
11183 	if (dd->icode == ICODE_FPGA_EMULATION)
11184 		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11185 	else  /* simulation pretends to be ASIC */
11186 		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11187 	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
11188 		cclocks = 1;
11189 	return cclocks;
11190 }
11191 
11192 /*
11193  * Convert a cclock count to nanoseconds. Not matter how slow
11194  * Convert a cclock count to nanoseconds.  No matter how slow
11195  */
11196 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11197 {
11198 	u32 ns;
11199 
11200 	if (dd->icode == ICODE_FPGA_EMULATION)
11201 		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11202 	else  /* simulation pretends to be ASIC */
11203 		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11204 	if (cclocks && !ns)
11205 		ns = 1;
11206 	return ns;
11207 }
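
/*
 * Illustration of the conversions above (the cclock periods used here
 * are hypothetical, chosen only for easy arithmetic): with a 1000 ps
 * cclock period, ns_to_cclock(dd, 5) = (5 * 1000) / 1000 = 5 cclocks;
 * with a 2000 ps period, ns_to_cclock(dd, 1) computes 0 and is rounded
 * up to 1, so a non-zero request never becomes zero.  cclock_to_ns()
 * is the inverse and gives the same round-up-to-1 guarantee.
 */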
11208 
11209 /*
11210  * Dynamically adjust the receive interrupt timeout for a context based on
11211  * incoming packet rate.
11212  *
11213  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11214  */
11215 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11216 {
11217 	struct hfi1_devdata *dd = rcd->dd;
11218 	u32 timeout = rcd->rcvavail_timeout;
11219 
11220 	/*
11221 	 * This algorithm doubles or halves the timeout depending on whether
11222 	 * the number of packets received in this interrupt was less than or
11223 	 * greater than or equal to the interrupt count.
11224 	 *
11225 	 * The calculations below do not allow a steady state to be achieved.
11226 	 * Only at the endpoints is it possible to have an unchanging
11227 	 * timeout.
11228 	 */
11229 	if (npkts < rcv_intr_count) {
11230 		/*
11231 		 * Not enough packets arrived before the timeout, adjust
11232 		 * timeout downward.
11233 		 */
11234 		if (timeout < 2) /* already at minimum? */
11235 			return;
11236 		timeout >>= 1;
11237 	} else {
11238 		/*
11239 		 * More than enough packets arrived before the timeout, adjust
11240 		 * timeout upward.
11241 		 */
11242 		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11243 			return;
11244 		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11245 	}
11246 
11247 	rcd->rcvavail_timeout = timeout;
11248 	/*
11249 	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11250 	 * been verified to be in range
11251 	 */
11252 	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11253 			(u64)timeout <<
11254 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11255 }
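
/*
 * Sketch of the adjustment above (timeout values are hypothetical):
 * when fewer than rcv_intr_count packets arrive per interrupt the
 * timeout halves each time, e.g. 840 -> 420 -> 210 -> ... -> 1, and
 * then stays at 1 because values below 2 are left alone.  When
 * rcv_intr_count or more packets arrive it doubles, e.g.
 * 840 -> 1680 -> ..., clamped at dd->rcv_intr_timeout_csr.
 */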
11256 
11257 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11258 		    u32 intr_adjust, u32 npkts)
11259 {
11260 	struct hfi1_devdata *dd = rcd->dd;
11261 	u64 reg;
11262 	u32 ctxt = rcd->ctxt;
11263 
11264 	/*
11265 	 * Need to write timeout register before updating RcvHdrHead to ensure
11266 	 * that a new value is used when the HW decides to restart counting.
11267 	 */
11268 	if (intr_adjust)
11269 		adjust_rcv_timeout(rcd, npkts);
11270 	if (updegr) {
11271 		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11272 			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11273 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11274 	}
11275 	mmiowb();
11276 	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11277 		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11278 			<< RCV_HDR_HEAD_HEAD_SHIFT);
11279 	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11280 	mmiowb();
11281 }
11282 
11283 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11284 {
11285 	u32 head, tail;
11286 
11287 	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11288 		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11289 
11290 	if (rcd->rcvhdrtail_kvaddr)
11291 		tail = get_rcvhdrtail(rcd);
11292 	else
11293 		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11294 
11295 	return head == tail;
11296 }
11297 
11298 /*
11299  * Context Control and Receive Array encoding for buffer size:
11300  *	0x0 invalid
11301  *	0x1   4 KB
11302  *	0x2   8 KB
11303  *	0x3  16 KB
11304  *	0x4  32 KB
11305  *	0x5  64 KB
11306  *	0x6 128 KB
11307  *	0x7 256 KB
11308  *	0x8 512 KB (Receive Array only)
11309  *	0x9   1 MB (Receive Array only)
11310  *	0xa   2 MB (Receive Array only)
11311  *
11312  *	0xB-0xF - reserved (Receive Array only)
11313  *
11314  *
11315  * This routine assumes that the value has already been sanity checked.
11316  */
11317 static u32 encoded_size(u32 size)
11318 {
11319 	switch (size) {
11320 	case   4 * 1024: return 0x1;
11321 	case   8 * 1024: return 0x2;
11322 	case  16 * 1024: return 0x3;
11323 	case  32 * 1024: return 0x4;
11324 	case  64 * 1024: return 0x5;
11325 	case 128 * 1024: return 0x6;
11326 	case 256 * 1024: return 0x7;
11327 	case 512 * 1024: return 0x8;
11328 	case   1 * 1024 * 1024: return 0x9;
11329 	case   2 * 1024 * 1024: return 0xa;
11330 	}
11331 	return 0x1;	/* if invalid, go with the minimum size */
11332 }
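
/*
 * For example, encoded_size(64 * 1024) returns 0x5 and
 * encoded_size(2 * 1024 * 1024) returns 0xa; any size not in the table
 * falls back to 0x1 (4 KB), the minimum encoding.
 */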
11333 
11334 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11335 {
11336 	struct hfi1_ctxtdata *rcd;
11337 	u64 rcvctrl, reg;
11338 	int did_enable = 0;
11339 
11340 	rcd = dd->rcd[ctxt];
11341 	if (!rcd)
11342 		return;
11343 
11344 	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11345 
11346 	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11347 	/* if the context is already enabled, don't do the extra steps */
11348 	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11349 	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11350 		/* reset the tail and hdr addresses, and sequence count */
11351 		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11352 				rcd->rcvhdrq_phys);
11353 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11354 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11355 					rcd->rcvhdrqtailaddr_phys);
11356 		rcd->seq_cnt = 1;
11357 
11358 		/* reset the cached receive header queue head value */
11359 		rcd->head = 0;
11360 
11361 		/*
11362 		 * Zero the receive header queue so we don't get false
11363 		 * positives when checking the sequence number.  The
11364 		 * sequence numbers could land exactly on the same spot.
11365 		 * E.g. a rcd restart before the receive header wrapped.
11366 		 */
11367 		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11368 
11369 		/* starting timeout */
11370 		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11371 
11372 		/* enable the context */
11373 		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11374 
11375 		/* clean the egr buffer size first */
11376 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11377 		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11378 				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11379 					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11380 
11381 		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11382 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11383 		did_enable = 1;
11384 
11385 		/* zero RcvEgrIndexHead */
11386 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11387 
11388 		/* set eager count and base index */
11389 		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11390 			& RCV_EGR_CTRL_EGR_CNT_MASK)
11391 		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11392 			(((rcd->eager_base >> RCV_SHIFT)
11393 			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11394 			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11395 		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11396 
11397 		/*
11398 		 * Set TID (expected) count and base index.
11399 		 * rcd->expected_count is set to individual RcvArray entries,
11400 		 * not pairs, and the CSR takes a pair-count in groups of
11401 		 * four, so divide by 8.
11402 		 */
11403 		reg = (((rcd->expected_count >> RCV_SHIFT)
11404 					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11405 				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11406 		      (((rcd->expected_base >> RCV_SHIFT)
11407 					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11408 				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11409 		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11410 		if (ctxt == HFI1_CTRL_CTXT)
11411 			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11412 	}
11413 	if (op & HFI1_RCVCTRL_CTXT_DIS) {
11414 		write_csr(dd, RCV_VL15, 0);
11415 		/*
11416 		 * When a receive context is being disabled, turn on tail
11417 		 * update with a dummy tail address and then disable the
11418 		 * receive context.
11419 		 */
11420 		if (dd->rcvhdrtail_dummy_physaddr) {
11421 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11422 					dd->rcvhdrtail_dummy_physaddr);
11423 			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11424 			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11425 		}
11426 
11427 		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11428 	}
11429 	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11430 		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11431 	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11432 		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11433 	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11434 		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11435 	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11436 		/* See comment on RcvCtxtCtrl.TailUpd above */
11437 		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11438 			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11439 	}
11440 	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11441 		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11442 	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11443 		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11444 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11445 		/*
11446 		 * In one-packet-per-eager mode, the size comes from
11447 		 * the RcvArray entry.
11448 		 */
11449 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11450 		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11451 	}
11452 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11453 		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11454 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11455 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11456 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11457 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11458 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11459 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11460 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11461 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11462 	rcd->rcvctrl = rcvctrl;
11463 	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11464 	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11465 
11466 	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
11467 	if (did_enable &&
11468 	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11469 		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11470 		if (reg != 0) {
11471 			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11472 				    ctxt, reg);
11473 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11474 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11475 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11476 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11477 			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11478 			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11479 				    ctxt, reg, reg == 0 ? "not" : "still");
11480 		}
11481 	}
11482 
11483 	if (did_enable) {
11484 		/*
11485 		 * The interrupt timeout and count must be set after
11486 		 * the context is enabled to take effect.
11487 		 */
11488 		/* set interrupt timeout */
11489 		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11490 				(u64)rcd->rcvavail_timeout <<
11491 				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11492 
11493 		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11494 		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11495 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11496 	}
11497 
11498 	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11499 		/*
11500 		 * If the context has been disabled and the Tail Update has
11501 		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11502 		 * so it doesn't contain an address that is invalid.
11503 		 */
11504 		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11505 				dd->rcvhdrtail_dummy_physaddr);
11506 }
11507 
11508 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11509 {
11510 	int ret;
11511 	u64 val = 0;
11512 
11513 	if (namep) {
11514 		ret = dd->cntrnameslen;
11515 		*namep = dd->cntrnames;
11516 	} else {
11517 		const struct cntr_entry *entry;
11518 		int i, j;
11519 
11520 		ret = (dd->ndevcntrs) * sizeof(u64);
11521 
11522 		/* Get the start of the block of counters */
11523 		*cntrp = dd->cntrs;
11524 
11525 		/*
11526 		 * Now go and fill in each counter in the block.
11527 		 */
11528 		for (i = 0; i < DEV_CNTR_LAST; i++) {
11529 			entry = &dev_cntrs[i];
11530 			hfi1_cdbg(CNTR, "reading %s", entry->name);
11531 			if (entry->flags & CNTR_DISABLED) {
11532 				/* Nothing */
11533 				hfi1_cdbg(CNTR, "\tDisabled\n");
11534 			} else {
11535 				if (entry->flags & CNTR_VL) {
11536 					hfi1_cdbg(CNTR, "\tPer VL\n");
11537 					for (j = 0; j < C_VL_COUNT; j++) {
11538 						val = entry->rw_cntr(entry,
11539 								  dd, j,
11540 								  CNTR_MODE_R,
11541 								  0);
11542 						hfi1_cdbg(
11543 						   CNTR,
11544 						   "\t\tRead 0x%llx for %d\n",
11545 						   val, j);
11546 						dd->cntrs[entry->offset + j] =
11547 									    val;
11548 					}
11549 				} else if (entry->flags & CNTR_SDMA) {
11550 					hfi1_cdbg(CNTR,
11551 						  "\t Per SDMA Engine\n");
11552 					for (j = 0; j < dd->chip_sdma_engines;
11553 					     j++) {
11554 						val =
11555 						entry->rw_cntr(entry, dd, j,
11556 							       CNTR_MODE_R, 0);
11557 						hfi1_cdbg(CNTR,
11558 							  "\t\tRead 0x%llx for %d\n",
11559 							  val, j);
11560 						dd->cntrs[entry->offset + j] =
11561 									val;
11562 					}
11563 				} else {
11564 					val = entry->rw_cntr(entry, dd,
11565 							CNTR_INVALID_VL,
11566 							CNTR_MODE_R, 0);
11567 					dd->cntrs[entry->offset] = val;
11568 					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11569 				}
11570 			}
11571 		}
11572 	}
11573 	return ret;
11574 }
11575 
11576 /*
11577  * Used by sysfs to create files for hfi stats to read
11578  */
11579 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11580 {
11581 	int ret;
11582 	u64 val = 0;
11583 
11584 	if (namep) {
11585 		ret = ppd->dd->portcntrnameslen;
11586 		*namep = ppd->dd->portcntrnames;
11587 	} else {
11588 		const struct cntr_entry *entry;
11589 		int i, j;
11590 
11591 		ret = ppd->dd->nportcntrs * sizeof(u64);
11592 		*cntrp = ppd->cntrs;
11593 
11594 		for (i = 0; i < PORT_CNTR_LAST; i++) {
11595 			entry = &port_cntrs[i];
11596 			hfi1_cdbg(CNTR, "reading %s", entry->name);
11597 			if (entry->flags & CNTR_DISABLED) {
11598 				/* Nothing */
11599 				hfi1_cdbg(CNTR, "\tDisabled\n");
11600 				continue;
11601 			}
11602 
11603 			if (entry->flags & CNTR_VL) {
11604 				hfi1_cdbg(CNTR, "\tPer VL");
11605 				for (j = 0; j < C_VL_COUNT; j++) {
11606 					val = entry->rw_cntr(entry, ppd, j,
11607 							       CNTR_MODE_R,
11608 							       0);
11609 					hfi1_cdbg(
11610 					   CNTR,
11611 					   "\t\tRead 0x%llx for %d",
11612 					   val, j);
11613 					ppd->cntrs[entry->offset + j] = val;
11614 				}
11615 			} else {
11616 				val = entry->rw_cntr(entry, ppd,
11617 						       CNTR_INVALID_VL,
11618 						       CNTR_MODE_R,
11619 						       0);
11620 				ppd->cntrs[entry->offset] = val;
11621 				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11622 			}
11623 		}
11624 	}
11625 	return ret;
11626 }
11627 
11628 static void free_cntrs(struct hfi1_devdata *dd)
11629 {
11630 	struct hfi1_pportdata *ppd;
11631 	int i;
11632 
11633 	if (dd->synth_stats_timer.data)
11634 		del_timer_sync(&dd->synth_stats_timer);
11635 	dd->synth_stats_timer.data = 0;
11636 	ppd = (struct hfi1_pportdata *)(dd + 1);
11637 	for (i = 0; i < dd->num_pports; i++, ppd++) {
11638 		kfree(ppd->cntrs);
11639 		kfree(ppd->scntrs);
11640 		free_percpu(ppd->ibport_data.rvp.rc_acks);
11641 		free_percpu(ppd->ibport_data.rvp.rc_qacks);
11642 		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11643 		ppd->cntrs = NULL;
11644 		ppd->scntrs = NULL;
11645 		ppd->ibport_data.rvp.rc_acks = NULL;
11646 		ppd->ibport_data.rvp.rc_qacks = NULL;
11647 		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11648 	}
11649 	kfree(dd->portcntrnames);
11650 	dd->portcntrnames = NULL;
11651 	kfree(dd->cntrs);
11652 	dd->cntrs = NULL;
11653 	kfree(dd->scntrs);
11654 	dd->scntrs = NULL;
11655 	kfree(dd->cntrnames);
11656 	dd->cntrnames = NULL;
11657 }
11658 
11659 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11660 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11661 
11662 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11663 			      u64 *psval, void *context, int vl)
11664 {
11665 	u64 val;
11666 	u64 sval = *psval;
11667 
11668 	if (entry->flags & CNTR_DISABLED) {
11669 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
11670 		return 0;
11671 	}
11672 
11673 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11674 
11675 	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11676 
11677 	/* If it's a synthetic counter there is more work we need to do */
11678 	if (entry->flags & CNTR_SYNTH) {
11679 		if (sval == CNTR_MAX) {
11680 			/* No need to read already saturated */
11681 			return CNTR_MAX;
11682 		}
11683 
11684 		if (entry->flags & CNTR_32BIT) {
11685 			/* 32bit counters can wrap multiple times */
11686 			u64 upper = sval >> 32;
11687 			u64 lower = (sval << 32) >> 32;
11688 
11689 			if (lower > val) { /* hw wrapped */
11690 				if (upper == CNTR_32BIT_MAX)
11691 					val = CNTR_MAX;
11692 				else
11693 					upper++;
11694 			}
11695 
11696 			if (val != CNTR_MAX)
11697 				val = (upper << 32) | val;
11698 
11699 		} else {
11700 			/* If we rolled we are saturated */
11701 			if ((val < sval) || (val > CNTR_MAX))
11702 				val = CNTR_MAX;
11703 		}
11704 	}
11705 
11706 	*psval = val;
11707 
11708 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11709 
11710 	return val;
11711 }
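
/*
 * Illustration of the 32-bit wrap handling above (values are made up):
 * with a saved synthetic value of 0x100000010 (upper = 0x1,
 * lower = 0x10) and a hardware read of 0x4, lower > val means the
 * hardware counter wrapped, so upper is bumped to 0x2 and the
 * synthesized result becomes 0x200000004.  Once a counter reaches
 * CNTR_MAX it is treated as saturated and is not read again.
 */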
11712 
11713 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11714 			       struct cntr_entry *entry,
11715 			       u64 *psval, void *context, int vl, u64 data)
11716 {
11717 	u64 val;
11718 
11719 	if (entry->flags & CNTR_DISABLED) {
11720 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
11721 		return 0;
11722 	}
11723 
11724 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11725 
11726 	if (entry->flags & CNTR_SYNTH) {
11727 		*psval = data;
11728 		if (entry->flags & CNTR_32BIT) {
11729 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11730 					     (data << 32) >> 32);
11731 			val = data; /* return the full 64bit value */
11732 		} else {
11733 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11734 					     data);
11735 		}
11736 	} else {
11737 		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11738 	}
11739 
11740 	*psval = val;
11741 
11742 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11743 
11744 	return val;
11745 }
11746 
11747 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11748 {
11749 	struct cntr_entry *entry;
11750 	u64 *sval;
11751 
11752 	entry = &dev_cntrs[index];
11753 	sval = dd->scntrs + entry->offset;
11754 
11755 	if (vl != CNTR_INVALID_VL)
11756 		sval += vl;
11757 
11758 	return read_dev_port_cntr(dd, entry, sval, dd, vl);
11759 }
11760 
11761 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11762 {
11763 	struct cntr_entry *entry;
11764 	u64 *sval;
11765 
11766 	entry = &dev_cntrs[index];
11767 	sval = dd->scntrs + entry->offset;
11768 
11769 	if (vl != CNTR_INVALID_VL)
11770 		sval += vl;
11771 
11772 	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11773 }
11774 
11775 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11776 {
11777 	struct cntr_entry *entry;
11778 	u64 *sval;
11779 
11780 	entry = &port_cntrs[index];
11781 	sval = ppd->scntrs + entry->offset;
11782 
11783 	if (vl != CNTR_INVALID_VL)
11784 		sval += vl;
11785 
11786 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11787 	    (index <= C_RCV_HDR_OVF_LAST)) {
11788 		/* We do not want to bother for disabled contexts */
11789 		return 0;
11790 	}
11791 
11792 	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11793 }
11794 
11795 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11796 {
11797 	struct cntr_entry *entry;
11798 	u64 *sval;
11799 
11800 	entry = &port_cntrs[index];
11801 	sval = ppd->scntrs + entry->offset;
11802 
11803 	if (vl != CNTR_INVALID_VL)
11804 		sval += vl;
11805 
11806 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11807 	    (index <= C_RCV_HDR_OVF_LAST)) {
11808 		/* We do not want to bother for disabled contexts */
11809 		return 0;
11810 	}
11811 
11812 	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11813 }
11814 
11815 static void update_synth_timer(unsigned long opaque)
11816 {
11817 	u64 cur_tx;
11818 	u64 cur_rx;
11819 	u64 total_flits;
11820 	u8 update = 0;
11821 	int i, j, vl;
11822 	struct hfi1_pportdata *ppd;
11823 	struct cntr_entry *entry;
11824 
11825 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11826 
11827 	/*
11828 	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11829 	 * check to watch for potential rollover. We can do this by looking at
11830 	 * the number of flits sent/received. If the total flits exceeds 32 bits
11831 	 * then we have to iterate over all the counters and update.
11832 	 */
11833 	entry = &dev_cntrs[C_DC_RCV_FLITS];
11834 	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11835 
11836 	entry = &dev_cntrs[C_DC_XMIT_FLITS];
11837 	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11838 
11839 	hfi1_cdbg(
11840 	    CNTR,
11841 	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11842 	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11843 
11844 	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11845 		/*
11846 		 * May not be strictly necessary to update but it won't hurt and
11847 		 * simplifies the logic here.
11848 		 */
11849 		update = 1;
11850 		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11851 			  dd->unit);
11852 	} else {
11853 		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11854 		hfi1_cdbg(CNTR,
11855 			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11856 			  total_flits, (u64)CNTR_32BIT_MAX);
11857 		if (total_flits >= CNTR_32BIT_MAX) {
11858 			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11859 				  dd->unit);
11860 			update = 1;
11861 		}
11862 	}
11863 
11864 	if (update) {
11865 		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11866 		for (i = 0; i < DEV_CNTR_LAST; i++) {
11867 			entry = &dev_cntrs[i];
11868 			if (entry->flags & CNTR_VL) {
11869 				for (vl = 0; vl < C_VL_COUNT; vl++)
11870 					read_dev_cntr(dd, i, vl);
11871 			} else {
11872 				read_dev_cntr(dd, i, CNTR_INVALID_VL);
11873 			}
11874 		}
11875 		ppd = (struct hfi1_pportdata *)(dd + 1);
11876 		for (i = 0; i < dd->num_pports; i++, ppd++) {
11877 			for (j = 0; j < PORT_CNTR_LAST; j++) {
11878 				entry = &port_cntrs[j];
11879 				if (entry->flags & CNTR_VL) {
11880 					for (vl = 0; vl < C_VL_COUNT; vl++)
11881 						read_port_cntr(ppd, j, vl);
11882 				} else {
11883 					read_port_cntr(ppd, j, CNTR_INVALID_VL);
11884 				}
11885 			}
11886 		}
11887 
11888 		/*
11889 		 * We want the value in the register. The goal is to keep track
11890 		 * of the number of "ticks" not the counter value. In other
11891 		 * words if the register rolls we want to notice it and go ahead
11892 		 * and force an update.
11893 		 */
11894 		entry = &dev_cntrs[C_DC_XMIT_FLITS];
11895 		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11896 						CNTR_MODE_R, 0);
11897 
11898 		entry = &dev_cntrs[C_DC_RCV_FLITS];
11899 		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11900 						CNTR_MODE_R, 0);
11901 
11902 		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11903 			  dd->unit, dd->last_tx, dd->last_rx);
11904 
11905 	} else {
11906 		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11907 	}
11908 
11909 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11910 }
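
/*
 * In short: if either flit register reads back lower than its saved
 * value, that register itself rolled and a full counter update is
 * forced; otherwise a full update happens only once the combined
 * tx + rx delta since the last update reaches CNTR_32BIT_MAX flits.
 */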
11911 
11912 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11913 static int init_cntrs(struct hfi1_devdata *dd)
11914 {
11915 	int i, rcv_ctxts, j;
11916 	size_t sz;
11917 	char *p;
11918 	char name[C_MAX_NAME];
11919 	struct hfi1_pportdata *ppd;
11920 	const char *bit_type_32 = ",32";
11921 	const int bit_type_32_sz = strlen(bit_type_32);
11922 
11923 	/* set up the stats timer; the add_timer is done at the end */
11924 	setup_timer(&dd->synth_stats_timer, update_synth_timer,
11925 		    (unsigned long)dd);
11926 
11927 	/***********************/
11928 	/* per device counters */
11929 	/***********************/
11930 
11931 	/* size names and determine how many we have */
11932 	dd->ndevcntrs = 0;
11933 	sz = 0;
11934 
11935 	for (i = 0; i < DEV_CNTR_LAST; i++) {
11936 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
11937 			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11938 			continue;
11939 		}
11940 
11941 		if (dev_cntrs[i].flags & CNTR_VL) {
11942 			dev_cntrs[i].offset = dd->ndevcntrs;
11943 			for (j = 0; j < C_VL_COUNT; j++) {
11944 				snprintf(name, C_MAX_NAME, "%s%d",
11945 					 dev_cntrs[i].name, vl_from_idx(j));
11946 				sz += strlen(name);
11947 				/* Add ",32" for 32-bit counters */
11948 				if (dev_cntrs[i].flags & CNTR_32BIT)
11949 					sz += bit_type_32_sz;
11950 				sz++;
11951 				dd->ndevcntrs++;
11952 			}
11953 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
11954 			dev_cntrs[i].offset = dd->ndevcntrs;
11955 			for (j = 0; j < dd->chip_sdma_engines; j++) {
11956 				snprintf(name, C_MAX_NAME, "%s%d",
11957 					 dev_cntrs[i].name, j);
11958 				sz += strlen(name);
11959 				/* Add ",32" for 32-bit counters */
11960 				if (dev_cntrs[i].flags & CNTR_32BIT)
11961 					sz += bit_type_32_sz;
11962 				sz++;
11963 				dd->ndevcntrs++;
11964 			}
11965 		} else {
11966 			/* +1 for newline. */
11967 			sz += strlen(dev_cntrs[i].name) + 1;
11968 			/* Add ",32" for 32-bit counters */
11969 			if (dev_cntrs[i].flags & CNTR_32BIT)
11970 				sz += bit_type_32_sz;
11971 			dev_cntrs[i].offset = dd->ndevcntrs;
11972 			dd->ndevcntrs++;
11973 		}
11974 	}
11975 
11976 	/* allocate space for the counter values */
11977 	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11978 	if (!dd->cntrs)
11979 		goto bail;
11980 
11981 	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11982 	if (!dd->scntrs)
11983 		goto bail;
11984 
11985 	/* allocate space for the counter names */
11986 	dd->cntrnameslen = sz;
11987 	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11988 	if (!dd->cntrnames)
11989 		goto bail;
11990 
11991 	/* fill in the names */
11992 	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11993 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
11994 			/* Nothing */
11995 		} else if (dev_cntrs[i].flags & CNTR_VL) {
11996 			for (j = 0; j < C_VL_COUNT; j++) {
11997 				snprintf(name, C_MAX_NAME, "%s%d",
11998 					 dev_cntrs[i].name,
11999 					 vl_from_idx(j));
12000 				memcpy(p, name, strlen(name));
12001 				p += strlen(name);
12002 
12003 				/* Counter is 32 bits */
12004 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12005 					memcpy(p, bit_type_32, bit_type_32_sz);
12006 					p += bit_type_32_sz;
12007 				}
12008 
12009 				*p++ = '\n';
12010 			}
12011 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12012 			for (j = 0; j < dd->chip_sdma_engines; j++) {
12013 				snprintf(name, C_MAX_NAME, "%s%d",
12014 					 dev_cntrs[i].name, j);
12015 				memcpy(p, name, strlen(name));
12016 				p += strlen(name);
12017 
12018 				/* Counter is 32 bits */
12019 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12020 					memcpy(p, bit_type_32, bit_type_32_sz);
12021 					p += bit_type_32_sz;
12022 				}
12023 
12024 				*p++ = '\n';
12025 			}
12026 		} else {
12027 			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12028 			p += strlen(dev_cntrs[i].name);
12029 
12030 			/* Counter is 32 bits */
12031 			if (dev_cntrs[i].flags & CNTR_32BIT) {
12032 				memcpy(p, bit_type_32, bit_type_32_sz);
12033 				p += bit_type_32_sz;
12034 			}
12035 
12036 			*p++ = '\n';
12037 		}
12038 	}
12039 
12040 	/*********************/
12041 	/* per port counters */
12042 	/*********************/
12043 
12044 	/*
12045 	 * Go through the counters for the overflows and disable the ones we
12046 	 * don't need. This varies based on platform so we need to do it
12047 	 * dynamically here.
12048 	 */
12049 	rcv_ctxts = dd->num_rcv_contexts;
12050 	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12051 	     i <= C_RCV_HDR_OVF_LAST; i++) {
12052 		port_cntrs[i].flags |= CNTR_DISABLED;
12053 	}
12054 
12055 	/* size port counter names and determine how many we have */
12056 	sz = 0;
12057 	dd->nportcntrs = 0;
12058 	for (i = 0; i < PORT_CNTR_LAST; i++) {
12059 		if (port_cntrs[i].flags & CNTR_DISABLED) {
12060 			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12061 			continue;
12062 		}
12063 
12064 		if (port_cntrs[i].flags & CNTR_VL) {
12065 			port_cntrs[i].offset = dd->nportcntrs;
12066 			for (j = 0; j < C_VL_COUNT; j++) {
12067 				snprintf(name, C_MAX_NAME, "%s%d",
12068 					 port_cntrs[i].name, vl_from_idx(j));
12069 				sz += strlen(name);
12070 				/* Add ",32" for 32-bit counters */
12071 				if (port_cntrs[i].flags & CNTR_32BIT)
12072 					sz += bit_type_32_sz;
12073 				sz++;
12074 				dd->nportcntrs++;
12075 			}
12076 		} else {
12077 			/* +1 for newline */
12078 			sz += strlen(port_cntrs[i].name) + 1;
12079 			/* Add ",32" for 32-bit counters */
12080 			if (port_cntrs[i].flags & CNTR_32BIT)
12081 				sz += bit_type_32_sz;
12082 			port_cntrs[i].offset = dd->nportcntrs;
12083 			dd->nportcntrs++;
12084 		}
12085 	}
12086 
12087 	/* allocate space for the counter names */
12088 	dd->portcntrnameslen = sz;
12089 	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12090 	if (!dd->portcntrnames)
12091 		goto bail;
12092 
12093 	/* fill in port cntr names */
12094 	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12095 		if (port_cntrs[i].flags & CNTR_DISABLED)
12096 			continue;
12097 
12098 		if (port_cntrs[i].flags & CNTR_VL) {
12099 			for (j = 0; j < C_VL_COUNT; j++) {
12100 				snprintf(name, C_MAX_NAME, "%s%d",
12101 					 port_cntrs[i].name, vl_from_idx(j));
12102 				memcpy(p, name, strlen(name));
12103 				p += strlen(name);
12104 
12105 				/* Counter is 32 bits */
12106 				if (port_cntrs[i].flags & CNTR_32BIT) {
12107 					memcpy(p, bit_type_32, bit_type_32_sz);
12108 					p += bit_type_32_sz;
12109 				}
12110 
12111 				*p++ = '\n';
12112 			}
12113 		} else {
12114 			memcpy(p, port_cntrs[i].name,
12115 			       strlen(port_cntrs[i].name));
12116 			p += strlen(port_cntrs[i].name);
12117 
12118 			/* Counter is 32 bits */
12119 			if (port_cntrs[i].flags & CNTR_32BIT) {
12120 				memcpy(p, bit_type_32, bit_type_32_sz);
12121 				p += bit_type_32_sz;
12122 			}
12123 
12124 			*p++ = '\n';
12125 		}
12126 	}
12127 
12128 	/* allocate per port storage for counter values */
12129 	ppd = (struct hfi1_pportdata *)(dd + 1);
12130 	for (i = 0; i < dd->num_pports; i++, ppd++) {
12131 		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12132 		if (!ppd->cntrs)
12133 			goto bail;
12134 
12135 		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12136 		if (!ppd->scntrs)
12137 			goto bail;
12138 	}
12139 
12140 	/* CPU counters need to be allocated and zeroed */
12141 	if (init_cpu_counters(dd))
12142 		goto bail;
12143 
12144 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12145 	return 0;
12146 bail:
12147 	free_cntrs(dd);
12148 	return -ENOMEM;
12149 }
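
/*
 * Layout note for the name buffers built above: every counter name is
 * newline-terminated, per-VL and per-SDMA counters expand to one entry
 * per instance (a hypothetical counter "foo" flagged CNTR_VL becomes
 * "foo0", "foo1", ... with the VL number appended), and 32-bit
 * counters carry a ",32" suffix.
 */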
12150 
12151 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12152 {
12153 	switch (chip_lstate) {
12154 	default:
12155 		dd_dev_err(dd,
12156 			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12157 			   chip_lstate);
12158 		/* fall through */
12159 	case LSTATE_DOWN:
12160 		return IB_PORT_DOWN;
12161 	case LSTATE_INIT:
12162 		return IB_PORT_INIT;
12163 	case LSTATE_ARMED:
12164 		return IB_PORT_ARMED;
12165 	case LSTATE_ACTIVE:
12166 		return IB_PORT_ACTIVE;
12167 	}
12168 }
12169 
12170 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12171 {
12172 	/* look at the HFI meta-states only */
12173 	switch (chip_pstate & 0xf0) {
12174 	default:
12175 		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12176 			   chip_pstate);
12177 		/* fall through */
12178 	case PLS_DISABLED:
12179 		return IB_PORTPHYSSTATE_DISABLED;
12180 	case PLS_OFFLINE:
12181 		return OPA_PORTPHYSSTATE_OFFLINE;
12182 	case PLS_POLLING:
12183 		return IB_PORTPHYSSTATE_POLLING;
12184 	case PLS_CONFIGPHY:
12185 		return IB_PORTPHYSSTATE_TRAINING;
12186 	case PLS_LINKUP:
12187 		return IB_PORTPHYSSTATE_LINKUP;
12188 	case PLS_PHYTEST:
12189 		return IB_PORTPHYSSTATE_PHY_TEST;
12190 	}
12191 }
12192 
12193 /* return the OPA port logical state name */
12194 const char *opa_lstate_name(u32 lstate)
12195 {
12196 	static const char * const port_logical_names[] = {
12197 		"PORT_NOP",
12198 		"PORT_DOWN",
12199 		"PORT_INIT",
12200 		"PORT_ARMED",
12201 		"PORT_ACTIVE",
12202 		"PORT_ACTIVE_DEFER",
12203 	};
12204 	if (lstate < ARRAY_SIZE(port_logical_names))
12205 		return port_logical_names[lstate];
12206 	return "unknown";
12207 }
12208 
12209 /* return the OPA port physical state name */
12210 const char *opa_pstate_name(u32 pstate)
12211 {
12212 	static const char * const port_physical_names[] = {
12213 		"PHYS_NOP",
12214 		"reserved1",
12215 		"PHYS_POLL",
12216 		"PHYS_DISABLED",
12217 		"PHYS_TRAINING",
12218 		"PHYS_LINKUP",
12219 		"PHYS_LINK_ERR_RECOVER",
12220 		"PHYS_PHY_TEST",
12221 		"reserved8",
12222 		"PHYS_OFFLINE",
12223 		"PHYS_GANGED",
12224 		"PHYS_TEST",
12225 	};
12226 	if (pstate < ARRAY_SIZE(port_physical_names))
12227 		return port_physical_names[pstate];
12228 	return "unknown";
12229 }
12230 
12231 /*
12232  * Read the hardware link state and set the driver's cached value of it.
12233  * Return the (new) current value.
12234  */
12235 u32 get_logical_state(struct hfi1_pportdata *ppd)
12236 {
12237 	u32 new_state;
12238 
12239 	new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12240 	if (new_state != ppd->lstate) {
12241 		dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12242 			    opa_lstate_name(new_state), new_state);
12243 		ppd->lstate = new_state;
12244 	}
12245 	/*
12246 	 * Set port status flags in the page mapped into userspace
12247 	 * memory. Do it here to ensure a reliable state - this is
12248 	 * the only function called by all state handling code.
12249 	 * Always set the flags because the cached value might have
12250 	 * been changed explicitly outside of this
12251 	 * function.
12252 	 */
12253 	if (ppd->statusp) {
12254 		switch (ppd->lstate) {
12255 		case IB_PORT_DOWN:
12256 		case IB_PORT_INIT:
12257 			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12258 					   HFI1_STATUS_IB_READY);
12259 			break;
12260 		case IB_PORT_ARMED:
12261 			*ppd->statusp |= HFI1_STATUS_IB_CONF;
12262 			break;
12263 		case IB_PORT_ACTIVE:
12264 			*ppd->statusp |= HFI1_STATUS_IB_READY;
12265 			break;
12266 		}
12267 	}
12268 	return ppd->lstate;
12269 }
12270 
12271 /**
12272  * wait_logical_linkstate - wait for an IB link state change to occur
12273  * @ppd: port device
12274  * @state: the state to wait for
12275  * @msecs: the number of milliseconds to wait
12276  *
12277  * Wait up to msecs milliseconds for IB link state change to occur.
12278  * For now, take the easy polling route.
12279  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12280  */
12281 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12282 				  int msecs)
12283 {
12284 	unsigned long timeout;
12285 
12286 	timeout = jiffies + msecs_to_jiffies(msecs);
12287 	while (1) {
12288 		if (get_logical_state(ppd) == state)
12289 			return 0;
12290 		if (time_after(jiffies, timeout))
12291 			break;
12292 		msleep(20);
12293 	}
12294 	dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12295 
12296 	return -ETIMEDOUT;
12297 }
12298 
12299 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12300 {
12301 	u32 pstate;
12302 	u32 ib_pstate;
12303 
12304 	pstate = read_physical_state(ppd->dd);
12305 	ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12306 	if (ppd->last_pstate != ib_pstate) {
12307 		dd_dev_info(ppd->dd,
12308 			    "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12309 			    __func__, opa_pstate_name(ib_pstate), ib_pstate,
12310 			    pstate);
12311 		ppd->last_pstate = ib_pstate;
12312 	}
12313 	return ib_pstate;
12314 }
12315 
12316 /*
12317  * Read/modify/write ASIC_QSFP register bits as selected by mask
12318  * data: 0 or 1 in the positions depending on what needs to be written
12319  * dir: 0 for read, 1 for write
12320  * mask: select by setting
12321  *      I2CCLK  (bit 0)
12322  *      I2CDATA (bit 1)
12323  */
12324 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12325 		  u32 mask)
12326 {
12327 	u64 qsfp_oe, target_oe;
12328 
12329 	target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12330 	if (mask) {
12331 		/* We are writing register bits, so lock access */
12332 		dir &= mask;
12333 		data &= mask;
12334 
12335 		qsfp_oe = read_csr(dd, target_oe);
12336 		qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12337 		write_csr(dd, target_oe, qsfp_oe);
12338 	}
12339 	/* We are exclusively reading bits here, but it is unlikely
12340 	 * we'll get valid data when we set the direction of the pin
12341 	 * in the same call, so the reader should call this function again
12342 	 * to get valid data.
12343 	 */
12344 	return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12345 }
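
/*
 * Usage sketch (illustrative only): a pure read of the QSFP1 input
 * pins, leaving the output enables untouched, would be
 *
 *	u64 in = hfi1_gpio_mod(dd, 0, 0, 0, 0);
 *
 * since a zero mask skips the OE read-modify-write entirely.
 */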
12346 
12347 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12348 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12349 
12350 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12351 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12352 
12353 int hfi1_init_ctxt(struct send_context *sc)
12354 {
12355 	if (sc) {
12356 		struct hfi1_devdata *dd = sc->dd;
12357 		u64 reg;
12358 		u8 set = (sc->type == SC_USER ?
12359 			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12360 			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12361 		reg = read_kctxt_csr(dd, sc->hw_context,
12362 				     SEND_CTXT_CHECK_ENABLE);
12363 		if (set)
12364 			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12365 		else
12366 			SET_STATIC_RATE_CONTROL_SMASK(reg);
12367 		write_kctxt_csr(dd, sc->hw_context,
12368 				SEND_CTXT_CHECK_ENABLE, reg);
12369 	}
12370 	return 0;
12371 }
12372 
12373 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12374 {
12375 	int ret = 0;
12376 	u64 reg;
12377 
12378 	if (dd->icode != ICODE_RTL_SILICON) {
12379 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12380 			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12381 				    __func__);
12382 		return -EINVAL;
12383 	}
12384 	reg = read_csr(dd, ASIC_STS_THERM);
12385 	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12386 		      ASIC_STS_THERM_CURR_TEMP_MASK);
12387 	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12388 			ASIC_STS_THERM_LO_TEMP_MASK);
12389 	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12390 			ASIC_STS_THERM_HI_TEMP_MASK);
12391 	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12392 			  ASIC_STS_THERM_CRIT_TEMP_MASK);
12393 	/* triggers is a 3-bit value - 1 bit per trigger. */
12394 	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12395 
12396 	return ret;
12397 }
12398 
12399 /* ========================================================================= */
12400 
12401 /*
12402  * Enable/disable chip from delivering interrupts.
12403  */
12404 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12405 {
12406 	int i;
12407 
12408 	/*
12409 	 * In HFI, the mask needs to be 1 to allow interrupts.
12410 	 */
12411 	if (enable) {
12412 		/* enable all interrupts */
12413 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12414 			write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12415 
12416 		init_qsfp_int(dd);
12417 	} else {
12418 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12419 			write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12420 	}
12421 }
12422 
12423 /*
12424  * Clear all interrupt sources on the chip.
12425  */
12426 static void clear_all_interrupts(struct hfi1_devdata *dd)
12427 {
12428 	int i;
12429 
12430 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12431 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12432 
12433 	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12434 	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12435 	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12436 	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12437 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12438 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12439 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12440 	for (i = 0; i < dd->chip_send_contexts; i++)
12441 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12442 	for (i = 0; i < dd->chip_sdma_engines; i++)
12443 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12444 
12445 	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12446 	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12447 	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12448 }
12449 
12450 /* Move to pcie.c? */
12451 static void disable_intx(struct pci_dev *pdev)
12452 {
12453 	pci_intx(pdev, 0);
12454 }
12455 
12456 static void clean_up_interrupts(struct hfi1_devdata *dd)
12457 {
12458 	int i;
12459 
12460 	/* remove irqs - must happen before disabling/turning off */
12461 	if (dd->num_msix_entries) {
12462 		/* MSI-X */
12463 		struct hfi1_msix_entry *me = dd->msix_entries;
12464 
12465 		for (i = 0; i < dd->num_msix_entries; i++, me++) {
12466 			if (!me->arg) /* => no irq, no affinity */
12467 				continue;
12468 			hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12469 			free_irq(me->msix.vector, me->arg);
12470 		}
12471 	} else {
12472 		/* INTx */
12473 		if (dd->requested_intx_irq) {
12474 			free_irq(dd->pcidev->irq, dd);
12475 			dd->requested_intx_irq = 0;
12476 		}
12477 	}
12478 
12479 	/* turn off interrupts */
12480 	if (dd->num_msix_entries) {
12481 		/* MSI-X */
12482 		pci_disable_msix(dd->pcidev);
12483 	} else {
12484 		/* INTx */
12485 		disable_intx(dd->pcidev);
12486 	}
12487 
12488 	/* clean structures */
12489 	kfree(dd->msix_entries);
12490 	dd->msix_entries = NULL;
12491 	dd->num_msix_entries = 0;
12492 }
12493 
12494 /*
12495  * Remap the interrupt source from the general handler to the given MSI-X
12496  * interrupt.
12497  */
12498 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12499 {
12500 	u64 reg;
12501 	int m, n;
12502 
12503 	/* clear from the handled mask of the general interrupt */
12504 	m = isrc / 64;
12505 	n = isrc % 64;
12506 	dd->gi_mask[m] &= ~((u64)1 << n);
12507 
12508 	/* direct the chip source to the given MSI-X interrupt */
12509 	m = isrc / 8;
12510 	n = isrc % 8;
12511 	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12512 	reg &= ~((u64)0xff << (8 * n));
12513 	reg |= ((u64)msix_intr & 0xff) << (8 * n);
12514 	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12515 }
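
/*
 * Worked example (source number is arbitrary): for isrc = 72 and
 * msix_intr = 3, bit 8 of dd->gi_mask[1] is cleared (72 = 64 + 8), and
 * byte 0 of the CCE_INT_MAP CSR at offset 8 * 9 is set to 3
 * (72 = 8 * 9 + 0), steering that source to MSI-X vector 3.
 */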
12516 
12517 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12518 				  int engine, int msix_intr)
12519 {
12520 	/*
12521 	 * SDMA engine interrupt sources grouped by type, rather than
12522 	 * engine.  Per-engine interrupts are as follows:
12523 	 *	SDMA
12524 	 *	SDMAProgress
12525 	 *	SDMAIdle
12526 	 */
12527 	remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12528 		   msix_intr);
12529 	remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12530 		   msix_intr);
12531 	remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12532 		   msix_intr);
12533 }
12534 
12535 static int request_intx_irq(struct hfi1_devdata *dd)
12536 {
12537 	int ret;
12538 
12539 	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12540 		 dd->unit);
12541 	ret = request_irq(dd->pcidev->irq, general_interrupt,
12542 			  IRQF_SHARED, dd->intx_name, dd);
12543 	if (ret)
12544 		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12545 			   ret);
12546 	else
12547 		dd->requested_intx_irq = 1;
12548 	return ret;
12549 }
12550 
12551 static int request_msix_irqs(struct hfi1_devdata *dd)
12552 {
12553 	int first_general, last_general;
12554 	int first_sdma, last_sdma;
12555 	int first_rx, last_rx;
12556 	int i, ret = 0;
12557 
12558 	/* calculate the ranges we are going to use */
12559 	first_general = 0;
12560 	last_general = first_general + 1;
12561 	first_sdma = last_general;
12562 	last_sdma = first_sdma + dd->num_sdma;
12563 	first_rx = last_sdma;
12564 	last_rx = first_rx + dd->n_krcv_queues;
12565 
12566 	/*
12567 	 * Sanity check - the code expects all SDMA chip source
12568 	 * interrupts to be in the same CSR, starting at bit 0.  Verify
12569 	 * that this is true by checking the bit location of the start.
12570 	 */
12571 	BUILD_BUG_ON(IS_SDMA_START % 64);
12572 
12573 	for (i = 0; i < dd->num_msix_entries; i++) {
12574 		struct hfi1_msix_entry *me = &dd->msix_entries[i];
12575 		const char *err_info;
12576 		irq_handler_t handler;
12577 		irq_handler_t thread = NULL;
12578 		void *arg;
12579 		int idx;
12580 		struct hfi1_ctxtdata *rcd = NULL;
12581 		struct sdma_engine *sde = NULL;
12582 
12583 		/* obtain the arguments to request_irq */
12584 		if (first_general <= i && i < last_general) {
12585 			idx = i - first_general;
12586 			handler = general_interrupt;
12587 			arg = dd;
12588 			snprintf(me->name, sizeof(me->name),
12589 				 DRIVER_NAME "_%d", dd->unit);
12590 			err_info = "general";
12591 			me->type = IRQ_GENERAL;
12592 		} else if (first_sdma <= i && i < last_sdma) {
12593 			idx = i - first_sdma;
12594 			sde = &dd->per_sdma[idx];
12595 			handler = sdma_interrupt;
12596 			arg = sde;
12597 			snprintf(me->name, sizeof(me->name),
12598 				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12599 			err_info = "sdma";
12600 			remap_sdma_interrupts(dd, idx, i);
12601 			me->type = IRQ_SDMA;
12602 		} else if (first_rx <= i && i < last_rx) {
12603 			idx = i - first_rx;
12604 			rcd = dd->rcd[idx];
12605 			/* no interrupt if no rcd */
12606 			if (!rcd)
12607 				continue;
12608 			/*
12609 			 * Set the interrupt register and mask for this
12610 			 * context's interrupt.
12611 			 */
12612 			rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12613 			rcd->imask = ((u64)1) <<
12614 					((IS_RCVAVAIL_START + idx) % 64);
12615 			handler = receive_context_interrupt;
12616 			thread = receive_context_thread;
12617 			arg = rcd;
12618 			snprintf(me->name, sizeof(me->name),
12619 				 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12620 			err_info = "receive context";
12621 			remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12622 			me->type = IRQ_RCVCTXT;
12623 		} else {
12624 			/* not in our expected range - complain, then
12625 			 * ignore it
12626 			 */
12627 			dd_dev_err(dd,
12628 				   "Unexpected extra MSI-X interrupt %d\n", i);
12629 			continue;
12630 		}
12631 		/* no argument, no interrupt */
12632 		if (!arg)
12633 			continue;
12634 		/* make sure the name is terminated */
12635 		me->name[sizeof(me->name) - 1] = 0;
12636 
12637 		ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12638 					   me->name, arg);
12639 		if (ret) {
12640 			dd_dev_err(dd,
12641 				   "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12642 				   err_info, me->msix.vector, idx, ret);
12643 			return ret;
12644 		}
12645 		/*
12646 		 * assign arg after request_irq call, so it will be
12647 		 * cleaned up
12648 		 */
12649 		me->arg = arg;
12650 
12651 		ret = hfi1_get_irq_affinity(dd, me);
12652 		if (ret)
12653 			dd_dev_err(dd,
12654 				   "unable to pin IRQ %d\n", ret);
12655 	}
12656 
12657 	return ret;
12658 }
12659 
12660 /*
12661  * Set the general handler to accept all interrupts, remap all
12662  * chip interrupts back to MSI-X 0.
12663  */
12664 static void reset_interrupts(struct hfi1_devdata *dd)
12665 {
12666 	int i;
12667 
12668 	/* all interrupts handled by the general handler */
12669 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12670 		dd->gi_mask[i] = ~(u64)0;
12671 
12672 	/* all chip interrupts map to MSI-X 0 */
12673 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12674 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12675 }
12676 
12677 static int set_up_interrupts(struct hfi1_devdata *dd)
12678 {
12679 	struct hfi1_msix_entry *entries;
12680 	u32 total, request;
12681 	int i, ret;
12682 	int single_interrupt = 0; /* we expect to have all the interrupts */
12683 
12684 	/*
12685 	 * Interrupt count:
12686 	 *	1 general, "slow path" interrupt (includes the SDMA engines
12687 	 *		slow source, SDMACleanupDone)
12688 	 *	N interrupts - one per used SDMA engine
12689 	 *	M interrupts - one per kernel receive context
12690 	 */
12691 	total = 1 + dd->num_sdma + dd->n_krcv_queues;
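
	/*
	 * For example (engine and queue counts are hypothetical): with
	 * 16 SDMA engines in use and 8 kernel receive queues this asks
	 * for 1 + 16 + 8 = 25 MSI-X vectors.
	 */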
12692 
12693 	entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12694 	if (!entries) {
12695 		ret = -ENOMEM;
12696 		goto fail;
12697 	}
12698 	/* 1-1 MSI-X entry assignment */
12699 	for (i = 0; i < total; i++)
12700 		entries[i].msix.entry = i;
12701 
12702 	/* ask for MSI-X interrupts */
12703 	request = total;
12704 	request_msix(dd, &request, entries);
12705 
12706 	if (request == 0) {
12707 		/* using INTx */
12708 		/* dd->num_msix_entries already zero */
12709 		kfree(entries);
12710 		single_interrupt = 1;
12711 		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12712 	} else {
12713 		/* using MSI-X */
12714 		dd->num_msix_entries = request;
12715 		dd->msix_entries = entries;
12716 
12717 		if (request != total) {
12718 			/* using MSI-X, with reduced interrupts */
12719 			dd_dev_err(
12720 				dd,
12721 				"cannot handle reduced interrupt case, want %u, got %u\n",
12722 				total, request);
12723 			ret = -EINVAL;
12724 			goto fail;
12725 		}
12726 		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12727 	}
12728 
12729 	/* mask all interrupts */
12730 	set_intr_state(dd, 0);
12731 	/* clear all pending interrupts */
12732 	clear_all_interrupts(dd);
12733 
12734 	/* reset general handler mask, chip MSI-X mappings */
12735 	reset_interrupts(dd);
12736 
12737 	if (single_interrupt)
12738 		ret = request_intx_irq(dd);
12739 	else
12740 		ret = request_msix_irqs(dd);
12741 	if (ret)
12742 		goto fail;
12743 
12744 	return 0;
12745 
12746 fail:
12747 	clean_up_interrupts(dd);
12748 	return ret;
12749 }
12750 
12751 /*
12752  * Set up context values in dd.  Sets:
12753  *
12754  *	num_rcv_contexts - number of contexts being used
12755  *	n_krcv_queues - number of kernel contexts
12756  *	first_user_ctxt - first non-kernel context in array of contexts
12757  *	freectxts  - number of free user contexts
12758  *	num_send_contexts - number of PIO send contexts being used
12759  */
12760 static int set_up_context_variables(struct hfi1_devdata *dd)
12761 {
12762 	int num_kernel_contexts;
12763 	int total_contexts;
12764 	int ret;
12765 	unsigned ngroups;
12766 	int qos_rmt_count;
12767 	int user_rmt_reduced;
12768 
12769 	/*
12770 	 * Kernel receive contexts:
12771 	 * - min of 2, or 1 context per NUMA node (excluding the control context)
12772 	 * - Context 0 - control context (VL15/multicast/error)
12773 	 * - Context 1 - first kernel context
12774 	 * - Context 2 - second kernel context
12775 	 * ...
12776 	 */
12777 	if (n_krcvqs)
12778 		/*
12779 		 * n_krcvqs is the sum of module parameter kernel receive
12780 		 * contexts, krcvqs[].  It does not include the control
12781 		 * context, so add that.
12782 		 */
12783 		num_kernel_contexts = n_krcvqs + 1;
12784 	else
12785 		num_kernel_contexts = num_online_nodes() + 1;
12786 	num_kernel_contexts =
12787 		max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12788 	/*
12789 	 * Every kernel receive context needs an ACK send context.
12790 	 * One send context is allocated for each VL{0-7} and VL15.
12791 	 */
12792 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12793 		dd_dev_err(dd,
12794 			   "Reducing # kernel rcv contexts to: %d, from %d\n",
12795 			   (int)(dd->chip_send_contexts - num_vls - 1),
12796 			   (int)num_kernel_contexts);
12797 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12798 	}
12799 	/*
12800 	 * User contexts:
12801 	 *	- default to 1 user context per real (non-HT) CPU core if
12802 	 *	  num_user_contexts is negative
12803 	 */
12804 	if (num_user_contexts < 0)
12805 		num_user_contexts =
12806 			cpumask_weight(&dd->affinity->real_cpu_mask);
12807 
12808 	total_contexts = num_kernel_contexts + num_user_contexts;
12809 
12810 	/*
12811 	 * Adjust the counts given a global max.
12812 	 */
12813 	if (total_contexts > dd->chip_rcv_contexts) {
12814 		dd_dev_err(dd,
12815 			   "Reducing # user receive contexts to: %d, from %d\n",
12816 			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12817 			   (int)num_user_contexts);
12818 		num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12819 		/* recalculate */
12820 		total_contexts = num_kernel_contexts + num_user_contexts;
12821 	}
12822 
12823 	/* each user context requires an entry in the RMT */
12824 	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12825 	if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12826 		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12827 		dd_dev_err(dd,
12828 			   "RMT size is reducing the number of user receive contexts from %d to %d\n",
12829 			   (int)num_user_contexts,
12830 			   user_rmt_reduced);
12831 		/* recalculate */
12832 		num_user_contexts = user_rmt_reduced;
12833 		total_contexts = num_kernel_contexts + num_user_contexts;
12834 	}
12835 
12836 	/* the first N are kernel contexts, the rest are user contexts */
12837 	dd->num_rcv_contexts = total_contexts;
12838 	dd->n_krcv_queues = num_kernel_contexts;
12839 	dd->first_user_ctxt = num_kernel_contexts;
12840 	dd->num_user_contexts = num_user_contexts;
12841 	dd->freectxts = num_user_contexts;
12842 	dd_dev_info(dd,
12843 		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12844 		    (int)dd->chip_rcv_contexts,
12845 		    (int)dd->num_rcv_contexts,
12846 		    (int)dd->n_krcv_queues,
12847 		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12848 
12849 	/*
12850 	 * Receive array allocation:
12851 	 *   All RcvArray entries are divided into groups of 8. This
12852 	 *   is required by the hardware and will speed up writes to
12853 	 *   consecutive entries by using write-combining of the entire
12854 	 *   cacheline.
12855 	 *
12856 	 *   The number of groups is evenly divided among all contexts;
12857 	 *   any left-over groups are given to the first N user
12858 	 *   contexts.
12859 	 */
12860 	dd->rcv_entries.group_size = RCV_INCREMENT;
12861 	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12862 	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12863 	dd->rcv_entries.nctxt_extra = ngroups -
12864 		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12865 	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12866 		    dd->rcv_entries.ngroups,
12867 		    dd->rcv_entries.nctxt_extra);
12868 	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12869 	    MAX_EAGER_ENTRIES * 2) {
12870 		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12871 			dd->rcv_entries.group_size;
12872 		dd_dev_info(dd,
12873 			    "RcvArray group count too high, change to %u\n",
12874 			    dd->rcv_entries.ngroups);
12875 		dd->rcv_entries.nctxt_extra = 0;
12876 	}
12877 	/*
12878 	 * PIO send contexts
12879 	 */
12880 	ret = init_sc_pools_and_sizes(dd);
12881 	if (ret >= 0) {	/* success */
12882 		dd->num_send_contexts = ret;
12883 		dd_dev_info(
12884 			dd,
12885 			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
12886 			dd->chip_send_contexts,
12887 			dd->num_send_contexts,
12888 			dd->sc_sizes[SC_KERNEL].count,
12889 			dd->sc_sizes[SC_ACK].count,
12890 			dd->sc_sizes[SC_USER].count,
12891 			dd->sc_sizes[SC_VL15].count);
12892 		ret = 0;	/* success */
12893 	}
12894 
12895 	return ret;
12896 }
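
/*
 * RcvArray sizing illustration (all numbers hypothetical): with
 * chip_rcv_array_count = 32768 and a group size of 8 there are 4096
 * groups; with 40 receive contexts each context gets 4096 / 40 = 102
 * groups and the 16 left-over groups (nctxt_extra) go to the first
 * user contexts.
 */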
12897 
12898 /*
12899  * Set the device/port partition key table. The MAD code
12900  * will ensure that, at least, the partial management
12901  * partition key is present in the table.
12902  */
12903 static void set_partition_keys(struct hfi1_pportdata *ppd)
12904 {
12905 	struct hfi1_devdata *dd = ppd->dd;
12906 	u64 reg = 0;
12907 	int i;
12908 
12909 	dd_dev_info(dd, "Setting partition keys\n");
12910 	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12911 		reg |= (ppd->pkeys[i] &
12912 			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12913 			((i % 4) *
12914 			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12915 		/* Each register holds 4 PKey values. */
12916 		if ((i % 4) == 3) {
12917 			write_csr(dd, RCV_PARTITION_KEY +
12918 				  ((i - 3) * 2), reg);
12919 			reg = 0;
12920 		}
12921 	}
12922 
12923 	/* Always enable HW pkeys check when pkeys table is set */
12924 	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12925 }
12926 
12927 /*
12928  * These CSRs and memories are uninitialized on reset and must be
12929  * written before reading to set the ECC/parity bits.
12930  *
12931  * NOTE: All user context CSRs that are not mmapped write-only
12932  * (e.g. the TID flows) must be initialized even if the driver never
12933  * reads them.
12934  */
12935 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12936 {
12937 	int i, j;
12938 
12939 	/* CceIntMap */
12940 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12941 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12942 
12943 	/* SendCtxtCreditReturnAddr */
12944 	for (i = 0; i < dd->chip_send_contexts; i++)
12945 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12946 
12947 	/* PIO Send buffers */
12948 	/* SDMA Send buffers */
12949 	/*
12950 	 * These are not normally read, and (presently) have no method
12951 	 * to be read, so are not pre-initialized
12952 	 */
12953 
12954 	/* RcvHdrAddr */
12955 	/* RcvHdrTailAddr */
12956 	/* RcvTidFlowTable */
12957 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
12958 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12959 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12960 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12961 			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
12962 	}
12963 
12964 	/* RcvArray */
12965 	for (i = 0; i < dd->chip_rcv_array_count; i++)
12966 		write_csr(dd, RCV_ARRAY + (8 * i),
12967 			  RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12968 
12969 	/* RcvQPMapTable */
12970 	for (i = 0; i < 32; i++)
12971 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12972 }
12973 
12974 /*
12975  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12976  */
12977 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12978 			     u64 ctrl_bits)
12979 {
12980 	unsigned long timeout;
12981 	u64 reg;
12982 
12983 	/* is the condition present? */
12984 	reg = read_csr(dd, CCE_STATUS);
12985 	if ((reg & status_bits) == 0)
12986 		return;
12987 
12988 	/* clear the condition */
12989 	write_csr(dd, CCE_CTRL, ctrl_bits);
12990 
12991 	/* wait for the condition to clear */
12992 	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12993 	while (1) {
12994 		reg = read_csr(dd, CCE_STATUS);
12995 		if ((reg & status_bits) == 0)
12996 			return;
12997 		if (time_after(jiffies, timeout)) {
12998 			dd_dev_err(dd,
12999 				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13000 				   status_bits, reg & status_bits);
13001 			return;
13002 		}
13003 		udelay(1);
13004 	}
13005 }
13006 
13007 /* set CCE CSRs to chip reset defaults */
13008 static void reset_cce_csrs(struct hfi1_devdata *dd)
13009 {
13010 	int i;
13011 
13012 	/* CCE_REVISION read-only */
13013 	/* CCE_REVISION2 read-only */
13014 	/* CCE_CTRL - bits clear automatically */
13015 	/* CCE_STATUS read-only, use CceCtrl to clear */
13016 	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13017 	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13018 	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13019 	for (i = 0; i < CCE_NUM_SCRATCH; i++)
13020 		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13021 	/* CCE_ERR_STATUS read-only */
13022 	write_csr(dd, CCE_ERR_MASK, 0);
13023 	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13024 	/* CCE_ERR_FORCE leave alone */
13025 	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13026 		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13027 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13028 	/* CCE_PCIE_CTRL leave alone */
13029 	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13030 		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13031 		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13032 			  CCE_MSIX_TABLE_UPPER_RESETCSR);
13033 	}
13034 	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13035 		/* CCE_MSIX_PBA read-only */
13036 		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13037 		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13038 	}
13039 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13040 		write_csr(dd, CCE_INT_MAP, 0);
13041 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13042 		/* CCE_INT_STATUS read-only */
13043 		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13044 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13045 		/* CCE_INT_FORCE leave alone */
13046 		/* CCE_INT_BLOCKED read-only */
13047 	}
13048 	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13049 		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13050 }
13051 
13052 /* set MISC CSRs to chip reset defaults */
13053 static void reset_misc_csrs(struct hfi1_devdata *dd)
13054 {
13055 	int i;
13056 
13057 	for (i = 0; i < 32; i++) {
13058 		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13059 		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13060 		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13061 	}
13062 	/*
13063 	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13064 	 * only be written in 128-byte chunks
13065 	 */
13066 	/* init RSA engine to clear lingering errors */
13067 	write_csr(dd, MISC_CFG_RSA_CMD, 1);
13068 	write_csr(dd, MISC_CFG_RSA_MU, 0);
13069 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13070 	/* MISC_STS_8051_DIGEST read-only */
13071 	/* MISC_STS_SBM_DIGEST read-only */
13072 	/* MISC_STS_PCIE_DIGEST read-only */
13073 	/* MISC_STS_FAB_DIGEST read-only */
13074 	/* MISC_ERR_STATUS read-only */
13075 	write_csr(dd, MISC_ERR_MASK, 0);
13076 	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13077 	/* MISC_ERR_FORCE leave alone */
13078 }
13079 
13080 /* set TXE CSRs to chip reset defaults */
13081 static void reset_txe_csrs(struct hfi1_devdata *dd)
13082 {
13083 	int i;
13084 
13085 	/*
13086 	 * TXE Kernel CSRs
13087 	 */
13088 	write_csr(dd, SEND_CTRL, 0);
13089 	__cm_reset(dd, 0);	/* reset CM internal state */
13090 	/* SEND_CONTEXTS read-only */
13091 	/* SEND_DMA_ENGINES read-only */
13092 	/* SEND_PIO_MEM_SIZE read-only */
13093 	/* SEND_DMA_MEM_SIZE read-only */
13094 	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13095 	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
13096 	/* SEND_PIO_ERR_STATUS read-only */
13097 	write_csr(dd, SEND_PIO_ERR_MASK, 0);
13098 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13099 	/* SEND_PIO_ERR_FORCE leave alone */
13100 	/* SEND_DMA_ERR_STATUS read-only */
13101 	write_csr(dd, SEND_DMA_ERR_MASK, 0);
13102 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13103 	/* SEND_DMA_ERR_FORCE leave alone */
13104 	/* SEND_EGRESS_ERR_STATUS read-only */
13105 	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13106 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13107 	/* SEND_EGRESS_ERR_FORCE leave alone */
13108 	write_csr(dd, SEND_BTH_QP, 0);
13109 	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13110 	write_csr(dd, SEND_SC2VLT0, 0);
13111 	write_csr(dd, SEND_SC2VLT1, 0);
13112 	write_csr(dd, SEND_SC2VLT2, 0);
13113 	write_csr(dd, SEND_SC2VLT3, 0);
13114 	write_csr(dd, SEND_LEN_CHECK0, 0);
13115 	write_csr(dd, SEND_LEN_CHECK1, 0);
13116 	/* SEND_ERR_STATUS read-only */
13117 	write_csr(dd, SEND_ERR_MASK, 0);
13118 	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13119 	/* SEND_ERR_FORCE read-only */
13120 	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13121 		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13122 	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13123 		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13124 	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13125 		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13126 	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13127 		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13128 	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13129 		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13130 	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13131 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13132 	/* SEND_CM_CREDIT_USED_STATUS read-only */
13133 	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13134 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13135 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13136 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13137 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13138 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
13139 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13140 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13141 	/* SEND_CM_CREDIT_USED_VL read-only */
13142 	/* SEND_CM_CREDIT_USED_VL15 read-only */
13143 	/* SEND_EGRESS_CTXT_STATUS read-only */
13144 	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
13145 	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13146 	/* SEND_EGRESS_ERR_INFO read-only */
13147 	/* SEND_EGRESS_ERR_SOURCE read-only */
13148 
13149 	/*
13150 	 * TXE Per-Context CSRs
13151 	 */
13152 	for (i = 0; i < dd->chip_send_contexts; i++) {
13153 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13154 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13155 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13156 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13157 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13158 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13159 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13160 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13161 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13162 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13163 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13164 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13165 	}
13166 
13167 	/*
13168 	 * TXE Per-SDMA CSRs
13169 	 */
13170 	for (i = 0; i < dd->chip_sdma_engines; i++) {
13171 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13172 		/* SEND_DMA_STATUS read-only */
13173 		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13174 		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13175 		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13176 		/* SEND_DMA_HEAD read-only */
13177 		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13178 		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13179 		/* SEND_DMA_IDLE_CNT read-only */
13180 		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13181 		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13182 		/* SEND_DMA_DESC_FETCHED_CNT read-only */
13183 		/* SEND_DMA_ENG_ERR_STATUS read-only */
13184 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13185 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13186 		/* SEND_DMA_ENG_ERR_FORCE leave alone */
13187 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13188 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13189 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13190 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13191 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13192 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13193 		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13194 	}
13195 }
13196 
13197 /*
13198  * Expect on entry:
13199  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13200  */
13201 static void init_rbufs(struct hfi1_devdata *dd)
13202 {
13203 	u64 reg;
13204 	int count;
13205 
13206 	/*
13207 	 * Wait for DMA to stop, i.e. for RxRbufPktPending and RxPktInProgress
13208 	 * to clear.
13209 	 */
13210 	count = 0;
13211 	while (1) {
13212 		reg = read_csr(dd, RCV_STATUS);
13213 		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13214 			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13215 			break;
13216 		/*
13217 		 * Give up after 1ms - maximum wait time.
13218 		 *
13219 		 * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
13220 		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13221 		 *	148 KB / (66% * 250MB/s) = 920us
13222 		 */
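		/* 500 iterations of the 2 us udelay below ~= the 1 ms budget */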
13223 		if (count++ > 500) {
13224 			dd_dev_err(dd,
13225 				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13226 				   __func__, reg);
13227 			break;
13228 		}
13229 		udelay(2); /* do not busy-wait the CSR */
13230 	}
13231 
13232 	/* start the init - expect RcvCtrl to be 0 */
13233 	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13234 
13235 	/*
13236 	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13237 	 * period after the write before RcvStatus.RxRbufInitDone is valid.
13238 	 * The delay in the first run through the loop below is sufficient and
13239 	 * required before the first read of RcvStatus.RxRbufInitDone.
13240 	 */
13241 	read_csr(dd, RCV_CTRL);
13242 
13243 	/* wait for the init to finish */
13244 	count = 0;
13245 	while (1) {
13246 		/* delay is required first time through - see above */
13247 		udelay(2); /* do not busy-wait the CSR */
13248 		reg = read_csr(dd, RCV_STATUS);
13249 		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13250 			break;
13251 
13252 		/* give up after 100us - slowest possible at 33MHz is 73us */
13253 		if (count++ > 50) {
13254 			dd_dev_err(dd,
13255 				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13256 				   __func__);
13257 			break;
13258 		}
13259 	}
13260 }
13261 
13262 /* set RXE CSRs to chip reset defaults */
13263 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13264 {
13265 	int i, j;
13266 
13267 	/*
13268 	 * RXE Kernel CSRs
13269 	 */
13270 	write_csr(dd, RCV_CTRL, 0);
13271 	init_rbufs(dd);
13272 	/* RCV_STATUS read-only */
13273 	/* RCV_CONTEXTS read-only */
13274 	/* RCV_ARRAY_CNT read-only */
13275 	/* RCV_BUF_SIZE read-only */
13276 	write_csr(dd, RCV_BTH_QP, 0);
13277 	write_csr(dd, RCV_MULTICAST, 0);
13278 	write_csr(dd, RCV_BYPASS, 0);
13279 	write_csr(dd, RCV_VL15, 0);
13280 	/* this is a clear-down */
13281 	write_csr(dd, RCV_ERR_INFO,
13282 		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13283 	/* RCV_ERR_STATUS read-only */
13284 	write_csr(dd, RCV_ERR_MASK, 0);
13285 	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13286 	/* RCV_ERR_FORCE leave alone */
13287 	for (i = 0; i < 32; i++)
13288 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13289 	for (i = 0; i < 4; i++)
13290 		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13291 	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13292 		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13293 	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13294 		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13295 	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13296 		write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13297 		write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13298 		write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13299 	}
13300 	for (i = 0; i < 32; i++)
13301 		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13302 
13303 	/*
13304 	 * RXE Kernel and User Per-Context CSRs
13305 	 */
13306 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
13307 		/* kernel */
13308 		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13309 		/* RCV_CTXT_STATUS read-only */
13310 		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13311 		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13312 		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13313 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13314 		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13315 		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13316 		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13317 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13318 		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13319 		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13320 
13321 		/* user */
13322 		/* RCV_HDR_TAIL read-only */
13323 		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13324 		/* RCV_EGR_INDEX_TAIL read-only */
13325 		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13326 		/* RCV_EGR_OFFSET_TAIL read-only */
13327 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13328 			write_uctxt_csr(dd, i,
13329 					RCV_TID_FLOW_TABLE + (8 * j), 0);
13330 		}
13331 	}
13332 }
13333 
13334 /*
13335  * Set sc2vl tables.
13336  *
13337  * They power on to zeros, so to avoid send context errors
13338  * they need to be set:
13339  *
13340  * SC 0-7 -> VL 0-7 (respectively)
13341  * SC 15  -> VL 15
13342  * otherwise
13343  *        -> VL 0
13344  */
13345 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13346 {
13347 	int i;
13348 	/* init per architecture spec, constrained by hardware capability */
13349 
13350 	/* HFI maps sent packets */
13351 	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13352 		0,
13353 		0, 0, 1, 1,
13354 		2, 2, 3, 3,
13355 		4, 4, 5, 5,
13356 		6, 6, 7, 7));
13357 	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13358 		1,
13359 		8, 0, 9, 0,
13360 		10, 0, 11, 0,
13361 		12, 0, 13, 0,
13362 		14, 0, 15, 15));
13363 	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13364 		2,
13365 		16, 0, 17, 0,
13366 		18, 0, 19, 0,
13367 		20, 0, 21, 0,
13368 		22, 0, 23, 0));
13369 	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13370 		3,
13371 		24, 0, 25, 0,
13372 		26, 0, 27, 0,
13373 		28, 0, 29, 0,
13374 		30, 0, 31, 0));
13375 
13376 	/* DC maps received packets */
13377 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13378 		15_0,
13379 		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13380 		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13381 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13382 		31_16,
13383 		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13384 		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13385 
13386 	/* initialize the cached sc2vl values consistently with h/w */
13387 	for (i = 0; i < 32; i++) {
13388 		if (i < 8 || i == 15)
13389 			*((u8 *)(dd->sc2vl) + i) = (u8)i;
13390 		else
13391 			*((u8 *)(dd->sc2vl) + i) = 0;
13392 	}
13393 }
13394 
13395 /*
13396  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13397  * depend on the chip going through a power-on reset - a driver may be loaded
13398  * and unloaded many times.
13399  *
13400  * Do not write any CSR values to the chip in this routine - there may be
13401  * a reset following the (possible) FLR in this routine.
13402  *
13403  */
13404 static void init_chip(struct hfi1_devdata *dd)
13405 {
13406 	int i;
13407 
13408 	/*
13409 	 * Put the HFI CSRs in a known state.
13410 	 * Combine this with a DC reset.
13411 	 *
13412 	 * Stop the device from doing anything while we do a
13413 	 * reset.  We know there are no other active users of
13414 	 * the device since we are now in charge.  Turn off
13415 	 * all outbound and inbound traffic and make sure
13416 	 * the device does not generate any interrupts.
13417 	 */
13418 
13419 	/* disable send contexts and SDMA engines */
13420 	write_csr(dd, SEND_CTRL, 0);
13421 	for (i = 0; i < dd->chip_send_contexts; i++)
13422 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13423 	for (i = 0; i < dd->chip_sdma_engines; i++)
13424 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13425 	/* disable port (turn off RXE inbound traffic) and contexts */
13426 	write_csr(dd, RCV_CTRL, 0);
13427 	for (i = 0; i < dd->chip_rcv_contexts; i++)
13428 		write_csr(dd, RCV_CTXT_CTRL, 0);
13429 	/* mask all interrupt sources */
13430 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13431 		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13432 
13433 	/*
13434 	 * DC Reset: do a full DC reset before the register clear.
13435 	 * A recommended length of time to hold is one CSR read,
13436 	 * so reread the CceDcCtrl.  Then, hold the DC in reset
13437 	 * across the clear.
13438 	 */
13439 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13440 	(void)read_csr(dd, CCE_DC_CTRL);
13441 
13442 	if (use_flr) {
13443 		/*
13444 		 * A FLR will reset the SPC core and part of the PCIe.
13445 		 * The parts that need to be restored have already been
13446 		 * saved.
13447 		 */
13448 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
13449 
13450 		/* do the FLR, the DC reset will remain */
13451 		hfi1_pcie_flr(dd);
13452 
13453 		/* restore command and BARs */
13454 		restore_pci_variables(dd);
13455 
13456 		if (is_ax(dd)) {
13457 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
13458 			hfi1_pcie_flr(dd);
13459 			restore_pci_variables(dd);
13460 		}
13461 	} else {
13462 		dd_dev_info(dd, "Resetting CSRs with writes\n");
13463 		reset_cce_csrs(dd);
13464 		reset_txe_csrs(dd);
13465 		reset_rxe_csrs(dd);
13466 		reset_misc_csrs(dd);
13467 	}
13468 	/* clear the DC reset */
13469 	write_csr(dd, CCE_DC_CTRL, 0);
13470 
13471 	/* Set the LED off */
13472 	setextled(dd, 0);
13473 
13474 	/*
13475 	 * Clear the QSFP reset.
13476 	 * An FLR enforces a 0 on all output pins. The driver does not touch
13477 	 * ASIC_QSFPn_OUT otherwise.  That would leave RESET_N low, holding
13478 	 * anything that is plugged in and pays attention to RESET_N
13479 	 * constantly in reset.
13480 	 * Prime examples of this are optical cables. Set all pins high.
13481 	 * I2CCLK and I2CDAT will change per direction, and INT_N and
13482 	 * MODPRS_N are input only and their value is ignored.
13483 	 */
13484 	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13485 	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13486 	init_chip_resources(dd);
13487 }
13488 
13489 static void init_early_variables(struct hfi1_devdata *dd)
13490 {
13491 	int i;
13492 
13493 	/* assign link credit variables */
13494 	dd->vau = CM_VAU;
13495 	dd->link_credits = CM_GLOBAL_CREDITS;
13496 	if (is_ax(dd))
13497 		dd->link_credits--;
13498 	dd->vcu = cu_to_vcu(hfi1_cu);
13499 	/* enough room for 8 MAD packets plus header - 17K */
13500 	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13501 	if (dd->vl15_init > dd->link_credits)
13502 		dd->vl15_init = dd->link_credits;
13503 
13504 	write_uninitialized_csrs_and_memories(dd);
13505 
13506 	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13507 		for (i = 0; i < dd->num_pports; i++) {
13508 			struct hfi1_pportdata *ppd = &dd->pport[i];
13509 
13510 			set_partition_keys(ppd);
13511 		}
13512 	init_sc2vl_tables(dd);
13513 }
13514 
13515 static void init_kdeth_qp(struct hfi1_devdata *dd)
13516 {
13517 	/* user changed the KDETH_QP */
13518 	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13519 		/* out of range or illegal value */
13520 		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13521 		kdeth_qp = 0;
13522 	}
13523 	if (kdeth_qp == 0)	/* not set, or failed range check */
13524 		kdeth_qp = DEFAULT_KDETH_QP;
13525 
13526 	write_csr(dd, SEND_BTH_QP,
13527 		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13528 		  SEND_BTH_QP_KDETH_QP_SHIFT);
13529 
13530 	write_csr(dd, RCV_BTH_QP,
13531 		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13532 		  RCV_BTH_QP_KDETH_QP_SHIFT);
13533 }
13534 
13535 /**
13536  * init_qpmap_table
13537  * @dd - device data
13538  * @first_ctxt - first context
13539  * @last_ctxt - last context
13540  *
13541  * This routine sets the qpn mapping table that
13542  * is indexed by qpn[8:1].
13543  *
13544  * The routine will round robin the 256 settings
13545  * from first_ctxt to last_ctxt.
13546  *
13547  * The first/last looks ahead to having specialized
13548  * receive contexts for mgmt and bypass.  Normal
13549  * verbs traffic is assumed to be on a range
13550  * of receive contexts.
13551  */
13552 static void init_qpmap_table(struct hfi1_devdata *dd,
13553 			     u32 first_ctxt,
13554 			     u32 last_ctxt)
13555 {
13556 	u64 reg = 0;
13557 	u64 regno = RCV_QP_MAP_TABLE;
13558 	int i;
13559 	u64 ctxt = first_ctxt;
13560 
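	/*
	 * Illustrative mapping (hypothetical arguments): with first_ctxt = 1
	 * and last_ctxt = 3, table entries 0, 1, 2, 3, 4, ... map qpn[8:1]
	 * values to contexts 1, 2, 3, 1, 2, ..., eight one-byte entries per
	 * 64-bit RcvQPMapTable register.
	 */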
13561 	for (i = 0; i < 256; i++) {
13562 		reg |= ctxt << (8 * (i % 8));
13563 		ctxt++;
13564 		if (ctxt > last_ctxt)
13565 			ctxt = first_ctxt;
13566 		if (i % 8 == 7) {
13567 			write_csr(dd, regno, reg);
13568 			reg = 0;
13569 			regno += 8;
13570 		}
13571 	}
13572 
13573 	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13574 			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13575 }
13576 
13577 struct rsm_map_table {
13578 	u64 map[NUM_MAP_REGS];
13579 	unsigned int used;
13580 };
13581 
13582 struct rsm_rule_data {
13583 	u8 offset;
13584 	u8 pkt_type;
13585 	u32 field1_off;
13586 	u32 field2_off;
13587 	u32 index1_off;
13588 	u32 index1_width;
13589 	u32 index2_off;
13590 	u32 index2_width;
13591 	u32 mask1;
13592 	u32 value1;
13593 	u32 mask2;
13594 	u32 value2;
13595 };
13596 
13597 /*
13598  * Return an initialized RMT map table for users to fill in.  OK if it
13599  * returns NULL, indicating no table.
13600  */
13601 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13602 {
13603 	struct rsm_map_table *rmt;
13604 	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* default map entry: 0 on A0 h/w */
13605 
13606 	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13607 	if (rmt) {
13608 		memset(rmt->map, rxcontext, sizeof(rmt->map));
13609 		rmt->used = 0;
13610 	}
13611 
13612 	return rmt;
13613 }
13614 
13615 /*
13616  * Write the final RMT map table to the chip.  OK if the table is NULL.
13617  * The caller retains ownership of the table and frees it.
13618  */
13619 static void complete_rsm_map_table(struct hfi1_devdata *dd,
13620 				   struct rsm_map_table *rmt)
13621 {
13622 	int i;
13623 
13624 	if (rmt) {
13625 		/* write table to chip */
13626 		for (i = 0; i < NUM_MAP_REGS; i++)
13627 			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13628 
13629 		/* enable RSM */
13630 		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13631 	}
13632 }
13633 
13634 /*
13635  * Add a receive side mapping rule.
13636  */
13637 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13638 			 struct rsm_rule_data *rrd)
13639 {
13640 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13641 		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13642 		  1ull << rule_index | /* enable bit */
13643 		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13644 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13645 		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13646 		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13647 		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13648 		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13649 		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13650 		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13651 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13652 		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13653 		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13654 		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13655 		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13656 }
13657 
13658 /* return the number of RSM map table entries that will be used for QOS */
13659 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13660 			   unsigned int *np)
13661 {
13662 	int i;
13663 	unsigned int m, n;
13664 	u8 max_by_vl = 0;
13665 
13666 	/* is QOS active at all? */
13667 	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13668 	    num_vls == 1 ||
13669 	    krcvqsset <= 1)
13670 		goto no_qos;
13671 
13672 	/* determine bits for qpn */
13673 	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13674 		if (krcvqs[i] > max_by_vl)
13675 			max_by_vl = krcvqs[i];
13676 	if (max_by_vl > 32)
13677 		goto no_qos;
13678 	m = ilog2(__roundup_pow_of_two(max_by_vl));
13679 
13680 	/* determine bits for vl */
13681 	n = ilog2(__roundup_pow_of_two(num_vls));
13682 
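	/*
	 * Example (illustrative): krcvqs = {4, 4, 4, 4} with num_vls = 4
	 * gives max_by_vl = 4, so m = 2 and n = 2; the caller's RSM rule
	 * will consume 1 << (m + n) = 16 map table entries.
	 */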
13683 	/* reject if too much is used */
13684 	if ((m + n) > 7)
13685 		goto no_qos;
13686 
13687 	if (mp)
13688 		*mp = m;
13689 	if (np)
13690 		*np = n;
13691 
13692 	return 1 << (m + n);
13693 
13694 no_qos:
13695 	if (mp)
13696 		*mp = 0;
13697 	if (np)
13698 		*np = 0;
13699 	return 0;
13700 }
13701 
13702 /**
13703  * init_qos - init RX qos
13704  * @dd - device data
13705  * @rmt - RSM map table
13706  *
13707  * This routine initializes Rule 0 and the RSM map table to implement
13708  * quality of service (qos).
13709  *
13710  * If all of the limit tests succeed, qos is applied based on the array
13711  * interpretation of krcvqs where entry 0 is VL0.
13712  *
13713  * The number of vl bits (n) and the number of qpn bits (m) are computed to
13714  * feed both the RSM map table and the single rule.
13715  */
13716 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
13717 {
13718 	struct rsm_rule_data rrd;
13719 	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13720 	unsigned int rmt_entries;
13721 	u64 reg;
13722 
13723 	if (!rmt)
13724 		goto bail;
13725 	rmt_entries = qos_rmt_entries(dd, &m, &n);
13726 	if (rmt_entries == 0)
13727 		goto bail;
13728 	qpns_per_vl = 1 << m;
13729 
13730 	/* enough room in the map table? */
13731 	rmt_entries = 1 << (m + n);
13732 	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
13733 		goto bail;
13734 
13735 	/* add qos entries to the RSM map table */
13736 	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13737 		unsigned tctxt;
13738 
13739 		for (qpn = 0, tctxt = ctxt;
13740 		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13741 			unsigned idx, regoff, regidx;
13742 
13743 			/* generate the index the hardware will produce */
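			/*
			 * e.g. (illustrative) n = 1, i = 1 (VL1), qpn = 2:
			 * idx = rmt->used + ((2 << 1) ^ 1) = rmt->used + 5
			 */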
13744 			idx = rmt->used + ((qpn << n) ^ i);
13745 			regoff = (idx % 8) * 8;
13746 			regidx = idx / 8;
13747 			/* replace default with context number */
13748 			reg = rmt->map[regidx];
13749 			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13750 				<< regoff);
13751 			reg |= (u64)(tctxt++) << regoff;
13752 			rmt->map[regidx] = reg;
13753 			if (tctxt == ctxt + krcvqs[i])
13754 				tctxt = ctxt;
13755 		}
13756 		ctxt += krcvqs[i];
13757 	}
13758 
13759 	rrd.offset = rmt->used;
13760 	rrd.pkt_type = 2;
13761 	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13762 	rrd.field2_off = LRH_SC_MATCH_OFFSET;
13763 	rrd.index1_off = LRH_SC_SELECT_OFFSET;
13764 	rrd.index1_width = n;
13765 	rrd.index2_off = QPN_SELECT_OFFSET;
13766 	rrd.index2_width = m + n;
13767 	rrd.mask1 = LRH_BTH_MASK;
13768 	rrd.value1 = LRH_BTH_VALUE;
13769 	rrd.mask2 = LRH_SC_MASK;
13770 	rrd.value2 = LRH_SC_VALUE;
13771 
13772 	/* add rule 0 */
13773 	add_rsm_rule(dd, 0, &rrd);
13774 
13775 	/* mark RSM map entries as used */
13776 	rmt->used += rmt_entries;
13777 	/* map everything else to the mcast/err/vl15 context */
13778 	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13779 	dd->qos_shift = n + 1;
13780 	return;
13781 bail:
13782 	dd->qos_shift = 1;
13783 	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13784 }
13785 
13786 static void init_user_fecn_handling(struct hfi1_devdata *dd,
13787 				    struct rsm_map_table *rmt)
13788 {
13789 	struct rsm_rule_data rrd;
13790 	u64 reg;
13791 	int i, idx, regoff, regidx;
13792 	u8 offset;
13793 
13794 	/* there needs to be enough room in the map table */
13795 	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13796 		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13797 		return;
13798 	}
13799 
13800 	/*
13801 	 * RSM will extract the destination context as an index into the
13802 	 * map table.  The destination contexts are a sequential block
13803 	 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13804 	 * Map entries are accessed as offset + extracted value.  Adjust
13805 	 * the added offset so this sequence can be placed anywhere in
13806 	 * the table - as long as the entries themselves do not wrap.
13807 	 * There are only enough bits in offset for the table size, so
13808 	 * start with that to allow for a "negative" offset.
13809 	 */
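	/*
	 * Worked example (hypothetical values): with a 256-entry map table,
	 * rmt->used = 20 and first_user_ctxt = 8, offset = (u8)(256 + 20 - 8)
	 * = 12, so extracted context 8 indexes entry 8 + 12 = 20 = rmt->used.
	 */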
13810 	offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13811 						(int)dd->first_user_ctxt);
13812 
13813 	for (i = dd->first_user_ctxt, idx = rmt->used;
13814 				i < dd->num_rcv_contexts; i++, idx++) {
13815 		/* replace with identity mapping */
13816 		regoff = (idx % 8) * 8;
13817 		regidx = idx / 8;
13818 		reg = rmt->map[regidx];
13819 		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13820 		reg |= (u64)i << regoff;
13821 		rmt->map[regidx] = reg;
13822 	}
13823 
13824 	/*
13825 	 * For RSM intercept of Expected FECN packets:
13826 	 * o packet type 0 - expected
13827 	 * o match on F (bit 95), using select/match 1, and
13828 	 * o match on SH (bit 133), using select/match 2.
13829 	 *
13830 	 * Use index 1 to extract the 8-bit receive context from DestQP
13831 	 * (start at bit 64).  Use that as the RSM map table index.
13832 	 */
13833 	rrd.offset = offset;
13834 	rrd.pkt_type = 0;
13835 	rrd.field1_off = 95;
13836 	rrd.field2_off = 133;
13837 	rrd.index1_off = 64;
13838 	rrd.index1_width = 8;
13839 	rrd.index2_off = 0;
13840 	rrd.index2_width = 0;
13841 	rrd.mask1 = 1;
13842 	rrd.value1 = 1;
13843 	rrd.mask2 = 1;
13844 	rrd.value2 = 1;
13845 
13846 	/* add rule 1 */
13847 	add_rsm_rule(dd, 1, &rrd);
13848 
13849 	rmt->used += dd->num_user_contexts;
13850 }
13851 
13852 static void init_rxe(struct hfi1_devdata *dd)
13853 {
13854 	struct rsm_map_table *rmt;
13855 
13856 	/* enable all receive errors */
13857 	write_csr(dd, RCV_ERR_MASK, ~0ull);
13858 
13859 	rmt = alloc_rsm_map_table(dd);
13860 	/* set up QOS, including the QPN map table */
13861 	init_qos(dd, rmt);
13862 	init_user_fecn_handling(dd, rmt);
13863 	complete_rsm_map_table(dd, rmt);
13864 	kfree(rmt);
13865 
13866 	/*
13867 	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13868 	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13869 	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
13870 	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13871 	 * Max_PayLoad_Size set to its minimum of 128.
13872 	 *
13873 	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13874 	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
13875 	 * tune_pcie_caps() which is called after this routine.
13876 	 */
13877 }
13878 
13879 static void init_other(struct hfi1_devdata *dd)
13880 {
13881 	/* enable all CCE errors */
13882 	write_csr(dd, CCE_ERR_MASK, ~0ull);
13883 	/* enable *some* Misc errors */
13884 	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13885 	/* enable all DC errors, except LCB */
13886 	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13887 	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13888 }
13889 
13890 /*
13891  * Fill out the given AU table using the given CU.  A CU is defined in terms
13892  * of AUs.  The table is an encoding: given the index, how many AUs does that
13893  * represent?
13894  *
13895  * NOTE: Assumes that the register layout is the same for the
13896  * local and remote tables.
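 *
 * From the writes below: index 0 encodes 0 AUs, index 1 encodes 1 AU, and
 * index k >= 2 encodes (1 << (k - 1)) * CU AUs.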
13897  */
13898 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13899 			       u32 csr0to3, u32 csr4to7)
13900 {
13901 	write_csr(dd, csr0to3,
13902 		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13903 		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13904 		  2ull * cu <<
13905 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13906 		  4ull * cu <<
13907 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13908 	write_csr(dd, csr4to7,
13909 		  8ull * cu <<
13910 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13911 		  16ull * cu <<
13912 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13913 		  32ull * cu <<
13914 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13915 		  64ull * cu <<
13916 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13917 }
13918 
13919 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13920 {
13921 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13922 			   SEND_CM_LOCAL_AU_TABLE4_TO7);
13923 }
13924 
13925 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13926 {
13927 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13928 			   SEND_CM_REMOTE_AU_TABLE4_TO7);
13929 }
13930 
13931 static void init_txe(struct hfi1_devdata *dd)
13932 {
13933 	int i;
13934 
13935 	/* enable all PIO, SDMA, general, and Egress errors */
13936 	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13937 	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13938 	write_csr(dd, SEND_ERR_MASK, ~0ull);
13939 	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13940 
13941 	/* enable all per-context and per-SDMA engine errors */
13942 	for (i = 0; i < dd->chip_send_contexts; i++)
13943 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13944 	for (i = 0; i < dd->chip_sdma_engines; i++)
13945 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13946 
13947 	/* set the local CU to AU mapping */
13948 	assign_local_cm_au_table(dd, dd->vcu);
13949 
13950 	/*
13951 	 * Set reasonable default for Credit Return Timer
13952 	 * Don't set on Simulator - causes it to choke.
13953 	 */
13954 	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13955 		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13956 }
13957 
13958 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13959 {
13960 	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13961 	unsigned sctxt;
13962 	int ret = 0;
13963 	u64 reg;
13964 
13965 	if (!rcd || !rcd->sc) {
13966 		ret = -EINVAL;
13967 		goto done;
13968 	}
13969 	sctxt = rcd->sc->hw_context;
13970 	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13971 		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13972 		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13973 	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13974 	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13975 		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13976 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13977 	/*
13978 	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13979 	 */
13980 	if (!is_ax(dd)) {
13981 		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13982 		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13983 		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13984 	}
13985 
13986 	/* Enable J_KEY check on receive context. */
13987 	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13988 		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13989 		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13990 	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13991 done:
13992 	return ret;
13993 }
13994 
13995 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13996 {
13997 	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13998 	unsigned sctxt;
13999 	int ret = 0;
14000 	u64 reg;
14001 
14002 	if (!rcd || !rcd->sc) {
14003 		ret = -EINVAL;
14004 		goto done;
14005 	}
14006 	sctxt = rcd->sc->hw_context;
14007 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14008 	/*
14009 	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14010 	 * This check would not have been enabled for A0 h/w, see
14011 	 * set_ctxt_jkey().
14012 	 */
14013 	if (!is_ax(dd)) {
14014 		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14015 		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14016 		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14017 	}
14018 	/* Turn off the J_KEY on the receive side */
14019 	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14020 done:
14021 	return ret;
14022 }
14023 
14024 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14025 {
14026 	struct hfi1_ctxtdata *rcd;
14027 	unsigned sctxt;
14028 	int ret = 0;
14029 	u64 reg;
14030 
14031 	if (ctxt < dd->num_rcv_contexts) {
14032 		rcd = dd->rcd[ctxt];
14033 	} else {
14034 		ret = -EINVAL;
14035 		goto done;
14036 	}
14037 	if (!rcd || !rcd->sc) {
14038 		ret = -EINVAL;
14039 		goto done;
14040 	}
14041 	sctxt = rcd->sc->hw_context;
14042 	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14043 		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14044 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14045 	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14046 	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14047 	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14048 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14049 done:
14050 	return ret;
14051 }
14052 
14053 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14054 {
14055 	struct hfi1_ctxtdata *rcd;
14056 	unsigned sctxt;
14057 	int ret = 0;
14058 	u64 reg;
14059 
14060 	if (ctxt < dd->num_rcv_contexts) {
14061 		rcd = dd->rcd[ctxt];
14062 	} else {
14063 		ret = -EINVAL;
14064 		goto done;
14065 	}
14066 	if (!rcd || !rcd->sc) {
14067 		ret = -EINVAL;
14068 		goto done;
14069 	}
14070 	sctxt = rcd->sc->hw_context;
14071 	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14072 	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14073 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14074 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14075 done:
14076 	return ret;
14077 }
14078 
14079 /*
14080  * Start cleaning up the chip. Our clean up happens in multiple
14081  * stages and this is just the first.
14082  */
14083 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14084 {
14085 	aspm_exit(dd);
14086 	free_cntrs(dd);
14087 	free_rcverr(dd);
14088 	clean_up_interrupts(dd);
14089 	finish_chip_resources(dd);
14090 }
14091 
14092 #define HFI_BASE_GUID(dev) \
14093 	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
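
/*
 * The two HFIs on an ASIC are expected to differ only in the GUID_HFI_INDEX
 * bit of their GUIDs; masking that bit off yields a base GUID shared by
 * both, which init_asic_data() below uses to find the peer device.
 */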
14094 
14095 /*
14096  * Information can be shared between the two HFIs on the same ASIC
14097  * in the same OS.  This function finds the peer device and sets
14098  * up a shared structure.
14099  */
14100 static int init_asic_data(struct hfi1_devdata *dd)
14101 {
14102 	unsigned long flags;
14103 	struct hfi1_devdata *tmp, *peer = NULL;
14104 	int ret = 0;
14105 
14106 	spin_lock_irqsave(&hfi1_devs_lock, flags);
14107 	/* Find our peer device */
14108 	list_for_each_entry(tmp, &hfi1_dev_list, list) {
14109 		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14110 		    dd->unit != tmp->unit) {
14111 			peer = tmp;
14112 			break;
14113 		}
14114 	}
14115 
14116 	if (peer) {
14117 		dd->asic_data = peer->asic_data;
14118 	} else {
14119 		dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14120 		if (!dd->asic_data) {
14121 			ret = -ENOMEM;
14122 			goto done;
14123 		}
14124 		mutex_init(&dd->asic_data->asic_resource_mutex);
14125 	}
14126 	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14127 
14128 done:
14129 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14130 	return ret;
14131 }
14132 
14133 /*
14134  * Set dd->boardname.  Use a generic name if a name is not returned from
14135  * EFI variable space.
14136  *
14137  * Return 0 on success, -ENOMEM if space could not be allocated.
14138  */
14139 static int obtain_boardname(struct hfi1_devdata *dd)
14140 {
14141 	/* generic board description */
14142 	const char generic[] =
14143 		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14144 	unsigned long size;
14145 	int ret;
14146 
14147 	ret = read_hfi1_efi_var(dd, "description", &size,
14148 				(void **)&dd->boardname);
14149 	if (ret) {
14150 		dd_dev_info(dd, "Board description not found\n");
14151 		/* use generic description */
14152 		dd->boardname = kstrdup(generic, GFP_KERNEL);
14153 		if (!dd->boardname)
14154 			return -ENOMEM;
14155 	}
14156 	return 0;
14157 }
14158 
14159 /*
14160  * Check the interrupt registers to make sure that they are mapped correctly.
14161  * It is intended to help the user identify any mismapping by the VMM when the
14162  * driver is running in a VM. This function should only be called before
14163  * interrupts are set up properly.
14164  *
14165  * Return 0 on success, -EINVAL on failure.
14166  */
14167 static int check_int_registers(struct hfi1_devdata *dd)
14168 {
14169 	u64 reg;
14170 	u64 all_bits = ~(u64)0;
14171 	u64 mask;
14172 
14173 	/* Clear CceIntMask[0] to avoid raising any interrupts */
14174 	mask = read_csr(dd, CCE_INT_MASK);
14175 	write_csr(dd, CCE_INT_MASK, 0ull);
14176 	reg = read_csr(dd, CCE_INT_MASK);
14177 	if (reg)
14178 		goto err_exit;
14179 
14180 	/* Clear all interrupt status bits */
14181 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14182 	reg = read_csr(dd, CCE_INT_STATUS);
14183 	if (reg)
14184 		goto err_exit;
14185 
14186 	/* Set all interrupt status bits */
14187 	write_csr(dd, CCE_INT_FORCE, all_bits);
14188 	reg = read_csr(dd, CCE_INT_STATUS);
14189 	if (reg != all_bits)
14190 		goto err_exit;
14191 
14192 	/* Restore the interrupt mask */
14193 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14194 	write_csr(dd, CCE_INT_MASK, mask);
14195 
14196 	return 0;
14197 err_exit:
14198 	write_csr(dd, CCE_INT_MASK, mask);
14199 	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14200 	return -EINVAL;
14201 }
14202 
14203 /**
14204  * Allocate and initialize the device structure for the hfi.
14205  * @dev: the pci_dev for hfi1_ib device
14206  * @ent: pci_device_id struct for this dev
14207  *
14208  * Also allocates, initializes, and returns the devdata struct for this
14209  * device instance
14210  *
14211  * This is global, and is called directly at init to set up the
14212  * chip-specific function pointers for later use.
14213  */
14214 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14215 				  const struct pci_device_id *ent)
14216 {
14217 	struct hfi1_devdata *dd;
14218 	struct hfi1_pportdata *ppd;
14219 	u64 reg;
14220 	int i, ret;
14221 	static const char * const inames[] = { /* implementation names */
14222 		"RTL silicon",
14223 		"RTL VCS simulation",
14224 		"RTL FPGA emulation",
14225 		"Functional simulator"
14226 	};
14227 	struct pci_dev *parent = pdev->bus->self;
14228 
14229 	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14230 				sizeof(struct hfi1_pportdata));
14231 	if (IS_ERR(dd))
14232 		goto bail;
14233 	ppd = dd->pport;
14234 	for (i = 0; i < dd->num_pports; i++, ppd++) {
14235 		int vl;
14236 		/* init common fields */
14237 		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14238 		/* DC supports 4 link widths */
14239 		ppd->link_width_supported =
14240 			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14241 			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14242 		ppd->link_width_downgrade_supported =
14243 			ppd->link_width_supported;
14244 		/* start out enabling only 4X */
14245 		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14246 		ppd->link_width_downgrade_enabled =
14247 					ppd->link_width_downgrade_supported;
14248 		/* link width active is 0 when link is down */
14249 		/* link width downgrade active is 0 when link is down */
14250 
14251 		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14252 		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
14253 			hfi1_early_err(&pdev->dev,
14254 				       "Invalid num_vls %u, using %u VLs\n",
14255 				    num_vls, HFI1_MAX_VLS_SUPPORTED);
14256 			num_vls = HFI1_MAX_VLS_SUPPORTED;
14257 		}
14258 		ppd->vls_supported = num_vls;
14259 		ppd->vls_operational = ppd->vls_supported;
14260 		ppd->actual_vls_operational = ppd->vls_supported;
14261 		/* Set the default MTU. */
14262 		for (vl = 0; vl < num_vls; vl++)
14263 			dd->vld[vl].mtu = hfi1_max_mtu;
14264 		dd->vld[15].mtu = MAX_MAD_PACKET;
14265 		/*
14266 		 * Set the initial values to reasonable default, will be set
14267 		 * for real when link is up.
14268 		 */
14269 		ppd->lstate = IB_PORT_DOWN;
14270 		ppd->overrun_threshold = 0x4;
14271 		ppd->phy_error_threshold = 0xf;
14272 		ppd->port_crc_mode_enabled = link_crc_mask;
14273 		/* initialize supported LTP CRC mode */
14274 		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14275 		/* initialize enabled LTP CRC mode */
14276 		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14277 		/* start in offline */
14278 		ppd->host_link_state = HLS_DN_OFFLINE;
14279 		init_vl_arb_caches(ppd);
14280 		ppd->last_pstate = 0xff; /* invalid value */
14281 	}
14282 
14283 	dd->link_default = HLS_DN_POLL;
14284 
14285 	/*
14286 	 * Do remaining PCIe setup and save PCIe values in dd.
14287 	 * Any error printing is already done by the init code.
14288 	 * On return, we have the chip mapped.
14289 	 */
14290 	ret = hfi1_pcie_ddinit(dd, pdev, ent);
14291 	if (ret < 0)
14292 		goto bail_free;
14293 
14294 	/* verify that reads actually work, save revision for reset check */
14295 	dd->revision = read_csr(dd, CCE_REVISION);
14296 	if (dd->revision == ~(u64)0) {
14297 		dd_dev_err(dd, "cannot read chip CSRs\n");
14298 		ret = -EINVAL;
14299 		goto bail_cleanup;
14300 	}
14301 	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14302 			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
14303 	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14304 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
14305 
14306 	/*
14307 	 * Check interrupt registers mapping if the driver has no access to
14308 	 * the upstream component. In this case, it is likely that the driver
14309 	 * is running in a VM.
14310 	 */
14311 	if (!parent) {
14312 		ret = check_int_registers(dd);
14313 		if (ret)
14314 			goto bail_cleanup;
14315 	}
14316 
14317 	/*
14318 	 * obtain the hardware ID - NOT related to unit, which is a
14319 	 * software enumeration
14320 	 */
14321 	reg = read_csr(dd, CCE_REVISION2);
14322 	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14323 					& CCE_REVISION2_HFI_ID_MASK;
14324 	/* the variable size will remove unwanted bits */
14325 	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14326 	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14327 	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14328 		    dd->icode < ARRAY_SIZE(inames) ?
14329 		    inames[dd->icode] : "unknown", (int)dd->irev);
14330 
14331 	/* speeds the hardware can support */
14332 	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14333 	/* speeds allowed to run at */
14334 	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14335 	/* give a reasonable active value, will be set on link up */
14336 	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14337 
14338 	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14339 	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14340 	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14341 	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14342 	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14343 	/* fix up link widths for emulation _p */
14344 	ppd = dd->pport;
14345 	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14346 		ppd->link_width_supported =
14347 			ppd->link_width_enabled =
14348 			ppd->link_width_downgrade_supported =
14349 			ppd->link_width_downgrade_enabled =
14350 				OPA_LINK_WIDTH_1X;
14351 	}
14352 	/* ensure num_vls isn't larger than the number of sdma engines */
14353 	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14354 		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14355 			   num_vls, dd->chip_sdma_engines);
14356 		num_vls = dd->chip_sdma_engines;
14357 		ppd->vls_supported = dd->chip_sdma_engines;
14358 		ppd->vls_operational = ppd->vls_supported;
14359 	}
14360 
14361 	/*
14362 	 * Convert the ns parameter into the units of 64 cclocks used by the CSR.
14363 	 * Limit the max if larger than the field holds.  If timeout is
14364 	 * non-zero, then the calculated field will be at least 1.
14365 	 *
14366 	 * Must be after icode is set up - the cclock rate depends
14367 	 * on knowing the hardware being used.
14368 	 */
14369 	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14370 	if (dd->rcv_intr_timeout_csr >
14371 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14372 		dd->rcv_intr_timeout_csr =
14373 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14374 	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14375 		dd->rcv_intr_timeout_csr = 1;
14376 
14377 	/* needs to be done before we look for the peer device */
14378 	read_guid(dd);
14379 
14380 	/* set up shared ASIC data with peer device */
14381 	ret = init_asic_data(dd);
14382 	if (ret)
14383 		goto bail_cleanup;
14384 
14385 	/* obtain chip sizes, reset chip CSRs */
14386 	init_chip(dd);
14387 
14388 	/* read in the PCIe link speed information */
14389 	ret = pcie_speeds(dd);
14390 	if (ret)
14391 		goto bail_cleanup;
14392 
14393 	/* Needs to be called before hfi1_firmware_init */
14394 	get_platform_config(dd);
14395 
14396 	/* read in firmware */
14397 	ret = hfi1_firmware_init(dd);
14398 	if (ret)
14399 		goto bail_cleanup;
14400 
14401 	/*
14402 	 * In general, the PCIe Gen3 transition must occur after the
14403 	 * chip has been idled (so it won't initiate any PCIe transactions
14404 	 * e.g. an interrupt) and before the driver changes any registers
14405 	 * (the transition will reset the registers).
14406 	 *
14407 	 * In particular, place this call after:
14408 	 * - init_chip()     - the chip will not initiate any PCIe transactions
14409 	 * - pcie_speeds()   - reads the current link speed
14410 	 * - hfi1_firmware_init() - the needed firmware is ready to be
14411 	 *			    downloaded
14412 	 */
14413 	ret = do_pcie_gen3_transition(dd);
14414 	if (ret)
14415 		goto bail_cleanup;
14416 
14417 	/* start setting dd values and adjusting CSRs */
14418 	init_early_variables(dd);
14419 
14420 	parse_platform_config(dd);
14421 
14422 	ret = obtain_boardname(dd);
14423 	if (ret)
14424 		goto bail_cleanup;
14425 
14426 	snprintf(dd->boardversion, BOARD_VERS_MAX,
14427 		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14428 		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14429 		 (u32)dd->majrev,
14430 		 (u32)dd->minrev,
14431 		 (dd->revision >> CCE_REVISION_SW_SHIFT)
14432 		    & CCE_REVISION_SW_MASK);
14433 
14434 	/*
14435 	 * The real cpu mask is part of the affinity struct but has to be
14436 	 * initialized earlier than the rest of the affinity struct because it
14437 	 * is needed to calculate the number of user contexts in
14438 	 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14439 	 * which initializes the rest of the affinity struct members,
14440 	 * depends on set_up_context_variables() for the number of kernel
14441 	 * contexts, so it cannot be called before set_up_context_variables().
14442 	 */
14443 	ret = init_real_cpu_mask(dd);
14444 	if (ret)
14445 		goto bail_cleanup;
14446 
14447 	ret = set_up_context_variables(dd);
14448 	if (ret)
14449 		goto bail_cleanup;
14450 
14451 	/* set initial RXE CSRs */
14452 	init_rxe(dd);
14453 	/* set initial TXE CSRs */
14454 	init_txe(dd);
14455 	/* set initial non-RXE, non-TXE CSRs */
14456 	init_other(dd);
14457 	/* set up KDETH QP prefix in both RX and TX CSRs */
14458 	init_kdeth_qp(dd);
14459 
14460 	hfi1_dev_affinity_init(dd);
14461 
14462 	/* send contexts must be set up before receive contexts */
14463 	ret = init_send_contexts(dd);
14464 	if (ret)
14465 		goto bail_cleanup;
14466 
14467 	ret = hfi1_create_ctxts(dd);
14468 	if (ret)
14469 		goto bail_cleanup;
14470 
14471 	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14472 	/*
14473 	 * rcd[0] is guaranteed to be valid by this point. Also, all
14474 	 * contexts are using the same value, as per the module parameter.
14475 	 */
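	/*
	 * sizeof(u64) / sizeof(u32) == 2, so rhf_offset works out to
	 * rcvhdrqentsize - 2 (in dwords).
	 */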
14476 	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14477 
14478 	ret = init_pervl_scs(dd);
14479 	if (ret)
14480 		goto bail_cleanup;
14481 
14482 	/* sdma init */
14483 	for (i = 0; i < dd->num_pports; ++i) {
14484 		ret = sdma_init(dd, i);
14485 		if (ret)
14486 			goto bail_cleanup;
14487 	}
14488 
14489 	/* use contexts created by hfi1_create_ctxts */
14490 	ret = set_up_interrupts(dd);
14491 	if (ret)
14492 		goto bail_cleanup;
14493 
14494 	/* set up LCB access - must be after set_up_interrupts() */
14495 	init_lcb_access(dd);
14496 
14497 	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14498 		 dd->base_guid & 0xFFFFFF);
14499 
14500 	dd->oui1 = dd->base_guid >> 56 & 0xFF;
14501 	dd->oui2 = dd->base_guid >> 48 & 0xFF;
14502 	dd->oui3 = dd->base_guid >> 40 & 0xFF;
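	/* oui1/2/3 are the three most significant bytes of the base GUID (the OUI) */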
14503 
14504 	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14505 	if (ret)
14506 		goto bail_clear_intr;
14507 	check_fabric_firmware_versions(dd);
14508 
14509 	thermal_init(dd);
14510 
14511 	ret = init_cntrs(dd);
14512 	if (ret)
14513 		goto bail_clear_intr;
14514 
14515 	ret = init_rcverr(dd);
14516 	if (ret)
14517 		goto bail_free_cntrs;
14518 
14519 	ret = eprom_init(dd);
14520 	if (ret)
14521 		goto bail_free_rcverr;
14522 
14523 	goto bail;
14524 
14525 bail_free_rcverr:
14526 	free_rcverr(dd);
14527 bail_free_cntrs:
14528 	free_cntrs(dd);
14529 bail_clear_intr:
14530 	clean_up_interrupts(dd);
14531 bail_cleanup:
14532 	hfi1_pcie_ddcleanup(dd);
14533 bail_free:
14534 	hfi1_free_devdata(dd);
14535 	dd = ERR_PTR(ret);
14536 bail:
14537 	return dd;
14538 }
14539 
14540 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14541 			u32 dw_len)
14542 {
14543 	u32 delta_cycles;
14544 	u32 current_egress_rate = ppd->current_egress_rate;
14545 	/* rates here are in units of 10^6 bits/sec */
14546 
14547 	if (desired_egress_rate == -1)
14548 		return 0; /* shouldn't happen */
14549 
14550 	if (desired_egress_rate >= current_egress_rate)
14551 		return 0; /* we can't help it go faster, only slower */
14552 
14553 	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14554 			egress_cycles(dw_len * 4, current_egress_rate);
14555 
14556 	return (u16)delta_cycles;
14557 }
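
/*
 * The delta returned above feeds the PBC static rate control count in
 * create_pbc() below: assuming egress_cycles() converts a byte count
 * and a rate in Mbit/sec into fabric clock cycles, the delta is the
 * number of extra cycles needed for a dw_len-dword packet to egress
 * at the slower desired rate instead of the current link rate.
 */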
14558 
14559 /**
14560  * create_pbc - build a pbc for transmission
14561  * @flags: special case flags or-ed in built pbc
14562  * @srate_mbs: static rate in Mbit/sec
14563  * @vl: virtual lane
14564  * @dw_len: dword length (header words + data words + pbc words)
14565  *
14566  * Create a PBC with the given flags, rate, VL, and length.
14567  *
14568  * NOTE: The PBC created will not insert any HCRC - all callers but one are
14569  * for verbs, which does not use this PSM feature.  The lone other caller
14570  * is for the diagnostic interface which calls this if the user does not
14571  * supply their own PBC.
14572  */
14573 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14574 	       u32 dw_len)
14575 {
14576 	u64 pbc, delay = 0;
14577 
14578 	if (unlikely(srate_mbs))
14579 		delay = delay_cycles(ppd, srate_mbs, dw_len);
14580 
14581 	pbc = flags
14582 		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14583 		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14584 		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14585 		| (dw_len & PBC_LENGTH_DWS_MASK)
14586 			<< PBC_LENGTH_DWS_SHIFT;
14587 
14588 	return pbc;
14589 }
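
/*
 * Hypothetical usage sketch (variable names are illustrative only):
 * the length passed in is header dwords + payload dwords + 2 dwords
 * for the 64-bit PBC itself, e.g.
 *
 *	plen = hdrwords + nwords + 2;
 *	pbc = create_pbc(ppd, 0, qp_srate_mbs, vl, plen);
 *
 * Passing 0 for srate_mbs skips delay_cycles(), so no static rate
 * control delay is added.
 */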
14590 
14591 #define SBUS_THERMAL    0x4f
14592 #define SBUS_THERM_MONITOR_MODE 0x1
14593 
14594 #define THERM_FAILURE(dev, ret, reason) \
14595 	dd_dev_err((dev),						\
14596 		   "Thermal sensor initialization failed: %s (%d)\n",	\
14597 		   (reason), (ret))
14598 
14599 /*
14600  * Initialize the thermal sensor.
14601  *
14602  * After initialization, enable polling of the thermal sensor
14603  * through the SBus interface. For this to work, the SBus Master
14604  * firmware must be loaded, because the HW polling logic uses SBus
14605  * interrupts, which the default firmware does not support.
14606  * Otherwise, no data will be returned through the
14607  * ASIC_STS_THERM CSR.
14608  */
14609 static int thermal_init(struct hfi1_devdata *dd)
14610 {
14611 	int ret = 0;
14612 
14613 	if (dd->icode != ICODE_RTL_SILICON ||
14614 	    check_chip_resource(dd, CR_THERM_INIT, NULL))
14615 		return ret;
14616 
14617 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14618 	if (ret) {
14619 		THERM_FAILURE(dd, ret, "Acquire SBus");
14620 		return ret;
14621 	}
14622 
14623 	dd_dev_info(dd, "Initializing thermal sensor\n");
14624 	/* Disable polling of thermal readings */
14625 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14626 	msleep(100);
14627 	/* Thermal Sensor Initialization */
14628 	/*    Step 1: Reset the Thermal SBus Receiver */
14629 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14630 				RESET_SBUS_RECEIVER, 0);
14631 	if (ret) {
14632 		THERM_FAILURE(dd, ret, "Bus Reset");
14633 		goto done;
14634 	}
14635 	/*    Step 2: Set Reset bit in Thermal block */
14636 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14637 				WRITE_SBUS_RECEIVER, 0x1);
14638 	if (ret) {
14639 		THERM_FAILURE(dd, ret, "Therm Block Reset");
14640 		goto done;
14641 	}
14642 	/*    Step 3: Write clock divider value 0x32 (= 50: 100MHz / 50 = 2MHz) */
14643 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14644 				WRITE_SBUS_RECEIVER, 0x32);
14645 	if (ret) {
14646 		THERM_FAILURE(dd, ret, "Write Clock Div");
14647 		goto done;
14648 	}
14649 	/*    Step 4: Select temperature mode */
14650 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14651 				WRITE_SBUS_RECEIVER,
14652 				SBUS_THERM_MONITOR_MODE);
14653 	if (ret) {
14654 		THERM_FAILURE(dd, ret, "Write Mode Sel");
14655 		goto done;
14656 	}
14657 	/*    Step 5: De-assert block reset and start conversion */
14658 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14659 				WRITE_SBUS_RECEIVER, 0x2);
14660 	if (ret) {
14661 		THERM_FAILURE(dd, ret, "Write Reset Deassert");
14662 		goto done;
14663 	}
14664 	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14665 	msleep(22);
14666 
14667 	/* Enable polling of thermal readings */
14668 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14669 
14670 	/* Set initialized flag */
14671 	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14672 	if (ret)
14673 		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14674 
14675 done:
14676 	release_chip_resource(dd, CR_SBUS);
14677 	return ret;
14678 }
14679 
14680 static void handle_temp_err(struct hfi1_devdata *dd)
14681 {
14682 	struct hfi1_pportdata *ppd = &dd->pport[0];
14683 	/*
14684 	 * Thermal Critical Interrupt
14685 	 * Put the device into forced freeze mode, take link down to
14686 	 * offline, and put DC into reset.
14687 	 */
14688 	dd_dev_emerg(dd,
14689 		     "Critical temperature reached! Forcing device into freeze mode!\n");
14690 	dd->flags |= HFI1_FORCED_FREEZE;
14691 	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14692 	/*
14693 	 * Shut DC down as much and as quickly as possible.
14694 	 *
14695 	 * Step 1: Take the link down to OFFLINE. This will cause the
14696 	 *         8051 to put the Serdes in reset. However, we don't want to
14697 	 *         go through the entire link state machine since we want to
14698 	 *         shutdown ASAP. Furthermore, this is not a graceful shutdown
14699 	 *         shut down ASAP. Furthermore, this is not a graceful shutdown
14700 	 *         Code below is almost the same as quiet_serdes() but avoids
14701 	 *         all the extra work and the sleeps.
14702 	 */
14703 	ppd->driver_link_ready = 0;
14704 	ppd->link_enabled = 0;
14705 	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14706 				PLS_OFFLINE);
14707 	/*
14708 	 * Step 2: Shutdown LCB and 8051
14709 	 *         After shutdown, do not restore DC_CFG_RESET value.
14710 	 */
14711 	dc_shutdown(dd);
14712 }
14713