1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51 
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 
68 #define NUM_IB_PORTS 1
69 
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73 
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77 
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88 
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92 
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96 
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100 
101 /* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106 
107 struct flag_table {
108 	u64 flag;	/* the flag */
109 	char *str;	/* description string */
110 	u16 extra;	/* extra information */
111 	u16 unused0;
112 	u32 unused1;
113 };
114 
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
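
/*
 * Editorial sketch (not part of the original driver): how a flag_table
 * is typically consumed.  Given a raw error-status value, walk the
 * table and report each set bit by name.  The helper name below is
 * hypothetical; the driver's real decoder (cf. flag_string() later in
 * this file) additionally handles buffer truncation and leftover bits.
 */
static inline void example_report_flags(struct hfi1_devdata *dd, u64 status,
					struct flag_table *table, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (status & table[i].flag)
			dd_dev_info(dd, "%s\n", table[i].str);
}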
118 
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED	0x1
121 #define SEC_PACKET_DROPPED	0x2
122 #define SEC_SC_HALTED		0x4	/* per-context only */
123 #define SEC_SPC_FREEZE		0x8	/* per-HFI only */
124 
125 #define DEFAULT_KRCVQS		  2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128 /* sizes for both the QP and RSM map tables */
129 #define NUM_MAP_ENTRIES		256
130 #define NUM_MAP_REGS             32
131 
132 /* Bit offset into the GUID which carries HFI id information */
133 #define GUID_HFI_INDEX_SHIFT     39
134 
135 /* extract the emulation revision */
136 #define emulator_rev(dd) ((dd)->irev >> 8)
137 /* parallel and serial emulation versions are 3 and 4 respectively */
138 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
139 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
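
/*
 * Worked example (editorial note): if dd->irev were 0x0104, then
 * emulator_rev(dd) == 1 and is_emulator_s(dd) is true (low nibble 4),
 * i.e. revision 1 of the serial emulator.
 */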
140 
141 /* RSM fields */
142 
143 /* packet type */
144 #define IB_PACKET_TYPE         2ull
145 #define QW_SHIFT               6ull
146 /* QPN[7..1] */
147 #define QPN_WIDTH              7ull
148 
149 /* LRH.BTH: QW 0, OFFSET 48 - for match */
150 #define LRH_BTH_QW             0ull
151 #define LRH_BTH_BIT_OFFSET     48ull
152 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
153 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
154 #define LRH_BTH_SELECT
155 #define LRH_BTH_MASK           3ull
156 #define LRH_BTH_VALUE          2ull
157 
158 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
159 #define LRH_SC_QW              0ull
160 #define LRH_SC_BIT_OFFSET      56ull
161 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
162 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
163 #define LRH_SC_MASK            128ull
164 #define LRH_SC_VALUE           0ull
165 
166 /* SC[n..0] QW 0, OFFSET 60 - for select */
167 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
168 
169 /* QPN[m+n:1] QW 1, OFFSET 1 */
170 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
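
/*
 * Worked example (editorial note): these offsets pack a quad-word index
 * above QW_SHIFT and a bit offset within that QW below it.  Thus
 * LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48 (QW 0, bit 48), and
 * QPN_SELECT_OFFSET = (1 << 6) | 1 = 65 (QW 1, bit 1).
 */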
171 
172 /* defines to build power on SC2VL table */
173 #define SC2VL_VAL( \
174 	num, \
175 	sc0, sc0val, \
176 	sc1, sc1val, \
177 	sc2, sc2val, \
178 	sc3, sc3val, \
179 	sc4, sc4val, \
180 	sc5, sc5val, \
181 	sc6, sc6val, \
182 	sc7, sc7val) \
183 ( \
184 	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
185 	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
186 	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
187 	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
188 	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
189 	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
190 	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
191 	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
192 )
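
/*
 * Usage sketch (editorial note; the argument values are illustrative
 * only):
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
 *		0,
 *		0, 0, 1, 0,
 *		2, 0, 3, 0,
 *		4, 0, 5, 0,
 *		6, 0, 7, 0));
 *
 * builds a single 64-bit value mapping SC0..SC7 to VL0, with each
 * (sc, scval) pair selecting the per-SC shift by token pasting.
 * DC_SC_VL_VAL below follows the same pattern for the 16-entry halves
 * of the DCC SC-to-VL table.
 */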
193 
194 #define DC_SC_VL_VAL( \
195 	range, \
196 	e0, e0val, \
197 	e1, e1val, \
198 	e2, e2val, \
199 	e3, e3val, \
200 	e4, e4val, \
201 	e5, e5val, \
202 	e6, e6val, \
203 	e7, e7val, \
204 	e8, e8val, \
205 	e9, e9val, \
206 	e10, e10val, \
207 	e11, e11val, \
208 	e12, e12val, \
209 	e13, e13val, \
210 	e14, e14val, \
211 	e15, e15val) \
212 ( \
213 	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
214 	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
215 	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
216 	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
217 	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
218 	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
219 	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
220 	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
221 	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
222 	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
223 	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
224 	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
225 	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
226 	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
227 	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
228 	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
229 )
230 
231 /* all CceStatus sub-block freeze bits */
232 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
233 			| CCE_STATUS_RXE_FROZE_SMASK \
234 			| CCE_STATUS_TXE_FROZE_SMASK \
235 			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
236 /* all CceStatus sub-block TXE pause bits */
237 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
238 			| CCE_STATUS_TXE_PAUSED_SMASK \
239 			| CCE_STATUS_SDMA_PAUSED_SMASK)
240 /* all CceStatus sub-block RXE pause bits */
241 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
242 
243 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
244 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
245 
246 /*
247  * CCE Error flags.
248  */
249 static struct flag_table cce_err_status_flags[] = {
250 /* 0*/	FLAG_ENTRY0("CceCsrParityErr",
251 		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
252 /* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
253 		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
254 /* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
255 		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
256 /* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
257 		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
258 /* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
259 		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
260 /* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
261 		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
262 /* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
263 		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
264 /* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
265 		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
266 /* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
267 		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
268 /* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
269 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
272 /*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
273 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
274 /*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
275 		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
276 /*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
277 		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
286 /*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
287 		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
288 /*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
289 		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
290 /*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
291 		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
292 /*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
293 		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
294 /*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
295 		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
296 /*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
297 		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
298 /*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
299 		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
300 /*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
301 		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
302 /*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
303 		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
304 /*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
305 		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
306 /*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
307 		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
308 /*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
309 		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
310 /*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
311 		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
312 /*31*/	FLAG_ENTRY0("LATriggered",
313 		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
314 /*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
315 		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
316 /*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
317 		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
318 /*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
319 		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
320 /*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
321 		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
322 /*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
323 		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
324 /*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
325 		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
326 /*38*/	FLAG_ENTRY0("CceIntMapCorErr",
327 		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
328 /*39*/	FLAG_ENTRY0("CceIntMapUncErr",
329 		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
330 /*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
331 		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
332 /*41-63 reserved*/
333 };
334 
335 /*
336  * Misc Error flags
337  */
338 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
339 static struct flag_table misc_err_status_flags[] = {
340 /* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
341 /* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
342 /* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
343 /* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
344 /* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
345 /* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
346 /* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
347 /* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
348 /* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
349 /* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
350 /*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
351 /*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
352 /*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
353 };
354 
355 /*
356  * TXE PIO Error flags and consequences
357  */
358 static struct flag_table pio_err_status_flags[] = {
359 /* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
360 	SEC_WRITE_DROPPED,
361 	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
362 /* 1*/	FLAG_ENTRY("PioWriteAddrParity",
363 	SEC_SPC_FREEZE,
364 	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
365 /* 2*/	FLAG_ENTRY("PioCsrParity",
366 	SEC_SPC_FREEZE,
367 	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
368 /* 3*/	FLAG_ENTRY("PioSbMemFifo0",
369 	SEC_SPC_FREEZE,
370 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
371 /* 4*/	FLAG_ENTRY("PioSbMemFifo1",
372 	SEC_SPC_FREEZE,
373 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
374 /* 5*/	FLAG_ENTRY("PioPccFifoParity",
375 	SEC_SPC_FREEZE,
376 	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
377 /* 6*/	FLAG_ENTRY("PioPecFifoParity",
378 	SEC_SPC_FREEZE,
379 	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
380 /* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
381 	SEC_SPC_FREEZE,
382 	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
383 /* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
384 	SEC_SPC_FREEZE,
385 	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
386 /* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
387 	SEC_SPC_FREEZE,
388 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
389 /*10*/	FLAG_ENTRY("PioSmPktResetParity",
390 	SEC_SPC_FREEZE,
391 	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
392 /*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
393 	SEC_SPC_FREEZE,
394 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
395 /*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
396 	SEC_SPC_FREEZE,
397 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
398 /*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
399 	0,
400 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
401 /*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
402 	0,
403 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
404 /*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
405 	SEC_SPC_FREEZE,
406 	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
407 /*16*/	FLAG_ENTRY("PioPpmcPblFifo",
408 	SEC_SPC_FREEZE,
409 	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
410 /*17*/	FLAG_ENTRY("PioInitSmIn",
411 	0,
412 	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
413 /*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
414 	SEC_SPC_FREEZE,
415 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
416 /*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
417 	SEC_SPC_FREEZE,
418 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
419 /*20*/	FLAG_ENTRY("PioHostAddrMemCor",
420 	0,
421 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
422 /*21*/	FLAG_ENTRY("PioWriteDataParity",
423 	SEC_SPC_FREEZE,
424 	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
425 /*22*/	FLAG_ENTRY("PioStateMachine",
426 	SEC_SPC_FREEZE,
427 	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
428 /*23*/	FLAG_ENTRY("PioWriteQwValidParity",
429 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
430 	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
431 /*24*/	FLAG_ENTRY("PioBlockQwCountParity",
432 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
433 	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
434 /*25*/	FLAG_ENTRY("PioVlfVlLenParity",
435 	SEC_SPC_FREEZE,
436 	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
437 /*26*/	FLAG_ENTRY("PioVlfSopParity",
438 	SEC_SPC_FREEZE,
439 	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
440 /*27*/	FLAG_ENTRY("PioVlFifoParity",
441 	SEC_SPC_FREEZE,
442 	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
443 /*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
444 	SEC_SPC_FREEZE,
445 	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
446 /*29*/	FLAG_ENTRY("PioPpmcSopLen",
447 	SEC_SPC_FREEZE,
448 	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
449 /*30-31 reserved*/
450 /*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
451 	SEC_SPC_FREEZE,
452 	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
453 /*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
454 	SEC_SPC_FREEZE,
455 	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
456 /*34*/	FLAG_ENTRY("PioPccSopHeadParity",
457 	SEC_SPC_FREEZE,
458 	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
459 /*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
460 	SEC_SPC_FREEZE,
461 	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
462 /*36-63 reserved*/
463 };
464 
465 /* TXE PIO errors that cause an SPC freeze */
466 #define ALL_PIO_FREEZE_ERR \
467 	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
468 	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
469 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
470 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
471 	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
472 	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
473 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
474 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
475 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
476 	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
477 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
478 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
479 	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
480 	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
481 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
482 	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
483 	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
484 	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
485 	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
486 	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
487 	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
488 	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
489 	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
490 	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
491 	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
492 	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
493 	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
494 	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
495 	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
496 
497 /*
498  * TXE SDMA Error flags
499  */
500 static struct flag_table sdma_err_status_flags[] = {
501 /* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
502 		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
503 /* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
504 		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
505 /* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
506 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
507 /* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
508 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
509 /*04-63 reserved*/
510 };
511 
512 /* TXE SDMA errors that cause an SPC freeze */
513 #define ALL_SDMA_FREEZE_ERR  \
514 		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
515 		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
516 		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
517 
518 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
519 #define PORT_DISCARD_EGRESS_ERRS \
520 	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
521 	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
522 	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
523 
524 /*
525  * TXE Egress Error flags
526  */
527 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
528 static struct flag_table egress_err_status_flags[] = {
529 /* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
530 /* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
531 /* 2 reserved */
532 /* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
533 		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
534 /* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
535 /* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
536 /* 6 reserved */
537 /* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
538 		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
539 /* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
540 		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
541 /* 9-10 reserved */
542 /*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
543 		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
544 /*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
545 /*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
546 /*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
547 /*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
548 /*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
549 		SEES(TX_SDMA0_DISALLOWED_PACKET)),
550 /*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
551 		SEES(TX_SDMA1_DISALLOWED_PACKET)),
552 /*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
553 		SEES(TX_SDMA2_DISALLOWED_PACKET)),
554 /*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
555 		SEES(TX_SDMA3_DISALLOWED_PACKET)),
556 /*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
557 		SEES(TX_SDMA4_DISALLOWED_PACKET)),
558 /*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
559 		SEES(TX_SDMA5_DISALLOWED_PACKET)),
560 /*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
561 		SEES(TX_SDMA6_DISALLOWED_PACKET)),
562 /*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
563 		SEES(TX_SDMA7_DISALLOWED_PACKET)),
564 /*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
565 		SEES(TX_SDMA8_DISALLOWED_PACKET)),
566 /*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
567 		SEES(TX_SDMA9_DISALLOWED_PACKET)),
568 /*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
569 		SEES(TX_SDMA10_DISALLOWED_PACKET)),
570 /*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
571 		SEES(TX_SDMA11_DISALLOWED_PACKET)),
572 /*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
573 		SEES(TX_SDMA12_DISALLOWED_PACKET)),
574 /*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
575 		SEES(TX_SDMA13_DISALLOWED_PACKET)),
576 /*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
577 		SEES(TX_SDMA14_DISALLOWED_PACKET)),
578 /*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
579 		SEES(TX_SDMA15_DISALLOWED_PACKET)),
580 /*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
581 		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
582 /*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
583 		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
584 /*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
585 		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
586 /*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
587 		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
588 /*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
589 		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
590 /*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
591 		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
592 /*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
593 		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
594 /*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
595 		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
596 /*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
597 		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
598 /*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
599 /*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
600 /*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
601 /*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
602 /*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
603 /*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
604 /*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
605 /*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
606 /*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
607 /*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
608 /*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
609 /*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
610 /*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
611 /*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
612 /*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
613 /*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
614 /*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
615 /*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
616 /*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
617 /*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
618 /*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
619 /*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
620 		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
621 /*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
622 		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
623 };
624 
625 /*
626  * TXE Egress Error Info flags
627  */
628 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
629 static struct flag_table egress_err_info_flags[] = {
630 /* 0*/	FLAG_ENTRY0("Reserved", 0ull),
631 /* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
632 /* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
633 /* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
634 /* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
635 /* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
636 /* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
637 /* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
638 /* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
639 /* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
640 /*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
641 /*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
642 /*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
643 /*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
644 /*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
645 /*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
646 /*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
647 /*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
648 /*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
649 /*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
650 /*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
651 /*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
652 };
653 
654 /* TXE Egress errors that cause an SPC freeze */
655 #define ALL_TXE_EGRESS_FREEZE_ERR \
656 	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
657 	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
658 	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
659 	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
660 	| SEES(TX_LAUNCH_CSR_PARITY) \
661 	| SEES(TX_SBRD_CTL_CSR_PARITY) \
662 	| SEES(TX_CONFIG_PARITY) \
663 	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
664 	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
665 	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
666 	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
667 	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
668 	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
669 	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
670 	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
671 	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
672 	| SEES(TX_CREDIT_RETURN_PARITY))
673 
674 /*
675  * TXE Send error flags
676  */
677 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
678 static struct flag_table send_err_status_flags[] = {
679 /* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
680 /* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
681 /* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
682 };
683 
684 /*
685  * TXE Send Context Error flags and consequences
686  */
687 static struct flag_table sc_err_status_flags[] = {
688 /* 0*/	FLAG_ENTRY("InconsistentSop",
689 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
690 		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
691 /* 1*/	FLAG_ENTRY("DisallowedPacket",
692 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
693 		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
694 /* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
695 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
696 		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
697 /* 3*/	FLAG_ENTRY("WriteOverflow",
698 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
699 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
700 /* 4*/	FLAG_ENTRY("WriteOutOfBounds",
701 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
702 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
703 /* 5-63 reserved*/
704 };
705 
706 /*
707  * RXE Receive Error flags
708  */
709 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
710 static struct flag_table rxe_err_status_flags[] = {
711 /* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
712 /* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
713 /* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
714 /* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
715 /* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
716 /* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
717 /* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
718 /* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
719 /* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
720 /* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
721 /*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
722 /*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
723 /*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
724 /*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
725 /*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
726 /*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
727 /*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
728 		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
729 /*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
730 /*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
731 /*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
732 		RXES(RBUF_BLOCK_LIST_READ_UNC)),
733 /*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
734 		RXES(RBUF_BLOCK_LIST_READ_COR)),
735 /*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
736 		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
737 /*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
738 		RXES(RBUF_CSR_QENT_CNT_PARITY)),
739 /*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
740 		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
741 /*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
742 		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
743 /*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
744 /*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
745 /*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
746 		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
747 /*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
748 /*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
749 /*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
750 /*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
751 /*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
752 /*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
753 /*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
754 /*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
755 		RXES(RBUF_FL_INITDONE_PARITY)),
756 /*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
757 		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
758 /*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
759 /*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
760 /*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
761 /*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
762 		RXES(LOOKUP_DES_PART1_UNC_COR)),
763 /*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
764 		RXES(LOOKUP_DES_PART2_PARITY)),
765 /*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
766 /*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
767 /*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
768 /*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
769 /*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
770 /*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
771 /*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
772 /*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
773 /*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
774 /*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
775 /*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
776 /*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
777 /*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
778 /*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
779 /*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
780 /*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
781 /*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
782 /*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
783 /*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
784 /*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
785 /*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
786 /*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
787 };
788 
789 /* RXE errors that will trigger an SPC freeze */
790 #define ALL_RXE_FREEZE_ERR  \
791 	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
792 	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
793 	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
794 	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
795 	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
796 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
797 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
798 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
799 	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
800 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
801 	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
802 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
803 	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
804 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
805 	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
806 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
807 	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
808 	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
809 	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
810 	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
811 	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
812 	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
813 	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
814 	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
815 	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
816 	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
817 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
818 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
819 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
820 	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
821 	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
822 	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
823 	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
824 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
825 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
826 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
827 	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
828 	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
829 	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
830 	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
831 	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
832 	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
833 	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
834 	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
835 
836 #define RXE_FREEZE_ABORT_MASK \
837 	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
838 	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
839 	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
840 
841 /*
842  * DCC Error Flags
843  */
844 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
845 static struct flag_table dcc_err_flags[] = {
846 	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
847 	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
848 	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
849 	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
850 	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
851 	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
852 	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
853 	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
854 	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
855 	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
856 	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
857 	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
858 	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
859 	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
860 	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
861 	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
862 	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
863 	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
864 	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
865 	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
866 	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
867 	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
868 	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
869 	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
870 	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
871 	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
872 	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
873 	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
874 	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
875 	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
876 	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
877 	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
878 	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
879 	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
880 	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
881 	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
882 	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
883 	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
884 	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
885 	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
886 	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
887 	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
888 	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
889 	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
890 	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
891 	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
892 };
893 
894 /*
895  * LCB error flags
896  */
897 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
898 static struct flag_table lcb_err_flags[] = {
899 /* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
900 /* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
901 /* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
902 /* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
903 		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
904 /* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
905 /* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
906 /* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
907 /* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
908 /* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
909 /* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
910 /*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
911 /*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
912 /*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
913 /*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
914 		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
915 /*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
916 /*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
917 /*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
918 /*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
919 /*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
920 /*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
921 		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
922 /*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
923 /*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
924 /*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
925 /*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
926 /*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
927 /*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
928 /*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
929 		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
930 /*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
931 /*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
932 		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
933 /*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
934 		LCBE(REDUNDANT_FLIT_PARITY_ERR))
935 };
936 
937 /*
938  * DC8051 Error Flags
939  */
940 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
941 static struct flag_table dc8051_err_flags[] = {
942 	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
943 	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
944 	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
945 	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
946 	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
947 	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
948 	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
949 	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
950 	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
951 		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
952 	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
953 };
954 
955 /*
956  * DC8051 Information Error flags
957  *
958  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
959  */
960 static struct flag_table dc8051_info_err_flags[] = {
961 	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
962 	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
963 	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
964 	FLAG_ENTRY0("Serdes internal loopback failure",
965 		    FAILED_SERDES_INTERNAL_LOOPBACK),
966 	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
967 	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
968 	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
969 	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
970 	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
971 	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
972 	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
973 	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
974 	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
975 	FLAG_ENTRY0("External Device Request Timeout",
976 		    EXTERNAL_DEVICE_REQ_TIMEOUT),
977 };
978 
979 /*
980  * DC8051 Information Host Information flags
981  *
982  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
983  */
984 static struct flag_table dc8051_info_host_msg_flags[] = {
985 	FLAG_ENTRY0("Host request done", 0x0001),
986 	FLAG_ENTRY0("BC SMA message", 0x0002),
987 	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
988 	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
989 	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
990 	FLAG_ENTRY0("External device config request", 0x0020),
991 	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
992 	FLAG_ENTRY0("LinkUp achieved", 0x0080),
993 	FLAG_ENTRY0("Link going down", 0x0100),
994 };
995 
996 static u32 encoded_size(u32 size);
997 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
998 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
999 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1000 			       u8 *continuous);
1001 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1002 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1003 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1004 				      u8 *remote_tx_rate, u16 *link_widths);
1005 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1006 				     u8 *flag_bits, u16 *link_widths);
1007 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1008 				  u8 *device_rev);
1009 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1010 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1011 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1012 			    u8 *tx_polarity_inversion,
1013 			    u8 *rx_polarity_inversion, u8 *max_rate);
1014 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1015 				unsigned int context, u64 err_status);
1016 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1017 static void handle_dcc_err(struct hfi1_devdata *dd,
1018 			   unsigned int context, u64 err_status);
1019 static void handle_lcb_err(struct hfi1_devdata *dd,
1020 			   unsigned int context, u64 err_status);
1021 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1024 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1025 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1026 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1027 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1028 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
1030 static const char *link_state_name(u32 state);
1031 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1032 					  u32 state);
1033 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1034 			   u64 *out_data);
1035 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1036 static int thermal_init(struct hfi1_devdata *dd);
1037 
1038 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1039 				  int msecs);
1040 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1041 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
1045 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1046 			   unsigned int *np);
1047 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1048 
1049 /*
1050  * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
1052  * Second tier interrupt registers have a single bit representing them
1053  * in the top-level CceIntStatus.
1054  */
1055 struct err_reg_info {
1056 	u32 status;		/* status CSR offset */
1057 	u32 clear;		/* clear CSR offset */
1058 	u32 mask;		/* mask CSR offset */
1059 	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1060 	const char *desc;
1061 };
1062 
1063 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1064 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1065 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1066 
1067 /*
1068  * Helpers for building HFI and DC error interrupt table entries.  Different
1069  * helpers are needed because of inconsistent register names.
1070  */
1071 #define EE(reg, handler, desc) \
1072 	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1073 		handler, desc }
1074 #define DC_EE1(reg, handler, desc) \
1075 	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1076 #define DC_EE2(reg, handler, desc) \
1077 	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
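
/*
 * Expansion example (editorial note):
 * EE(CCE_ERR, handle_cce_err, "CceErr") becomes
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }.
 * The DC_EE1/DC_EE2 variants exist only because the DC blocks name the
 * same three CSRs _FLG/_FLG_CLR/_FLG_EN or _FLG/_CLR/_EN.
 */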
1078 
1079 /*
1080  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1081  * another register containing more information.
1082  */
1083 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1084 /* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
1085 /* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
1086 /* 2*/	EE(MISC_ERR,	handle_misc_err,   "MiscErr"),
1087 /* 3*/	{ 0, 0, 0, NULL }, /* reserved */
1088 /* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1089 /* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1090 /* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1091 /* 7*/	EE(SEND_ERR,	handle_txe_err,    "TxeErr")
1092 	/* the rest are reserved */
1093 };
1094 
1095 /*
1096  * Index into the Various section of the interrupt sources
1097  * corresponding to the Critical Temperature interrupt.
1098  */
1099 #define TCRIT_INT_SOURCE 4
1100 
1101 /*
1102  * SDMA error interrupt entry - refers to another register containing more
1103  * information.
1104  */
1105 static const struct err_reg_info sdma_eng_err =
1106 	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1107 
1108 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1109 /* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
1110 /* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
1111 /* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
1112 /* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
1113 /* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
1114 	/* rest are reserved */
1115 };
1116 
1117 /*
1118  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
1120  * a power of 2. Therefore, we need a constant. Everything else can
1121  * be calculated.
1122  */
1123 #define DCC_CFG_PORT_MTU_CAP_10240 7
1124 
1125 /*
1126  * Table of the DC grouping of error interrupts.  Each entry refers to
1127  * another register containing more information.
1128  */
1129 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1130 /* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
1131 /* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
1132 /* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
1133 /* 3*/	/* dc_lbm_int - special, see is_dc_int() */
1134 	/* the rest are reserved */
1135 };
1136 
1137 struct cntr_entry {
1138 	/*
1139 	 * counter name
1140 	 */
1141 	char *name;
1142 
1143 	/*
1144 	 * csr to read for name (if applicable)
1145 	 */
1146 	u64 csr;
1147 
1148 	/*
1149 	 * offset into dd or ppd to store the counter's value
1150 	 */
1151 	int offset;
1152 
1153 	/*
1154 	 * flags
1155 	 */
1156 	u8 flags;
1157 
1158 	/*
1159 	 * accessor for stat element, context either dd or ppd
1160 	 */
1161 	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1162 		       int mode, u64 data);
1163 };
1164 
1165 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1166 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1167 
1168 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1169 { \
1170 	name, \
1171 	csr, \
1172 	offset, \
1173 	flags, \
1174 	accessor \
1175 }
1176 
1177 /* 32bit RXE */
1178 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1179 CNTR_ELEM(#name, \
1180 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1181 	  0, flags | CNTR_32BIT, \
1182 	  port_access_u32_csr)
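
/*
 * Expansion example (editorial note; "RxFoo" and counter index 3 are
 * made up for illustration):
 * RXE32_PORT_CNTR_ELEM(RxFoo, 3, CNTR_NORMAL) becomes
 * { "RxFoo", 3 * 8 + RCV_COUNTER_ARRAY32, 0, CNTR_NORMAL | CNTR_32BIT,
 *   port_access_u32_csr }, i.e. the CSR address is the base of the
 * 32-bit receive counter array plus an 8-byte stride per counter.
 */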
1183 
1184 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1185 CNTR_ELEM(#name, \
1186 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1187 	  0, flags | CNTR_32BIT, \
1188 	  dev_access_u32_csr)
1189 
1190 /* 64bit RXE */
1191 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1192 CNTR_ELEM(#name, \
1193 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1194 	  0, flags, \
1195 	  port_access_u64_csr)
1196 
1197 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1198 CNTR_ELEM(#name, \
1199 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1200 	  0, flags, \
1201 	  dev_access_u64_csr)
1202 
1203 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1204 #define OVR_ELM(ctx) \
1205 CNTR_ELEM("RcvHdrOvr" #ctx, \
1206 	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1207 	  0, CNTR_NORMAL, port_access_u64_csr)
1208 
1209 /* 32bit TXE */
1210 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1211 CNTR_ELEM(#name, \
1212 	  (counter * 8 + SEND_COUNTER_ARRAY32), \
1213 	  0, flags | CNTR_32BIT, \
1214 	  port_access_u32_csr)
1215 
1216 /* 64bit TXE */
1217 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1218 CNTR_ELEM(#name, \
1219 	  (counter * 8 + SEND_COUNTER_ARRAY64), \
1220 	  0, flags, \
1221 	  port_access_u64_csr)
1222 
#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
1225 	  counter * 8 + SEND_COUNTER_ARRAY64, \
1226 	  0, \
1227 	  flags, \
1228 	  dev_access_u64_csr)
1229 
1230 /* CCE */
1231 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233 	  (counter * 8 + CCE_COUNTER_ARRAY32), \
1234 	  0, flags | CNTR_32BIT, \
1235 	  dev_access_u32_csr)
1236 
1237 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239 	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1240 	  0, flags | CNTR_32BIT, \
1241 	  dev_access_u32_csr)
1242 
1243 /* DC */
1244 #define DC_PERF_CNTR(name, counter, flags) \
1245 CNTR_ELEM(#name, \
1246 	  counter, \
1247 	  0, \
1248 	  flags, \
1249 	  dev_access_u64_csr)
1250 
1251 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1252 CNTR_ELEM(#name, \
1253 	  counter, \
1254 	  0, \
1255 	  flags, \
1256 	  dc_access_lcb_cntr)
1257 
1258 /* ibp counters */
1259 #define SW_IBP_CNTR(name, cntr) \
1260 CNTR_ELEM(#name, \
1261 	  0, \
1262 	  0, \
1263 	  CNTR_SYNTH, \
1264 	  access_ibp_##cntr)
1265 
1266 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1267 {
1268 	if (dd->flags & HFI1_PRESENT)
1269 		return readq((void __iomem *)dd->kregbase + offset);
1270 
1271 	return -1;	/* all ones: chip is not present */
1272 }
1273 
1274 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1275 {
1276 	if (dd->flags & HFI1_PRESENT)
1277 		writeq(value, (void __iomem *)dd->kregbase + offset);
1278 }
1279 
1280 void __iomem *get_csr_addr(
1281 	struct hfi1_devdata *dd,
1282 	u32 offset)
1283 {
1284 	return (void __iomem *)dd->kregbase + offset;
1285 }
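
/*
 * These three form the CSR access layer used by everything below. A
 * typical read-modify-write through it looks like this (sketch; the
 * offset and bits are placeholders):
 */
static inline void csr_set_bits_sketch(struct hfi1_devdata *dd, u32 offset,
				       u64 bits)
{
	write_csr(dd, offset, read_csr(dd, offset) | bits);
}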
1286 
1287 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1288 				 int mode, u64 value)
1289 {
1290 	u64 ret;
1291 
1292 	if (mode == CNTR_MODE_R) {
1293 		ret = read_csr(dd, csr);
1294 	} else if (mode == CNTR_MODE_W) {
1295 		write_csr(dd, csr, value);
1296 		ret = value;
1297 	} else {
1298 		dd_dev_err(dd, "Invalid cntr register access mode\n");
1299 		return 0;
1300 	}
1301 
1302 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1303 	return ret;
1304 }
1305 
1306 /* Dev Access */
1307 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1308 			      void *context, int vl, int mode, u64 data)
1309 {
1310 	struct hfi1_devdata *dd = context;
1311 	u64 csr = entry->csr;
1312 
1313 	if (entry->flags & CNTR_SDMA) {
1314 		if (vl == CNTR_INVALID_VL)
1315 			return 0;
1316 		csr += 0x100 * vl;
1317 	} else {
1318 		if (vl != CNTR_INVALID_VL)
1319 			return 0;
1320 	}
1321 	return read_write_csr(dd, csr, mode, data);
1322 }
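
/*
 * For CNTR_SDMA entries the "vl" argument actually carries the SDMA
 * engine index, so engine N is read at entry->csr + N * 0x100 (e.g.
 * engine 3 of a counter block at 0x4000 reads CSR 0x4300; sketch
 * values). All other entries only answer the CNTR_INVALID_VL query.
 */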
1323 
1324 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1325 			      void *context, int idx, int mode, u64 data)
1326 {
1327 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1328 
1329 	if (dd->per_sdma && idx < dd->num_sdma)
1330 		return dd->per_sdma[idx].err_cnt;
1331 	return 0;
1332 }
1333 
1334 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1335 			      void *context, int idx, int mode, u64 data)
1336 {
1337 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1338 
1339 	if (dd->per_sdma && idx < dd->num_sdma)
1340 		return dd->per_sdma[idx].sdma_int_cnt;
1341 	return 0;
1342 }
1343 
1344 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1345 				   void *context, int idx, int mode, u64 data)
1346 {
1347 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348 
1349 	if (dd->per_sdma && idx < dd->num_sdma)
1350 		return dd->per_sdma[idx].idle_int_cnt;
1351 	return 0;
1352 }
1353 
1354 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1355 				       void *context, int idx, int mode,
1356 				       u64 data)
1357 {
1358 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1359 
1360 	if (dd->per_sdma && idx < dd->num_sdma)
1361 		return dd->per_sdma[idx].progress_int_cnt;
1362 	return 0;
1363 }
1364 
1365 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1366 			      int vl, int mode, u64 data)
1367 {
1368 	struct hfi1_devdata *dd = context;
1369 
1370 	u64 val = 0;
1371 	u64 csr = entry->csr;
1372 
1373 	if (entry->flags & CNTR_VL) {
1374 		if (vl == CNTR_INVALID_VL)
1375 			return 0;
1376 		csr += 8 * vl;
1377 	} else {
1378 		if (vl != CNTR_INVALID_VL)
1379 			return 0;
1380 	}
1381 
1382 	val = read_write_csr(dd, csr, mode, data);
1383 	return val;
1384 }
1385 
1386 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1387 			      int vl, int mode, u64 data)
1388 {
1389 	struct hfi1_devdata *dd = context;
1390 	u32 csr = entry->csr;
1391 	int ret = 0;
1392 
1393 	if (vl != CNTR_INVALID_VL)
1394 		return 0;
1395 	if (mode == CNTR_MODE_R)
1396 		ret = read_lcb_csr(dd, csr, &data);
1397 	else if (mode == CNTR_MODE_W)
1398 		ret = write_lcb_csr(dd, csr, data);
1399 
1400 	if (ret) {
1401 		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x\n", csr);
1402 		return 0;
1403 	}
1404 
1405 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1406 	return data;
1407 }
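
/*
 * The LCB is not always host-accessible (ownership can rest with the
 * 8051 firmware), so LCB counters go through read_lcb_csr()/
 * write_lcb_csr() above, which fail cleanly instead of touching the
 * chip when the LCB cannot be acquired; hence the 0 fallback.
 */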
1408 
1409 /* Port Access */
1410 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1411 			       int vl, int mode, u64 data)
1412 {
1413 	struct hfi1_pportdata *ppd = context;
1414 
1415 	if (vl != CNTR_INVALID_VL)
1416 		return 0;
1417 	return read_write_csr(ppd->dd, entry->csr, mode, data);
1418 }
1419 
1420 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1421 			       void *context, int vl, int mode, u64 data)
1422 {
1423 	struct hfi1_pportdata *ppd = context;
1424 	u64 val;
1425 	u64 csr = entry->csr;
1426 
1427 	if (entry->flags & CNTR_VL) {
1428 		if (vl == CNTR_INVALID_VL)
1429 			return 0;
1430 		csr += 8 * vl;
1431 	} else {
1432 		if (vl != CNTR_INVALID_VL)
1433 			return 0;
1434 	}
1435 	val = read_write_csr(ppd->dd, csr, mode, data);
1436 	return val;
1437 }
1438 
1439 /* Software defined */
1440 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1441 				u64 data)
1442 {
1443 	u64 ret;
1444 
1445 	if (mode == CNTR_MODE_R) {
1446 		ret = *cntr;
1447 	} else if (mode == CNTR_MODE_W) {
1448 		*cntr = data;
1449 		ret = data;
1450 	} else {
1451 		dd_dev_err(dd, "Invalid cntr sw access mode\n");
1452 		return 0;
1453 	}
1454 
1455 	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1456 
1457 	return ret;
1458 }
1459 
1460 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1461 				 int vl, int mode, u64 data)
1462 {
1463 	struct hfi1_pportdata *ppd = context;
1464 
1465 	if (vl != CNTR_INVALID_VL)
1466 		return 0;
1467 	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1468 }
1469 
1470 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1471 				 int vl, int mode, u64 data)
1472 {
1473 	struct hfi1_pportdata *ppd = context;
1474 
1475 	if (vl != CNTR_INVALID_VL)
1476 		return 0;
1477 	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1478 }
1479 
1480 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1481 				       void *context, int vl, int mode,
1482 				       u64 data)
1483 {
1484 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1485 
1486 	if (vl != CNTR_INVALID_VL)
1487 		return 0;
1488 	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1489 }
1490 
1491 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1492 				   void *context, int vl, int mode, u64 data)
1493 {
1494 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1495 	u64 zero = 0;
1496 	u64 *counter;
1497 
1498 	if (vl == CNTR_INVALID_VL)
1499 		counter = &ppd->port_xmit_discards;
1500 	else if (vl >= 0 && vl < C_VL_COUNT)
1501 		counter = &ppd->port_xmit_discards_vl[vl];
1502 	else
1503 		counter = &zero;
1504 
1505 	return read_write_sw(ppd->dd, counter, mode, data);
1506 }
1507 
1508 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1509 				       void *context, int vl, int mode,
1510 				       u64 data)
1511 {
1512 	struct hfi1_pportdata *ppd = context;
1513 
1514 	if (vl != CNTR_INVALID_VL)
1515 		return 0;
1516 
1517 	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1518 			     mode, data);
1519 }
1520 
1521 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1522 				      void *context, int vl, int mode, u64 data)
1523 {
1524 	struct hfi1_pportdata *ppd = context;
1525 
1526 	if (vl != CNTR_INVALID_VL)
1527 		return 0;
1528 
1529 	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1530 			     mode, data);
1531 }
1532 
1533 u64 get_all_cpu_total(u64 __percpu *cntr)
1534 {
1535 	int cpu;
1536 	u64 counter = 0;
1537 
1538 	for_each_possible_cpu(cpu)
1539 		counter += *per_cpu_ptr(cntr, cpu);
1540 	return counter;
1541 }
1542 
1543 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1544 			  u64 __percpu *cntr,
1545 			  int vl, int mode, u64 data)
1546 {
1547 	u64 ret = 0;
1548 
1549 	if (vl != CNTR_INVALID_VL)
1550 		return 0;
1551 
1552 	if (mode == CNTR_MODE_R) {
1553 		ret = get_all_cpu_total(cntr) - *z_val;
1554 	} else if (mode == CNTR_MODE_W) {
1555 		/* A write can only zero the counter */
1556 		if (data == 0)
1557 			*z_val = get_all_cpu_total(cntr);
1558 		else
1559 			dd_dev_err(dd, "Per CPU cntrs can only be zeroed\n");
1560 	} else {
1561 		dd_dev_err(dd, "Invalid cntr sw cpu access mode\n");
1562 		return 0;
1563 	}
1564 
1565 	return ret;
1566 }
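
/*
 * A sketch of the zero-on-write semantics (hypothetical caller):
 * reading yields the per-CPU total minus the snapshot in *z_val, and
 * "zeroing" merely refreshes that snapshot, leaving the per-CPU data
 * untouched.
 */
static inline void zero_cpu_cntr_sketch(struct hfi1_devdata *dd, u64 *z_val,
					u64 __percpu *cntr)
{
	read_write_cpu(dd, z_val, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
}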
1567 
1568 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1569 			      void *context, int vl, int mode, u64 data)
1570 {
1571 	struct hfi1_devdata *dd = context;
1572 
1573 	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1574 			      mode, data);
1575 }
1576 
1577 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1578 				   void *context, int vl, int mode, u64 data)
1579 {
1580 	struct hfi1_devdata *dd = context;
1581 
1582 	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1583 			      mode, data);
1584 }
1585 
1586 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1587 			      void *context, int vl, int mode, u64 data)
1588 {
1589 	struct hfi1_devdata *dd = context;
1590 
1591 	return dd->verbs_dev.n_piowait;
1592 }
1593 
1594 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1595 			       void *context, int vl, int mode, u64 data)
1596 {
1597 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1598 
1599 	return dd->verbs_dev.n_piodrain;
1600 }
1601 
1602 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1603 			      void *context, int vl, int mode, u64 data)
1604 {
1605 	struct hfi1_devdata *dd = context;
1606 
1607 	return dd->verbs_dev.n_txwait;
1608 }
1609 
1610 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1611 			       void *context, int vl, int mode, u64 data)
1612 {
1613 	struct hfi1_devdata *dd = context;
1614 
1615 	return dd->verbs_dev.n_kmem_wait;
1616 }
1617 
1618 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1619 				   void *context, int vl, int mode, u64 data)
1620 {
1621 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1622 
1623 	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1624 			      mode, data);
1625 }
1626 
1627 /* Software counters for the error status bits within MISC_ERR_STATUS */
1628 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1629 					     void *context, int vl, int mode,
1630 					     u64 data)
1631 {
1632 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1633 
1634 	return dd->misc_err_status_cnt[12];
1635 }
1636 
1637 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1638 					  void *context, int vl, int mode,
1639 					  u64 data)
1640 {
1641 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1642 
1643 	return dd->misc_err_status_cnt[11];
1644 }
1645 
1646 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1647 					       void *context, int vl, int mode,
1648 					       u64 data)
1649 {
1650 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1651 
1652 	return dd->misc_err_status_cnt[10];
1653 }
1654 
1655 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1656 						 void *context, int vl,
1657 						 int mode, u64 data)
1658 {
1659 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1660 
1661 	return dd->misc_err_status_cnt[9];
1662 }
1663 
1664 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1665 					   void *context, int vl, int mode,
1666 					   u64 data)
1667 {
1668 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1669 
1670 	return dd->misc_err_status_cnt[8];
1671 }
1672 
1673 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1674 				const struct cntr_entry *entry,
1675 				void *context, int vl, int mode, u64 data)
1676 {
1677 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1678 
1679 	return dd->misc_err_status_cnt[7];
1680 }
1681 
1682 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1683 						void *context, int vl,
1684 						int mode, u64 data)
1685 {
1686 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1687 
1688 	return dd->misc_err_status_cnt[6];
1689 }
1690 
1691 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1692 					      void *context, int vl, int mode,
1693 					      u64 data)
1694 {
1695 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1696 
1697 	return dd->misc_err_status_cnt[5];
1698 }
1699 
1700 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1701 					    void *context, int vl, int mode,
1702 					    u64 data)
1703 {
1704 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1705 
1706 	return dd->misc_err_status_cnt[4];
1707 }
1708 
1709 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1710 						 void *context, int vl,
1711 						 int mode, u64 data)
1712 {
1713 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1714 
1715 	return dd->misc_err_status_cnt[3];
1716 }
1717 
1718 static u64 access_misc_csr_write_bad_addr_err_cnt(
1719 				const struct cntr_entry *entry,
1720 				void *context, int vl, int mode, u64 data)
1721 {
1722 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1723 
1724 	return dd->misc_err_status_cnt[2];
1725 }
1726 
1727 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1728 						 void *context, int vl,
1729 						 int mode, u64 data)
1730 {
1731 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1732 
1733 	return dd->misc_err_status_cnt[1];
1734 }
1735 
1736 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1737 					  void *context, int vl, int mode,
1738 					  u64 data)
1739 {
1740 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741 
1742 	return dd->misc_err_status_cnt[0];
1743 }
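
/*
 * The misc_err_status_cnt[] array read by the accessors above is
 * maintained by the MISC error interrupt handler elsewhere in this
 * file; the update pattern is roughly (sketch):
 */
static inline void count_misc_errs_sketch(struct hfi1_devdata *dd, u64 reg)
{
	int i;

	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++)
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
}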
1744 
1745 /*
1746  * Software counter for the aggregate of
1747  * individual CceErrStatus counters
1748  */
1749 static u64 access_sw_cce_err_status_aggregated_cnt(
1750 				const struct cntr_entry *entry,
1751 				void *context, int vl, int mode, u64 data)
1752 {
1753 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754 
1755 	return dd->sw_cce_err_status_aggregate;
1756 }
1757 
1758 /*
1759  * Software counters corresponding to each of the
1760  * error status bits within CceErrStatus
1761  */
1762 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1763 					      void *context, int vl, int mode,
1764 					      u64 data)
1765 {
1766 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767 
1768 	return dd->cce_err_status_cnt[40];
1769 }
1770 
1771 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1772 					  void *context, int vl, int mode,
1773 					  u64 data)
1774 {
1775 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776 
1777 	return dd->cce_err_status_cnt[39];
1778 }
1779 
1780 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1781 					  void *context, int vl, int mode,
1782 					  u64 data)
1783 {
1784 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785 
1786 	return dd->cce_err_status_cnt[38];
1787 }
1788 
1789 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1790 					     void *context, int vl, int mode,
1791 					     u64 data)
1792 {
1793 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794 
1795 	return dd->cce_err_status_cnt[37];
1796 }
1797 
1798 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1799 					     void *context, int vl, int mode,
1800 					     u64 data)
1801 {
1802 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803 
1804 	return dd->cce_err_status_cnt[36];
1805 }
1806 
1807 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1808 				const struct cntr_entry *entry,
1809 				void *context, int vl, int mode, u64 data)
1810 {
1811 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812 
1813 	return dd->cce_err_status_cnt[35];
1814 }
1815 
1816 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1817 				const struct cntr_entry *entry,
1818 				void *context, int vl, int mode, u64 data)
1819 {
1820 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821 
1822 	return dd->cce_err_status_cnt[34];
1823 }
1824 
1825 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1826 						 void *context, int vl,
1827 						 int mode, u64 data)
1828 {
1829 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1830 
1831 	return dd->cce_err_status_cnt[33];
1832 }
1833 
1834 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1835 						void *context, int vl, int mode,
1836 						u64 data)
1837 {
1838 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839 
1840 	return dd->cce_err_status_cnt[32];
1841 }
1842 
1843 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1844 				   void *context, int vl, int mode, u64 data)
1845 {
1846 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847 
1848 	return dd->cce_err_status_cnt[31];
1849 }
1850 
1851 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1852 					       void *context, int vl, int mode,
1853 					       u64 data)
1854 {
1855 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856 
1857 	return dd->cce_err_status_cnt[30];
1858 }
1859 
1860 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1861 					      void *context, int vl, int mode,
1862 					      u64 data)
1863 {
1864 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865 
1866 	return dd->cce_err_status_cnt[29];
1867 }
1868 
1869 static u64 access_pcic_transmit_back_parity_err_cnt(
1870 				const struct cntr_entry *entry,
1871 				void *context, int vl, int mode, u64 data)
1872 {
1873 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874 
1875 	return dd->cce_err_status_cnt[28];
1876 }
1877 
1878 static u64 access_pcic_transmit_front_parity_err_cnt(
1879 				const struct cntr_entry *entry,
1880 				void *context, int vl, int mode, u64 data)
1881 {
1882 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883 
1884 	return dd->cce_err_status_cnt[27];
1885 }
1886 
1887 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1888 					     void *context, int vl, int mode,
1889 					     u64 data)
1890 {
1891 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892 
1893 	return dd->cce_err_status_cnt[26];
1894 }
1895 
1896 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1897 					    void *context, int vl, int mode,
1898 					    u64 data)
1899 {
1900 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901 
1902 	return dd->cce_err_status_cnt[25];
1903 }
1904 
1905 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1906 					      void *context, int vl, int mode,
1907 					      u64 data)
1908 {
1909 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910 
1911 	return dd->cce_err_status_cnt[24];
1912 }
1913 
1914 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1915 					     void *context, int vl, int mode,
1916 					     u64 data)
1917 {
1918 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1919 
1920 	return dd->cce_err_status_cnt[23];
1921 }
1922 
1923 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1924 						 void *context, int vl,
1925 						 int mode, u64 data)
1926 {
1927 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1928 
1929 	return dd->cce_err_status_cnt[22];
1930 }
1931 
1932 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1933 					 void *context, int vl, int mode,
1934 					 u64 data)
1935 {
1936 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1937 
1938 	return dd->cce_err_status_cnt[21];
1939 }
1940 
1941 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1942 				const struct cntr_entry *entry,
1943 				void *context, int vl, int mode, u64 data)
1944 {
1945 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1946 
1947 	return dd->cce_err_status_cnt[20];
1948 }
1949 
1950 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1951 						 void *context, int vl,
1952 						 int mode, u64 data)
1953 {
1954 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1955 
1956 	return dd->cce_err_status_cnt[19];
1957 }
1958 
1959 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1960 					     void *context, int vl, int mode,
1961 					     u64 data)
1962 {
1963 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1964 
1965 	return dd->cce_err_status_cnt[18];
1966 }
1967 
1968 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1969 					    void *context, int vl, int mode,
1970 					    u64 data)
1971 {
1972 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1973 
1974 	return dd->cce_err_status_cnt[17];
1975 }
1976 
1977 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1978 					      void *context, int vl, int mode,
1979 					      u64 data)
1980 {
1981 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1982 
1983 	return dd->cce_err_status_cnt[16];
1984 }
1985 
1986 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1987 					     void *context, int vl, int mode,
1988 					     u64 data)
1989 {
1990 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1991 
1992 	return dd->cce_err_status_cnt[15];
1993 }
1994 
1995 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1996 						 void *context, int vl,
1997 						 int mode, u64 data)
1998 {
1999 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2000 
2001 	return dd->cce_err_status_cnt[14];
2002 }
2003 
2004 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2005 					     void *context, int vl, int mode,
2006 					     u64 data)
2007 {
2008 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2009 
2010 	return dd->cce_err_status_cnt[13];
2011 }
2012 
2013 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2014 				const struct cntr_entry *entry,
2015 				void *context, int vl, int mode, u64 data)
2016 {
2017 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2018 
2019 	return dd->cce_err_status_cnt[12];
2020 }
2021 
2022 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2023 				const struct cntr_entry *entry,
2024 				void *context, int vl, int mode, u64 data)
2025 {
2026 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2027 
2028 	return dd->cce_err_status_cnt[11];
2029 }
2030 
2031 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2032 				const struct cntr_entry *entry,
2033 				void *context, int vl, int mode, u64 data)
2034 {
2035 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2036 
2037 	return dd->cce_err_status_cnt[10];
2038 }
2039 
2040 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2041 				const struct cntr_entry *entry,
2042 				void *context, int vl, int mode, u64 data)
2043 {
2044 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2045 
2046 	return dd->cce_err_status_cnt[9];
2047 }
2048 
2049 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2050 				const struct cntr_entry *entry,
2051 				void *context, int vl, int mode, u64 data)
2052 {
2053 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2054 
2055 	return dd->cce_err_status_cnt[8];
2056 }
2057 
2058 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2059 						 void *context, int vl,
2060 						 int mode, u64 data)
2061 {
2062 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2063 
2064 	return dd->cce_err_status_cnt[7];
2065 }
2066 
2067 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2068 				const struct cntr_entry *entry,
2069 				void *context, int vl, int mode, u64 data)
2070 {
2071 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2072 
2073 	return dd->cce_err_status_cnt[6];
2074 }
2075 
2076 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2077 					       void *context, int vl, int mode,
2078 					       u64 data)
2079 {
2080 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2081 
2082 	return dd->cce_err_status_cnt[5];
2083 }
2084 
2085 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2086 					  void *context, int vl, int mode,
2087 					  u64 data)
2088 {
2089 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2090 
2091 	return dd->cce_err_status_cnt[4];
2092 }
2093 
2094 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2095 				const struct cntr_entry *entry,
2096 				void *context, int vl, int mode, u64 data)
2097 {
2098 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2099 
2100 	return dd->cce_err_status_cnt[3];
2101 }
2102 
2103 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2104 						 void *context, int vl,
2105 						 int mode, u64 data)
2106 {
2107 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2108 
2109 	return dd->cce_err_status_cnt[2];
2110 }
2111 
2112 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2113 						void *context, int vl,
2114 						int mode, u64 data)
2115 {
2116 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2117 
2118 	return dd->cce_err_status_cnt[1];
2119 }
2120 
2121 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2122 					 void *context, int vl, int mode,
2123 					 u64 data)
2124 {
2125 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126 
2127 	return dd->cce_err_status_cnt[0];
2128 }
2129 
2130 /*
2131  * Software counters corresponding to each of the
2132  * error status bits within RcvErrStatus
2133  */
2134 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2135 					void *context, int vl, int mode,
2136 					u64 data)
2137 {
2138 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139 
2140 	return dd->rcv_err_status_cnt[63];
2141 }
2142 
2143 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2144 						void *context, int vl,
2145 						int mode, u64 data)
2146 {
2147 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148 
2149 	return dd->rcv_err_status_cnt[62];
2150 }
2151 
2152 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2153 					       void *context, int vl, int mode,
2154 					       u64 data)
2155 {
2156 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157 
2158 	return dd->rcv_err_status_cnt[61];
2159 }
2160 
2161 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2162 					 void *context, int vl, int mode,
2163 					 u64 data)
2164 {
2165 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166 
2167 	return dd->rcv_err_status_cnt[60];
2168 }
2169 
2170 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2171 						 void *context, int vl,
2172 						 int mode, u64 data)
2173 {
2174 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175 
2176 	return dd->rcv_err_status_cnt[59];
2177 }
2178 
2179 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2180 						 void *context, int vl,
2181 						 int mode, u64 data)
2182 {
2183 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184 
2185 	return dd->rcv_err_status_cnt[58];
2186 }
2187 
2188 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2189 					    void *context, int vl, int mode,
2190 					    u64 data)
2191 {
2192 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193 
2194 	return dd->rcv_err_status_cnt[57];
2195 }
2196 
2197 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2198 					   void *context, int vl, int mode,
2199 					   u64 data)
2200 {
2201 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202 
2203 	return dd->rcv_err_status_cnt[56];
2204 }
2205 
2206 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2207 					   void *context, int vl, int mode,
2208 					   u64 data)
2209 {
2210 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211 
2212 	return dd->rcv_err_status_cnt[55];
2213 }
2214 
2215 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2216 				const struct cntr_entry *entry,
2217 				void *context, int vl, int mode, u64 data)
2218 {
2219 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220 
2221 	return dd->rcv_err_status_cnt[54];
2222 }
2223 
2224 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2225 				const struct cntr_entry *entry,
2226 				void *context, int vl, int mode, u64 data)
2227 {
2228 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2229 
2230 	return dd->rcv_err_status_cnt[53];
2231 }
2232 
2233 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2234 						 void *context, int vl,
2235 						 int mode, u64 data)
2236 {
2237 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2238 
2239 	return dd->rcv_err_status_cnt[52];
2240 }
2241 
2242 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2243 						 void *context, int vl,
2244 						 int mode, u64 data)
2245 {
2246 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2247 
2248 	return dd->rcv_err_status_cnt[51];
2249 }
2250 
2251 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2252 						 void *context, int vl,
2253 						 int mode, u64 data)
2254 {
2255 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2256 
2257 	return dd->rcv_err_status_cnt[50];
2258 }
2259 
2260 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2261 						 void *context, int vl,
2262 						 int mode, u64 data)
2263 {
2264 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2265 
2266 	return dd->rcv_err_status_cnt[49];
2267 }
2268 
2269 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2270 						 void *context, int vl,
2271 						 int mode, u64 data)
2272 {
2273 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2274 
2275 	return dd->rcv_err_status_cnt[48];
2276 }
2277 
2278 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2279 						 void *context, int vl,
2280 						 int mode, u64 data)
2281 {
2282 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2283 
2284 	return dd->rcv_err_status_cnt[47];
2285 }
2286 
2287 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2288 					 void *context, int vl, int mode,
2289 					 u64 data)
2290 {
2291 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2292 
2293 	return dd->rcv_err_status_cnt[46];
2294 }
2295 
2296 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2297 				const struct cntr_entry *entry,
2298 				void *context, int vl, int mode, u64 data)
2299 {
2300 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2301 
2302 	return dd->rcv_err_status_cnt[45];
2303 }
2304 
2305 static u64 access_rx_lookup_csr_parity_err_cnt(
2306 				const struct cntr_entry *entry,
2307 				void *context, int vl, int mode, u64 data)
2308 {
2309 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2310 
2311 	return dd->rcv_err_status_cnt[44];
2312 }
2313 
2314 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2315 				const struct cntr_entry *entry,
2316 				void *context, int vl, int mode, u64 data)
2317 {
2318 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2319 
2320 	return dd->rcv_err_status_cnt[43];
2321 }
2322 
2323 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2324 				const struct cntr_entry *entry,
2325 				void *context, int vl, int mode, u64 data)
2326 {
2327 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2328 
2329 	return dd->rcv_err_status_cnt[42];
2330 }
2331 
2332 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2333 				const struct cntr_entry *entry,
2334 				void *context, int vl, int mode, u64 data)
2335 {
2336 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2337 
2338 	return dd->rcv_err_status_cnt[41];
2339 }
2340 
2341 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2342 				const struct cntr_entry *entry,
2343 				void *context, int vl, int mode, u64 data)
2344 {
2345 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2346 
2347 	return dd->rcv_err_status_cnt[40];
2348 }
2349 
2350 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2351 				const struct cntr_entry *entry,
2352 				void *context, int vl, int mode, u64 data)
2353 {
2354 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2355 
2356 	return dd->rcv_err_status_cnt[39];
2357 }
2358 
2359 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2360 				const struct cntr_entry *entry,
2361 				void *context, int vl, int mode, u64 data)
2362 {
2363 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2364 
2365 	return dd->rcv_err_status_cnt[38];
2366 }
2367 
2368 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2369 				const struct cntr_entry *entry,
2370 				void *context, int vl, int mode, u64 data)
2371 {
2372 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2373 
2374 	return dd->rcv_err_status_cnt[37];
2375 }
2376 
2377 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2378 				const struct cntr_entry *entry,
2379 				void *context, int vl, int mode, u64 data)
2380 {
2381 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2382 
2383 	return dd->rcv_err_status_cnt[36];
2384 }
2385 
2386 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2387 				const struct cntr_entry *entry,
2388 				void *context, int vl, int mode, u64 data)
2389 {
2390 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2391 
2392 	return dd->rcv_err_status_cnt[35];
2393 }
2394 
2395 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2396 				const struct cntr_entry *entry,
2397 				void *context, int vl, int mode, u64 data)
2398 {
2399 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2400 
2401 	return dd->rcv_err_status_cnt[34];
2402 }
2403 
2404 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2405 				const struct cntr_entry *entry,
2406 				void *context, int vl, int mode, u64 data)
2407 {
2408 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2409 
2410 	return dd->rcv_err_status_cnt[33];
2411 }
2412 
2413 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2414 					void *context, int vl, int mode,
2415 					u64 data)
2416 {
2417 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2418 
2419 	return dd->rcv_err_status_cnt[32];
2420 }
2421 
2422 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2423 				       void *context, int vl, int mode,
2424 				       u64 data)
2425 {
2426 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2427 
2428 	return dd->rcv_err_status_cnt[31];
2429 }
2430 
2431 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2432 					  void *context, int vl, int mode,
2433 					  u64 data)
2434 {
2435 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2436 
2437 	return dd->rcv_err_status_cnt[30];
2438 }
2439 
2440 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2441 					     void *context, int vl, int mode,
2442 					     u64 data)
2443 {
2444 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2445 
2446 	return dd->rcv_err_status_cnt[29];
2447 }
2448 
2449 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2450 						 void *context, int vl,
2451 						 int mode, u64 data)
2452 {
2453 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2454 
2455 	return dd->rcv_err_status_cnt[28];
2456 }
2457 
2458 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2459 				const struct cntr_entry *entry,
2460 				void *context, int vl, int mode, u64 data)
2461 {
2462 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2463 
2464 	return dd->rcv_err_status_cnt[27];
2465 }
2466 
2467 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2468 				const struct cntr_entry *entry,
2469 				void *context, int vl, int mode, u64 data)
2470 {
2471 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2472 
2473 	return dd->rcv_err_status_cnt[26];
2474 }
2475 
2476 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2477 				const struct cntr_entry *entry,
2478 				void *context, int vl, int mode, u64 data)
2479 {
2480 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2481 
2482 	return dd->rcv_err_status_cnt[25];
2483 }
2484 
2485 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2486 				const struct cntr_entry *entry,
2487 				void *context, int vl, int mode, u64 data)
2488 {
2489 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2490 
2491 	return dd->rcv_err_status_cnt[24];
2492 }
2493 
2494 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2495 				const struct cntr_entry *entry,
2496 				void *context, int vl, int mode, u64 data)
2497 {
2498 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2499 
2500 	return dd->rcv_err_status_cnt[23];
2501 }
2502 
2503 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2504 				const struct cntr_entry *entry,
2505 				void *context, int vl, int mode, u64 data)
2506 {
2507 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2508 
2509 	return dd->rcv_err_status_cnt[22];
2510 }
2511 
2512 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2513 				const struct cntr_entry *entry,
2514 				void *context, int vl, int mode, u64 data)
2515 {
2516 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2517 
2518 	return dd->rcv_err_status_cnt[21];
2519 }
2520 
2521 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2522 				const struct cntr_entry *entry,
2523 				void *context, int vl, int mode, u64 data)
2524 {
2525 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2526 
2527 	return dd->rcv_err_status_cnt[20];
2528 }
2529 
2530 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2531 				const struct cntr_entry *entry,
2532 				void *context, int vl, int mode, u64 data)
2533 {
2534 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2535 
2536 	return dd->rcv_err_status_cnt[19];
2537 }
2538 
2539 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2540 						 void *context, int vl,
2541 						 int mode, u64 data)
2542 {
2543 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2544 
2545 	return dd->rcv_err_status_cnt[18];
2546 }
2547 
2548 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2549 						 void *context, int vl,
2550 						 int mode, u64 data)
2551 {
2552 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2553 
2554 	return dd->rcv_err_status_cnt[17];
2555 }
2556 
2557 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2558 				const struct cntr_entry *entry,
2559 				void *context, int vl, int mode, u64 data)
2560 {
2561 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2562 
2563 	return dd->rcv_err_status_cnt[16];
2564 }
2565 
2566 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2567 				const struct cntr_entry *entry,
2568 				void *context, int vl, int mode, u64 data)
2569 {
2570 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2571 
2572 	return dd->rcv_err_status_cnt[15];
2573 }
2574 
2575 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2576 						void *context, int vl,
2577 						int mode, u64 data)
2578 {
2579 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2580 
2581 	return dd->rcv_err_status_cnt[14];
2582 }
2583 
2584 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2585 						void *context, int vl,
2586 						int mode, u64 data)
2587 {
2588 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2589 
2590 	return dd->rcv_err_status_cnt[13];
2591 }
2592 
2593 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2594 					      void *context, int vl, int mode,
2595 					      u64 data)
2596 {
2597 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2598 
2599 	return dd->rcv_err_status_cnt[12];
2600 }
2601 
2602 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2603 					  void *context, int vl, int mode,
2604 					  u64 data)
2605 {
2606 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2607 
2608 	return dd->rcv_err_status_cnt[11];
2609 }
2610 
2611 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2612 					  void *context, int vl, int mode,
2613 					  u64 data)
2614 {
2615 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2616 
2617 	return dd->rcv_err_status_cnt[10];
2618 }
2619 
2620 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2621 					       void *context, int vl, int mode,
2622 					       u64 data)
2623 {
2624 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2625 
2626 	return dd->rcv_err_status_cnt[9];
2627 }
2628 
2629 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2630 					    void *context, int vl, int mode,
2631 					    u64 data)
2632 {
2633 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2634 
2635 	return dd->rcv_err_status_cnt[8];
2636 }
2637 
2638 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2639 				const struct cntr_entry *entry,
2640 				void *context, int vl, int mode, u64 data)
2641 {
2642 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2643 
2644 	return dd->rcv_err_status_cnt[7];
2645 }
2646 
2647 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2648 				const struct cntr_entry *entry,
2649 				void *context, int vl, int mode, u64 data)
2650 {
2651 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2652 
2653 	return dd->rcv_err_status_cnt[6];
2654 }
2655 
2656 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2657 					  void *context, int vl, int mode,
2658 					  u64 data)
2659 {
2660 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2661 
2662 	return dd->rcv_err_status_cnt[5];
2663 }
2664 
2665 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2666 					  void *context, int vl, int mode,
2667 					  u64 data)
2668 {
2669 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2670 
2671 	return dd->rcv_err_status_cnt[4];
2672 }
2673 
2674 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2675 					 void *context, int vl, int mode,
2676 					 u64 data)
2677 {
2678 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2679 
2680 	return dd->rcv_err_status_cnt[3];
2681 }
2682 
2683 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2684 					 void *context, int vl, int mode,
2685 					 u64 data)
2686 {
2687 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2688 
2689 	return dd->rcv_err_status_cnt[2];
2690 }
2691 
2692 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2693 					    void *context, int vl, int mode,
2694 					    u64 data)
2695 {
2696 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2697 
2698 	return dd->rcv_err_status_cnt[1];
2699 }
2700 
2701 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2702 					 void *context, int vl, int mode,
2703 					 u64 data)
2704 {
2705 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706 
2707 	return dd->rcv_err_status_cnt[0];
2708 }
2709 
2710 /*
2711  * Software counters corresponding to each of the
2712  * error status bits within SendPioErrStatus
2713  */
2714 static u64 access_pio_pec_sop_head_parity_err_cnt(
2715 				const struct cntr_entry *entry,
2716 				void *context, int vl, int mode, u64 data)
2717 {
2718 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2719 
2720 	return dd->send_pio_err_status_cnt[35];
2721 }
2722 
2723 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2724 				const struct cntr_entry *entry,
2725 				void *context, int vl, int mode, u64 data)
2726 {
2727 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2728 
2729 	return dd->send_pio_err_status_cnt[34];
2730 }
2731 
2732 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2733 				const struct cntr_entry *entry,
2734 				void *context, int vl, int mode, u64 data)
2735 {
2736 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2737 
2738 	return dd->send_pio_err_status_cnt[33];
2739 }
2740 
2741 static u64 access_pio_current_free_cnt_parity_err_cnt(
2742 				const struct cntr_entry *entry,
2743 				void *context, int vl, int mode, u64 data)
2744 {
2745 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2746 
2747 	return dd->send_pio_err_status_cnt[32];
2748 }
2749 
2750 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2751 					  void *context, int vl, int mode,
2752 					  u64 data)
2753 {
2754 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2755 
2756 	return dd->send_pio_err_status_cnt[31];
2757 }
2758 
2759 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2760 					  void *context, int vl, int mode,
2761 					  u64 data)
2762 {
2763 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2764 
2765 	return dd->send_pio_err_status_cnt[30];
2766 }
2767 
2768 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2769 					   void *context, int vl, int mode,
2770 					   u64 data)
2771 {
2772 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2773 
2774 	return dd->send_pio_err_status_cnt[29];
2775 }
2776 
2777 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2778 				const struct cntr_entry *entry,
2779 				void *context, int vl, int mode, u64 data)
2780 {
2781 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2782 
2783 	return dd->send_pio_err_status_cnt[28];
2784 }
2785 
2786 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2787 					     void *context, int vl, int mode,
2788 					     u64 data)
2789 {
2790 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2791 
2792 	return dd->send_pio_err_status_cnt[27];
2793 }
2794 
2795 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2796 					     void *context, int vl, int mode,
2797 					     u64 data)
2798 {
2799 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2800 
2801 	return dd->send_pio_err_status_cnt[26];
2802 }
2803 
2804 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2805 						void *context, int vl,
2806 						int mode, u64 data)
2807 {
2808 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2809 
2810 	return dd->send_pio_err_status_cnt[25];
2811 }
2812 
2813 static u64 access_pio_block_qw_count_parity_err_cnt(
2814 				const struct cntr_entry *entry,
2815 				void *context, int vl, int mode, u64 data)
2816 {
2817 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2818 
2819 	return dd->send_pio_err_status_cnt[24];
2820 }
2821 
2822 static u64 access_pio_write_qw_valid_parity_err_cnt(
2823 				const struct cntr_entry *entry,
2824 				void *context, int vl, int mode, u64 data)
2825 {
2826 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2827 
2828 	return dd->send_pio_err_status_cnt[23];
2829 }
2830 
2831 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2832 					    void *context, int vl, int mode,
2833 					    u64 data)
2834 {
2835 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2836 
2837 	return dd->send_pio_err_status_cnt[22];
2838 }
2839 
2840 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2841 						void *context, int vl,
2842 						int mode, u64 data)
2843 {
2844 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2845 
2846 	return dd->send_pio_err_status_cnt[21];
2847 }
2848 
2849 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2850 						void *context, int vl,
2851 						int mode, u64 data)
2852 {
2853 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2854 
2855 	return dd->send_pio_err_status_cnt[20];
2856 }
2857 
2858 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2859 						void *context, int vl,
2860 						int mode, u64 data)
2861 {
2862 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2863 
2864 	return dd->send_pio_err_status_cnt[19];
2865 }
2866 
2867 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2868 				const struct cntr_entry *entry,
2869 				void *context, int vl, int mode, u64 data)
2870 {
2871 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2872 
2873 	return dd->send_pio_err_status_cnt[18];
2874 }
2875 
2876 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2877 					 void *context, int vl, int mode,
2878 					 u64 data)
2879 {
2880 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2881 
2882 	return dd->send_pio_err_status_cnt[17];
2883 }
2884 
2885 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2886 					    void *context, int vl, int mode,
2887 					    u64 data)
2888 {
2889 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2890 
2891 	return dd->send_pio_err_status_cnt[16];
2892 }
2893 
2894 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2895 				const struct cntr_entry *entry,
2896 				void *context, int vl, int mode, u64 data)
2897 {
2898 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2899 
2900 	return dd->send_pio_err_status_cnt[15];
2901 }
2902 
2903 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2904 				const struct cntr_entry *entry,
2905 				void *context, int vl, int mode, u64 data)
2906 {
2907 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2908 
2909 	return dd->send_pio_err_status_cnt[14];
2910 }
2911 
2912 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2913 				const struct cntr_entry *entry,
2914 				void *context, int vl, int mode, u64 data)
2915 {
2916 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2917 
2918 	return dd->send_pio_err_status_cnt[13];
2919 }
2920 
2921 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2922 				const struct cntr_entry *entry,
2923 				void *context, int vl, int mode, u64 data)
2924 {
2925 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2926 
2927 	return dd->send_pio_err_status_cnt[12];
2928 }
2929 
2930 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2931 				const struct cntr_entry *entry,
2932 				void *context, int vl, int mode, u64 data)
2933 {
2934 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2935 
2936 	return dd->send_pio_err_status_cnt[11];
2937 }
2938 
2939 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2940 				const struct cntr_entry *entry,
2941 				void *context, int vl, int mode, u64 data)
2942 {
2943 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2944 
2945 	return dd->send_pio_err_status_cnt[10];
2946 }
2947 
2948 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2949 				const struct cntr_entry *entry,
2950 				void *context, int vl, int mode, u64 data)
2951 {
2952 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2953 
2954 	return dd->send_pio_err_status_cnt[9];
2955 }
2956 
2957 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2958 				const struct cntr_entry *entry,
2959 				void *context, int vl, int mode, u64 data)
2960 {
2961 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2962 
2963 	return dd->send_pio_err_status_cnt[8];
2964 }
2965 
2966 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2967 				const struct cntr_entry *entry,
2968 				void *context, int vl, int mode, u64 data)
2969 {
2970 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2971 
2972 	return dd->send_pio_err_status_cnt[7];
2973 }
2974 
2975 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2976 					      void *context, int vl, int mode,
2977 					      u64 data)
2978 {
2979 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2980 
2981 	return dd->send_pio_err_status_cnt[6];
2982 }
2983 
2984 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2985 					      void *context, int vl, int mode,
2986 					      u64 data)
2987 {
2988 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2989 
2990 	return dd->send_pio_err_status_cnt[5];
2991 }
2992 
2993 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2994 					   void *context, int vl, int mode,
2995 					   u64 data)
2996 {
2997 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2998 
2999 	return dd->send_pio_err_status_cnt[4];
3000 }
3001 
3002 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3003 					   void *context, int vl, int mode,
3004 					   u64 data)
3005 {
3006 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3007 
3008 	return dd->send_pio_err_status_cnt[3];
3009 }
3010 
3011 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3012 					 void *context, int vl, int mode,
3013 					 u64 data)
3014 {
3015 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3016 
3017 	return dd->send_pio_err_status_cnt[2];
3018 }
3019 
3020 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3021 						void *context, int vl,
3022 						int mode, u64 data)
3023 {
3024 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3025 
3026 	return dd->send_pio_err_status_cnt[1];
3027 }
3028 
3029 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3030 					     void *context, int vl, int mode,
3031 					     u64 data)
3032 {
3033 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3034 
3035 	return dd->send_pio_err_status_cnt[0];
3036 }
3037 
3038 /*
3039  * Software counters corresponding to each of the
3040  * error status bits within SendDmaErrStatus
3041  */
3042 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3043 				const struct cntr_entry *entry,
3044 				void *context, int vl, int mode, u64 data)
3045 {
3046 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047 
3048 	return dd->send_dma_err_status_cnt[3];
3049 }
3050 
3051 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3052 				const struct cntr_entry *entry,
3053 				void *context, int vl, int mode, u64 data)
3054 {
3055 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056 
3057 	return dd->send_dma_err_status_cnt[2];
3058 }
3059 
3060 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3061 					  void *context, int vl, int mode,
3062 					  u64 data)
3063 {
3064 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065 
3066 	return dd->send_dma_err_status_cnt[1];
3067 }
3068 
3069 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3070 				       void *context, int vl, int mode,
3071 				       u64 data)
3072 {
3073 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074 
3075 	return dd->send_dma_err_status_cnt[0];
3076 }
3077 
3078 /*
3079  * Software counters corresponding to each of the
3080  * error status bits within SendEgressErrStatus
3081  */
3082 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3083 				const struct cntr_entry *entry,
3084 				void *context, int vl, int mode, u64 data)
3085 {
3086 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087 
3088 	return dd->send_egress_err_status_cnt[63];
3089 }
3090 
3091 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3092 				const struct cntr_entry *entry,
3093 				void *context, int vl, int mode, u64 data)
3094 {
3095 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096 
3097 	return dd->send_egress_err_status_cnt[62];
3098 }
3099 
3100 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3101 					     void *context, int vl, int mode,
3102 					     u64 data)
3103 {
3104 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105 
3106 	return dd->send_egress_err_status_cnt[61];
3107 }
3108 
3109 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3110 						 void *context, int vl,
3111 						 int mode, u64 data)
3112 {
3113 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3114 
3115 	return dd->send_egress_err_status_cnt[60];
3116 }
3117 
3118 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3119 				const struct cntr_entry *entry,
3120 				void *context, int vl, int mode, u64 data)
3121 {
3122 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3123 
3124 	return dd->send_egress_err_status_cnt[59];
3125 }
3126 
3127 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3128 					void *context, int vl, int mode,
3129 					u64 data)
3130 {
3131 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3132 
3133 	return dd->send_egress_err_status_cnt[58];
3134 }
3135 
3136 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3137 					    void *context, int vl, int mode,
3138 					    u64 data)
3139 {
3140 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141 
3142 	return dd->send_egress_err_status_cnt[57];
3143 }
3144 
3145 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3146 					      void *context, int vl, int mode,
3147 					      u64 data)
3148 {
3149 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150 
3151 	return dd->send_egress_err_status_cnt[56];
3152 }
3153 
3154 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3155 					      void *context, int vl, int mode,
3156 					      u64 data)
3157 {
3158 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159 
3160 	return dd->send_egress_err_status_cnt[55];
3161 }
3162 
3163 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3164 					      void *context, int vl, int mode,
3165 					      u64 data)
3166 {
3167 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168 
3169 	return dd->send_egress_err_status_cnt[54];
3170 }
3171 
3172 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3173 					      void *context, int vl, int mode,
3174 					      u64 data)
3175 {
3176 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3177 
3178 	return dd->send_egress_err_status_cnt[53];
3179 }
3180 
3181 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3182 					      void *context, int vl, int mode,
3183 					      u64 data)
3184 {
3185 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3186 
3187 	return dd->send_egress_err_status_cnt[52];
3188 }
3189 
3190 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3191 					      void *context, int vl, int mode,
3192 					      u64 data)
3193 {
3194 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3195 
3196 	return dd->send_egress_err_status_cnt[51];
3197 }
3198 
3199 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3200 					      void *context, int vl, int mode,
3201 					      u64 data)
3202 {
3203 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3204 
3205 	return dd->send_egress_err_status_cnt[50];
3206 }
3207 
3208 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3209 					      void *context, int vl, int mode,
3210 					      u64 data)
3211 {
3212 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3213 
3214 	return dd->send_egress_err_status_cnt[49];
3215 }
3216 
3217 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3218 					      void *context, int vl, int mode,
3219 					      u64 data)
3220 {
3221 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3222 
3223 	return dd->send_egress_err_status_cnt[48];
3224 }
3225 
3226 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3227 					      void *context, int vl, int mode,
3228 					      u64 data)
3229 {
3230 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3231 
3232 	return dd->send_egress_err_status_cnt[47];
3233 }
3234 
3235 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3236 					    void *context, int vl, int mode,
3237 					    u64 data)
3238 {
3239 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3240 
3241 	return dd->send_egress_err_status_cnt[46];
3242 }
3243 
3244 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3245 					     void *context, int vl, int mode,
3246 					     u64 data)
3247 {
3248 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3249 
3250 	return dd->send_egress_err_status_cnt[45];
3251 }
3252 
3253 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3254 						 void *context, int vl,
3255 						 int mode, u64 data)
3256 {
3257 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3258 
3259 	return dd->send_egress_err_status_cnt[44];
3260 }
3261 
3262 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3263 				const struct cntr_entry *entry,
3264 				void *context, int vl, int mode, u64 data)
3265 {
3266 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3267 
3268 	return dd->send_egress_err_status_cnt[43];
3269 }
3270 
3271 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3272 					void *context, int vl, int mode,
3273 					u64 data)
3274 {
3275 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3276 
3277 	return dd->send_egress_err_status_cnt[42];
3278 }
3279 
3280 static u64 access_tx_credit_return_parity_err_cnt(
3281 				const struct cntr_entry *entry,
3282 				void *context, int vl, int mode, u64 data)
3283 {
3284 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3285 
3286 	return dd->send_egress_err_status_cnt[41];
3287 }
3288 
3289 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3290 				const struct cntr_entry *entry,
3291 				void *context, int vl, int mode, u64 data)
3292 {
3293 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3294 
3295 	return dd->send_egress_err_status_cnt[40];
3296 }
3297 
3298 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3299 				const struct cntr_entry *entry,
3300 				void *context, int vl, int mode, u64 data)
3301 {
3302 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3303 
3304 	return dd->send_egress_err_status_cnt[39];
3305 }
3306 
3307 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3308 				const struct cntr_entry *entry,
3309 				void *context, int vl, int mode, u64 data)
3310 {
3311 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3312 
3313 	return dd->send_egress_err_status_cnt[38];
3314 }
3315 
3316 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3317 				const struct cntr_entry *entry,
3318 				void *context, int vl, int mode, u64 data)
3319 {
3320 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3321 
3322 	return dd->send_egress_err_status_cnt[37];
3323 }
3324 
3325 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3326 				const struct cntr_entry *entry,
3327 				void *context, int vl, int mode, u64 data)
3328 {
3329 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3330 
3331 	return dd->send_egress_err_status_cnt[36];
3332 }
3333 
3334 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3335 				const struct cntr_entry *entry,
3336 				void *context, int vl, int mode, u64 data)
3337 {
3338 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3339 
3340 	return dd->send_egress_err_status_cnt[35];
3341 }
3342 
3343 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3344 				const struct cntr_entry *entry,
3345 				void *context, int vl, int mode, u64 data)
3346 {
3347 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3348 
3349 	return dd->send_egress_err_status_cnt[34];
3350 }
3351 
3352 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3353 				const struct cntr_entry *entry,
3354 				void *context, int vl, int mode, u64 data)
3355 {
3356 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3357 
3358 	return dd->send_egress_err_status_cnt[33];
3359 }
3360 
3361 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3362 				const struct cntr_entry *entry,
3363 				void *context, int vl, int mode, u64 data)
3364 {
3365 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3366 
3367 	return dd->send_egress_err_status_cnt[32];
3368 }
3369 
3370 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3371 				const struct cntr_entry *entry,
3372 				void *context, int vl, int mode, u64 data)
3373 {
3374 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3375 
3376 	return dd->send_egress_err_status_cnt[31];
3377 }
3378 
3379 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3380 				const struct cntr_entry *entry,
3381 				void *context, int vl, int mode, u64 data)
3382 {
3383 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3384 
3385 	return dd->send_egress_err_status_cnt[30];
3386 }
3387 
3388 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3389 				const struct cntr_entry *entry,
3390 				void *context, int vl, int mode, u64 data)
3391 {
3392 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3393 
3394 	return dd->send_egress_err_status_cnt[29];
3395 }
3396 
3397 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3398 				const struct cntr_entry *entry,
3399 				void *context, int vl, int mode, u64 data)
3400 {
3401 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3402 
3403 	return dd->send_egress_err_status_cnt[28];
3404 }
3405 
3406 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3407 				const struct cntr_entry *entry,
3408 				void *context, int vl, int mode, u64 data)
3409 {
3410 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3411 
3412 	return dd->send_egress_err_status_cnt[27];
3413 }
3414 
3415 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3416 				const struct cntr_entry *entry,
3417 				void *context, int vl, int mode, u64 data)
3418 {
3419 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3420 
3421 	return dd->send_egress_err_status_cnt[26];
3422 }
3423 
3424 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3425 				const struct cntr_entry *entry,
3426 				void *context, int vl, int mode, u64 data)
3427 {
3428 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3429 
3430 	return dd->send_egress_err_status_cnt[25];
3431 }
3432 
3433 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3434 				const struct cntr_entry *entry,
3435 				void *context, int vl, int mode, u64 data)
3436 {
3437 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3438 
3439 	return dd->send_egress_err_status_cnt[24];
3440 }
3441 
3442 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3443 				const struct cntr_entry *entry,
3444 				void *context, int vl, int mode, u64 data)
3445 {
3446 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3447 
3448 	return dd->send_egress_err_status_cnt[23];
3449 }
3450 
3451 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3452 				const struct cntr_entry *entry,
3453 				void *context, int vl, int mode, u64 data)
3454 {
3455 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3456 
3457 	return dd->send_egress_err_status_cnt[22];
3458 }
3459 
3460 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3461 				const struct cntr_entry *entry,
3462 				void *context, int vl, int mode, u64 data)
3463 {
3464 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3465 
3466 	return dd->send_egress_err_status_cnt[21];
3467 }
3468 
3469 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3470 				const struct cntr_entry *entry,
3471 				void *context, int vl, int mode, u64 data)
3472 {
3473 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3474 
3475 	return dd->send_egress_err_status_cnt[20];
3476 }
3477 
3478 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3479 				const struct cntr_entry *entry,
3480 				void *context, int vl, int mode, u64 data)
3481 {
3482 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3483 
3484 	return dd->send_egress_err_status_cnt[19];
3485 }
3486 
3487 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3488 				const struct cntr_entry *entry,
3489 				void *context, int vl, int mode, u64 data)
3490 {
3491 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3492 
3493 	return dd->send_egress_err_status_cnt[18];
3494 }
3495 
3496 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3497 				const struct cntr_entry *entry,
3498 				void *context, int vl, int mode, u64 data)
3499 {
3500 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3501 
3502 	return dd->send_egress_err_status_cnt[17];
3503 }
3504 
3505 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3506 				const struct cntr_entry *entry,
3507 				void *context, int vl, int mode, u64 data)
3508 {
3509 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3510 
3511 	return dd->send_egress_err_status_cnt[16];
3512 }
3513 
3514 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3515 					   void *context, int vl, int mode,
3516 					   u64 data)
3517 {
3518 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3519 
3520 	return dd->send_egress_err_status_cnt[15];
3521 }
3522 
3523 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3524 						 void *context, int vl,
3525 						 int mode, u64 data)
3526 {
3527 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3528 
3529 	return dd->send_egress_err_status_cnt[14];
3530 }
3531 
3532 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3533 					       void *context, int vl, int mode,
3534 					       u64 data)
3535 {
3536 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3537 
3538 	return dd->send_egress_err_status_cnt[13];
3539 }
3540 
3541 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3542 					void *context, int vl, int mode,
3543 					u64 data)
3544 {
3545 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3546 
3547 	return dd->send_egress_err_status_cnt[12];
3548 }
3549 
3550 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3551 				const struct cntr_entry *entry,
3552 				void *context, int vl, int mode, u64 data)
3553 {
3554 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3555 
3556 	return dd->send_egress_err_status_cnt[11];
3557 }
3558 
3559 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3560 					     void *context, int vl, int mode,
3561 					     u64 data)
3562 {
3563 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3564 
3565 	return dd->send_egress_err_status_cnt[10];
3566 }
3567 
3568 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3569 					    void *context, int vl, int mode,
3570 					    u64 data)
3571 {
3572 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3573 
3574 	return dd->send_egress_err_status_cnt[9];
3575 }
3576 
3577 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3578 				const struct cntr_entry *entry,
3579 				void *context, int vl, int mode, u64 data)
3580 {
3581 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3582 
3583 	return dd->send_egress_err_status_cnt[8];
3584 }
3585 
3586 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3587 				const struct cntr_entry *entry,
3588 				void *context, int vl, int mode, u64 data)
3589 {
3590 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3591 
3592 	return dd->send_egress_err_status_cnt[7];
3593 }
3594 
3595 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3596 					    void *context, int vl, int mode,
3597 					    u64 data)
3598 {
3599 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3600 
3601 	return dd->send_egress_err_status_cnt[6];
3602 }
3603 
3604 static u64 access_tx_incorrect_link_state_err_cnt(
3605 				const struct cntr_entry *entry,
3606 				void *context, int vl, int mode, u64 data)
3607 {
3608 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3609 
3610 	return dd->send_egress_err_status_cnt[5];
3611 }
3612 
3613 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3614 				      void *context, int vl, int mode,
3615 				      u64 data)
3616 {
3617 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3618 
3619 	return dd->send_egress_err_status_cnt[4];
3620 }
3621 
3622 static u64 access_tx_egress_fifo_underrun_or_parity_err_cnt(
3623 				const struct cntr_entry *entry,
3624 				void *context, int vl, int mode, u64 data)
3625 {
3626 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3627 
3628 	return dd->send_egress_err_status_cnt[3];
3629 }
3630 
3631 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3632 					    void *context, int vl, int mode,
3633 					    u64 data)
3634 {
3635 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3636 
3637 	return dd->send_egress_err_status_cnt[2];
3638 }
3639 
3640 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3641 				const struct cntr_entry *entry,
3642 				void *context, int vl, int mode, u64 data)
3643 {
3644 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3645 
3646 	return dd->send_egress_err_status_cnt[1];
3647 }
3648 
3649 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3650 				const struct cntr_entry *entry,
3651 				void *context, int vl, int mode, u64 data)
3652 {
3653 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3654 
3655 	return dd->send_egress_err_status_cnt[0];
3656 }
3657 
3658 /*
3659  * Software counters corresponding to each of the
3660  * error status bits within SendErrStatus
3661  */
3662 static u64 access_send_csr_write_bad_addr_err_cnt(
3663 				const struct cntr_entry *entry,
3664 				void *context, int vl, int mode, u64 data)
3665 {
3666 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3667 
3668 	return dd->send_err_status_cnt[2];
3669 }
3670 
3671 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3672 						 void *context, int vl,
3673 						 int mode, u64 data)
3674 {
3675 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3676 
3677 	return dd->send_err_status_cnt[1];
3678 }
3679 
3680 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3681 				      void *context, int vl, int mode,
3682 				      u64 data)
3683 {
3684 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685 
3686 	return dd->send_err_status_cnt[0];
3687 }
3688 
3689 /*
3690  * Software counters corresponding to each of the
3691  * error status bits within SendCtxtErrStatus
3692  */
3693 static u64 access_pio_write_out_of_bounds_err_cnt(
3694 				const struct cntr_entry *entry,
3695 				void *context, int vl, int mode, u64 data)
3696 {
3697 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3698 
3699 	return dd->sw_ctxt_err_status_cnt[4];
3700 }
3701 
3702 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3703 					     void *context, int vl, int mode,
3704 					     u64 data)
3705 {
3706 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3707 
3708 	return dd->sw_ctxt_err_status_cnt[3];
3709 }
3710 
3711 static u64 access_pio_write_crosses_boundary_err_cnt(
3712 				const struct cntr_entry *entry,
3713 				void *context, int vl, int mode, u64 data)
3714 {
3715 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3716 
3717 	return dd->sw_ctxt_err_status_cnt[2];
3718 }
3719 
3720 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3721 						void *context, int vl,
3722 						int mode, u64 data)
3723 {
3724 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3725 
3726 	return dd->sw_ctxt_err_status_cnt[1];
3727 }
3728 
3729 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3730 					       void *context, int vl, int mode,
3731 					       u64 data)
3732 {
3733 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734 
3735 	return dd->sw_ctxt_err_status_cnt[0];
3736 }
3737 
3738 /*
3739  * Software counters corresponding to each of the
3740  * error status bits within SendDmaEngErrStatus
3741  */
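/*
 * These counts live in the device data, not in the per-engine state,
 * so each one aggregates a given error across all SDMA engines.
 */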
3742 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3743 				const struct cntr_entry *entry,
3744 				void *context, int vl, int mode, u64 data)
3745 {
3746 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3747 
3748 	return dd->sw_send_dma_eng_err_status_cnt[23];
3749 }
3750 
3751 static u64 access_sdma_header_storage_cor_err_cnt(
3752 				const struct cntr_entry *entry,
3753 				void *context, int vl, int mode, u64 data)
3754 {
3755 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3756 
3757 	return dd->sw_send_dma_eng_err_status_cnt[22];
3758 }
3759 
3760 static u64 access_sdma_packet_tracking_cor_err_cnt(
3761 				const struct cntr_entry *entry,
3762 				void *context, int vl, int mode, u64 data)
3763 {
3764 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3765 
3766 	return dd->sw_send_dma_eng_err_status_cnt[21];
3767 }
3768 
3769 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3770 					    void *context, int vl, int mode,
3771 					    u64 data)
3772 {
3773 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3774 
3775 	return dd->sw_send_dma_eng_err_status_cnt[20];
3776 }
3777 
3778 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3779 					      void *context, int vl, int mode,
3780 					      u64 data)
3781 {
3782 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3783 
3784 	return dd->sw_send_dma_eng_err_status_cnt[19];
3785 }
3786 
3787 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3788 				const struct cntr_entry *entry,
3789 				void *context, int vl, int mode, u64 data)
3790 {
3791 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3792 
3793 	return dd->sw_send_dma_eng_err_status_cnt[18];
3794 }
3795 
3796 static u64 access_sdma_header_storage_unc_err_cnt(
3797 				const struct cntr_entry *entry,
3798 				void *context, int vl, int mode, u64 data)
3799 {
3800 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3801 
3802 	return dd->sw_send_dma_eng_err_status_cnt[17];
3803 }
3804 
3805 static u64 access_sdma_packet_tracking_unc_err_cnt(
3806 				const struct cntr_entry *entry,
3807 				void *context, int vl, int mode, u64 data)
3808 {
3809 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3810 
3811 	return dd->sw_send_dma_eng_err_status_cnt[16];
3812 }
3813 
3814 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3815 					    void *context, int vl, int mode,
3816 					    u64 data)
3817 {
3818 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3819 
3820 	return dd->sw_send_dma_eng_err_status_cnt[15];
3821 }
3822 
3823 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3824 					      void *context, int vl, int mode,
3825 					      u64 data)
3826 {
3827 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3828 
3829 	return dd->sw_send_dma_eng_err_status_cnt[14];
3830 }
3831 
3832 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3833 				       void *context, int vl, int mode,
3834 				       u64 data)
3835 {
3836 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3837 
3838 	return dd->sw_send_dma_eng_err_status_cnt[13];
3839 }
3840 
3841 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3842 					     void *context, int vl, int mode,
3843 					     u64 data)
3844 {
3845 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3846 
3847 	return dd->sw_send_dma_eng_err_status_cnt[12];
3848 }
3849 
3850 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3851 					      void *context, int vl, int mode,
3852 					      u64 data)
3853 {
3854 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3855 
3856 	return dd->sw_send_dma_eng_err_status_cnt[11];
3857 }
3858 
3859 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3860 					     void *context, int vl, int mode,
3861 					     u64 data)
3862 {
3863 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3864 
3865 	return dd->sw_send_dma_eng_err_status_cnt[10];
3866 }
3867 
3868 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3869 					  void *context, int vl, int mode,
3870 					  u64 data)
3871 {
3872 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3873 
3874 	return dd->sw_send_dma_eng_err_status_cnt[9];
3875 }
3876 
3877 static u64 access_sdma_packet_desc_overflow_err_cnt(
3878 				const struct cntr_entry *entry,
3879 				void *context, int vl, int mode, u64 data)
3880 {
3881 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3882 
3883 	return dd->sw_send_dma_eng_err_status_cnt[8];
3884 }
3885 
3886 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3887 					       void *context, int vl,
3888 					       int mode, u64 data)
3889 {
3890 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891 
3892 	return dd->sw_send_dma_eng_err_status_cnt[7];
3893 }
3894 
3895 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3896 				    void *context, int vl, int mode, u64 data)
3897 {
3898 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3899 
3900 	return dd->sw_send_dma_eng_err_status_cnt[6];
3901 }
3902 
3903 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3904 					void *context, int vl, int mode,
3905 					u64 data)
3906 {
3907 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3908 
3909 	return dd->sw_send_dma_eng_err_status_cnt[5];
3910 }
3911 
3912 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3913 					  void *context, int vl, int mode,
3914 					  u64 data)
3915 {
3916 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3917 
3918 	return dd->sw_send_dma_eng_err_status_cnt[4];
3919 }
3920 
3921 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3922 				const struct cntr_entry *entry,
3923 				void *context, int vl, int mode, u64 data)
3924 {
3925 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3926 
3927 	return dd->sw_send_dma_eng_err_status_cnt[3];
3928 }
3929 
3930 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3931 					void *context, int vl, int mode,
3932 					u64 data)
3933 {
3934 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3935 
3936 	return dd->sw_send_dma_eng_err_status_cnt[2];
3937 }
3938 
3939 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3940 					    void *context, int vl, int mode,
3941 					    u64 data)
3942 {
3943 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3944 
3945 	return dd->sw_send_dma_eng_err_status_cnt[1];
3946 }
3947 
3948 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3949 					void *context, int vl, int mode,
3950 					u64 data)
3951 {
3952 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3953 
3954 	return dd->sw_send_dma_eng_err_status_cnt[0];
3955 }
3956 
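/*
 * DcRecvErr also has to account for bypass packet errors detected in
 * software: a read folds sw_rcv_bypass_packet_errors into the CSR
 * value, saturating at CNTR_MAX, while a write (counter reset) clears
 * the software count as well.
 */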
3957 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3958 				 void *context, int vl, int mode,
3959 				 u64 data)
3960 {
3961 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3962 	u64 val;
3963 	u64 csr = entry->csr;
3964 
3966 	val = read_write_csr(dd, csr, mode, data);
3967 	if (mode == CNTR_MODE_R) {
3968 		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3969 			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3970 	} else if (mode == CNTR_MODE_W) {
3971 		dd->sw_rcv_bypass_packet_errors = 0;
3972 	} else {
3973 		dd_dev_err(dd, "Invalid cntr register access mode\n");
3974 		return 0;
3975 	}
3976 	return val;
3977 }
3978 
3979 #define def_access_sw_cpu(cntr) \
3980 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
3981 			      void *context, int vl, int mode, u64 data)      \
3982 {									      \
3983 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
3984 	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
3985 			      ppd->ibport_data.rvp.cntr, vl,		      \
3986 			      mode, data);				      \
3987 }
3988 
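/*
 * For example, def_access_sw_cpu(rc_acks) expands to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				      void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */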
3989 def_access_sw_cpu(rc_acks);
3990 def_access_sw_cpu(rc_qacks);
3991 def_access_sw_cpu(rc_delayed_comp);
3992 
3993 #define def_access_ibp_counter(cntr) \
3994 static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
3995 				void *context, int vl, int mode, u64 data)    \
3996 {									      \
3997 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
3998 									      \
3999 	if (vl != CNTR_INVALID_VL)					      \
4000 		return 0;						      \
4001 									      \
4002 	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
4003 			     mode, data);				      \
4004 }
4005 
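/*
 * Each def_access_ibp_counter(cntr) invocation below likewise
 * generates an access_ibp_<cntr>() accessor for the software counter
 * ibport_data.rvp.n_<cntr>.  These counters have no per-VL breakdown,
 * so a request for any specific VL reads back as 0.
 */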
4006 def_access_ibp_counter(loop_pkts);
4007 def_access_ibp_counter(rc_resends);
4008 def_access_ibp_counter(rnr_naks);
4009 def_access_ibp_counter(other_naks);
4010 def_access_ibp_counter(rc_timeouts);
4011 def_access_ibp_counter(pkt_drops);
4012 def_access_ibp_counter(dmawait);
4013 def_access_ibp_counter(rc_seqnak);
4014 def_access_ibp_counter(rc_dupreq);
4015 def_access_ibp_counter(rdma_seq);
4016 def_access_ibp_counter(unaligned);
4017 def_access_ibp_counter(seq_naks);
4018 
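/*
 * Device counter table, indexed by the C_* device counter enums using
 * designated initializers.  Each entry supplies the counter name, the
 * CSR behind it (0 for software-only counters), the CNTR_* flags, and,
 * where needed, an explicit access routine.
 */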
4019 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4020 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4021 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4022 			CNTR_NORMAL),
4023 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4024 			CNTR_NORMAL),
4025 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4026 			RCV_TID_FLOW_GEN_MISMATCH_CNT,
4027 			CNTR_NORMAL),
4028 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4029 			CNTR_NORMAL),
4030 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4031 			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4032 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4033 			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4034 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4035 			CNTR_NORMAL),
4036 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4037 			CNTR_NORMAL),
4038 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4039 			CNTR_NORMAL),
4040 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4041 			CNTR_NORMAL),
4042 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4043 			CNTR_NORMAL),
4044 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4045 			CNTR_NORMAL),
4046 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4047 			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4048 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4049 			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4050 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4051 			      CNTR_SYNTH),
4052 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4053 			    access_dc_rcv_err_cnt),
4054 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4055 				 CNTR_SYNTH),
4056 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4057 				  CNTR_SYNTH),
4058 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4059 				  CNTR_SYNTH),
4060 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4061 				   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4062 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4063 				  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4064 				  CNTR_SYNTH),
4065 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4066 				DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4067 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4068 			       CNTR_SYNTH),
4069 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4070 			      CNTR_SYNTH),
4071 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4072 			       CNTR_SYNTH),
4073 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4074 				 CNTR_SYNTH),
4075 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4076 				CNTR_SYNTH),
4077 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4078 				CNTR_SYNTH),
4079 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4080 			       CNTR_SYNTH),
4081 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4082 				 CNTR_SYNTH | CNTR_VL),
4083 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4084 				CNTR_SYNTH | CNTR_VL),
4085 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4086 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4087 				 CNTR_SYNTH | CNTR_VL),
4088 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4089 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4090 				 CNTR_SYNTH | CNTR_VL),
4091 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4092 			      CNTR_SYNTH),
4093 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4094 				 CNTR_SYNTH | CNTR_VL),
4095 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4096 				CNTR_SYNTH),
4097 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4098 				   CNTR_SYNTH | CNTR_VL),
4099 [C_DC_TOTAL_CRC] =
4100 	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4101 			 CNTR_SYNTH),
4102 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4103 				  CNTR_SYNTH),
4104 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4105 				  CNTR_SYNTH),
4106 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4107 				  CNTR_SYNTH),
4108 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4109 				  CNTR_SYNTH),
4110 [C_DC_CRC_MULT_LN] =
4111 	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4112 			 CNTR_SYNTH),
4113 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4114 				    CNTR_SYNTH),
4115 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4116 				    CNTR_SYNTH),
4117 [C_DC_SEQ_CRC_CNT] =
4118 	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4119 			 CNTR_SYNTH),
4120 [C_DC_ESC0_ONLY_CNT] =
4121 	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4122 			 CNTR_SYNTH),
4123 [C_DC_ESC0_PLUS1_CNT] =
4124 	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4125 			 CNTR_SYNTH),
4126 [C_DC_ESC0_PLUS2_CNT] =
4127 	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4128 			 CNTR_SYNTH),
4129 [C_DC_REINIT_FROM_PEER_CNT] =
4130 	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4131 			 CNTR_SYNTH),
4132 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4133 				  CNTR_SYNTH),
4134 [C_DC_MISC_FLG_CNT] =
4135 	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4136 			 CNTR_SYNTH),
4137 [C_DC_PRF_GOOD_LTP_CNT] =
4138 	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4139 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4140 	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4141 			 CNTR_SYNTH),
4142 [C_DC_PRF_RX_FLIT_CNT] =
4143 	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4144 [C_DC_PRF_TX_FLIT_CNT] =
4145 	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4146 [C_DC_PRF_CLK_CNTR] =
4147 	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4148 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4149 	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4150 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4151 	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4152 			 CNTR_SYNTH),
4153 [C_DC_PG_STS_TX_SBE_CNT] =
4154 	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4155 [C_DC_PG_STS_TX_MBE_CNT] =
4156 	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4157 			 CNTR_SYNTH),
4158 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4159 			    access_sw_cpu_intr),
4160 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4161 			    access_sw_cpu_rcv_limit),
4162 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4163 			    access_sw_vtx_wait),
4164 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4165 			    access_sw_pio_wait),
4166 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4167 			    access_sw_pio_drain),
4168 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4169 			    access_sw_kmem_wait),
4170 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4171 			    access_sw_send_schedule),
4172 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4173 				      SEND_DMA_DESC_FETCHED_CNT, 0,
4174 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4175 				      dev_access_u32_csr),
4176 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4177 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4178 			     access_sde_int_cnt),
4179 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4180 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4181 			     access_sde_err_cnt),
4182 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4183 				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4184 				  access_sde_idle_int_cnt),
4185 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4186 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4187 				      access_sde_progress_int_cnt),
4188 /* MISC_ERR_STATUS */
4189 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4190 				CNTR_NORMAL,
4191 				access_misc_pll_lock_fail_err_cnt),
4192 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4193 				CNTR_NORMAL,
4194 				access_misc_mbist_fail_err_cnt),
4195 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4196 				CNTR_NORMAL,
4197 				access_misc_invalid_eep_cmd_err_cnt),
4198 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4199 				CNTR_NORMAL,
4200 				access_misc_efuse_done_parity_err_cnt),
4201 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4202 				CNTR_NORMAL,
4203 				access_misc_efuse_write_err_cnt),
4204 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4205 				0, CNTR_NORMAL,
4206 				access_misc_efuse_read_bad_addr_err_cnt),
4207 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4208 				CNTR_NORMAL,
4209 				access_misc_efuse_csr_parity_err_cnt),
4210 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4211 				CNTR_NORMAL,
4212 				access_misc_fw_auth_failed_err_cnt),
4213 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4214 				CNTR_NORMAL,
4215 				access_misc_key_mismatch_err_cnt),
4216 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4217 				CNTR_NORMAL,
4218 				access_misc_sbus_write_failed_err_cnt),
4219 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4220 				CNTR_NORMAL,
4221 				access_misc_csr_write_bad_addr_err_cnt),
4222 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4223 				CNTR_NORMAL,
4224 				access_misc_csr_read_bad_addr_err_cnt),
4225 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4226 				CNTR_NORMAL,
4227 				access_misc_csr_parity_err_cnt),
4228 /* CceErrStatus */
4229 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4230 				CNTR_NORMAL,
4231 				access_sw_cce_err_status_aggregated_cnt),
4232 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4233 				CNTR_NORMAL,
4234 				access_cce_msix_csr_parity_err_cnt),
4235 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4236 				CNTR_NORMAL,
4237 				access_cce_int_map_unc_err_cnt),
4238 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4239 				CNTR_NORMAL,
4240 				access_cce_int_map_cor_err_cnt),
4241 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4242 				CNTR_NORMAL,
4243 				access_cce_msix_table_unc_err_cnt),
4244 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4245 				CNTR_NORMAL,
4246 				access_cce_msix_table_cor_err_cnt),
4247 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4248 				0, CNTR_NORMAL,
4249 				access_cce_rxdma_conv_fifo_parity_err_cnt),
4250 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4251 				0, CNTR_NORMAL,
4252 				access_cce_rcpl_async_fifo_parity_err_cnt),
4253 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4254 				CNTR_NORMAL,
4255 				access_cce_seg_write_bad_addr_err_cnt),
4256 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4257 				CNTR_NORMAL,
4258 				access_cce_seg_read_bad_addr_err_cnt),
4259 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4260 				CNTR_NORMAL,
4261 				access_la_triggered_cnt),
4262 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4263 				CNTR_NORMAL,
4264 				access_cce_trgt_cpl_timeout_err_cnt),
4265 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4266 				CNTR_NORMAL,
4267 				access_pcic_receive_parity_err_cnt),
4268 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4269 				CNTR_NORMAL,
4270 				access_pcic_transmit_back_parity_err_cnt),
4271 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4272 				0, CNTR_NORMAL,
4273 				access_pcic_transmit_front_parity_err_cnt),
4274 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4275 				CNTR_NORMAL,
4276 				access_pcic_cpl_dat_q_unc_err_cnt),
4277 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4278 				CNTR_NORMAL,
4279 				access_pcic_cpl_hd_q_unc_err_cnt),
4280 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4281 				CNTR_NORMAL,
4282 				access_pcic_post_dat_q_unc_err_cnt),
4283 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4284 				CNTR_NORMAL,
4285 				access_pcic_post_hd_q_unc_err_cnt),
4286 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4287 				CNTR_NORMAL,
4288 				access_pcic_retry_sot_mem_unc_err_cnt),
4289 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4290 				CNTR_NORMAL,
4291 				access_pcic_retry_mem_unc_err),
4292 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4293 				CNTR_NORMAL,
4294 				access_pcic_n_post_dat_q_parity_err_cnt),
4295 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4296 				CNTR_NORMAL,
4297 				access_pcic_n_post_h_q_parity_err_cnt),
4298 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4299 				CNTR_NORMAL,
4300 				access_pcic_cpl_dat_q_cor_err_cnt),
4301 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4302 				CNTR_NORMAL,
4303 				access_pcic_cpl_hd_q_cor_err_cnt),
4304 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4305 				CNTR_NORMAL,
4306 				access_pcic_post_dat_q_cor_err_cnt),
4307 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4308 				CNTR_NORMAL,
4309 				access_pcic_post_hd_q_cor_err_cnt),
4310 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4311 				CNTR_NORMAL,
4312 				access_pcic_retry_sot_mem_cor_err_cnt),
4313 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4314 				CNTR_NORMAL,
4315 				access_pcic_retry_mem_cor_err_cnt),
4316 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4317 				"CceCli1AsyncFifoDbgParityError", 0, 0,
4318 				CNTR_NORMAL,
4319 				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4320 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4321 				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
4322 				CNTR_NORMAL,
4323 				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4324 				),
4325 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4326 			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4327 			CNTR_NORMAL,
4328 			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4329 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4330 			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4331 			CNTR_NORMAL,
4332 			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4333 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4334 			0, CNTR_NORMAL,
4335 			access_cce_cli2_async_fifo_parity_err_cnt),
4336 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4337 			CNTR_NORMAL,
4338 			access_cce_csr_cfg_bus_parity_err_cnt),
4339 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4340 			0, CNTR_NORMAL,
4341 			access_cce_cli0_async_fifo_parity_err_cnt),
4342 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4343 			CNTR_NORMAL,
4344 			access_cce_rspd_data_parity_err_cnt),
4345 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4346 			CNTR_NORMAL,
4347 			access_cce_trgt_access_err_cnt),
4348 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4349 			0, CNTR_NORMAL,
4350 			access_cce_trgt_async_fifo_parity_err_cnt),
4351 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4352 			CNTR_NORMAL,
4353 			access_cce_csr_write_bad_addr_err_cnt),
4354 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4355 			CNTR_NORMAL,
4356 			access_cce_csr_read_bad_addr_err_cnt),
4357 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4358 			CNTR_NORMAL,
4359 			access_ccs_csr_parity_err_cnt),
4360 
4361 /* RcvErrStatus */
4362 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4363 			CNTR_NORMAL,
4364 			access_rx_csr_parity_err_cnt),
4365 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4366 			CNTR_NORMAL,
4367 			access_rx_csr_write_bad_addr_err_cnt),
4368 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4369 			CNTR_NORMAL,
4370 			access_rx_csr_read_bad_addr_err_cnt),
4371 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4372 			CNTR_NORMAL,
4373 			access_rx_dma_csr_unc_err_cnt),
4374 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4375 			CNTR_NORMAL,
4376 			access_rx_dma_dq_fsm_encoding_err_cnt),
4377 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4378 			CNTR_NORMAL,
4379 			access_rx_dma_eq_fsm_encoding_err_cnt),
4380 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4381 			CNTR_NORMAL,
4382 			access_rx_dma_csr_parity_err_cnt),
4383 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4384 			CNTR_NORMAL,
4385 			access_rx_rbuf_data_cor_err_cnt),
4386 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4387 			CNTR_NORMAL,
4388 			access_rx_rbuf_data_unc_err_cnt),
4389 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4390 			CNTR_NORMAL,
4391 			access_rx_dma_data_fifo_rd_cor_err_cnt),
4392 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4393 			CNTR_NORMAL,
4394 			access_rx_dma_data_fifo_rd_unc_err_cnt),
4395 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4396 			CNTR_NORMAL,
4397 			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4398 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4399 			CNTR_NORMAL,
4400 			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4401 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4402 			CNTR_NORMAL,
4403 			access_rx_rbuf_desc_part2_cor_err_cnt),
4404 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4405 			CNTR_NORMAL,
4406 			access_rx_rbuf_desc_part2_unc_err_cnt),
4407 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4408 			CNTR_NORMAL,
4409 			access_rx_rbuf_desc_part1_cor_err_cnt),
4410 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4411 			CNTR_NORMAL,
4412 			access_rx_rbuf_desc_part1_unc_err_cnt),
4413 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4414 			CNTR_NORMAL,
4415 			access_rx_hq_intr_fsm_err_cnt),
4416 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4417 			CNTR_NORMAL,
4418 			access_rx_hq_intr_csr_parity_err_cnt),
4419 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4420 			CNTR_NORMAL,
4421 			access_rx_lookup_csr_parity_err_cnt),
4422 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4423 			CNTR_NORMAL,
4424 			access_rx_lookup_rcv_array_cor_err_cnt),
4425 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4426 			CNTR_NORMAL,
4427 			access_rx_lookup_rcv_array_unc_err_cnt),
4428 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4429 			0, CNTR_NORMAL,
4430 			access_rx_lookup_des_part2_parity_err_cnt),
4431 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4432 			0, CNTR_NORMAL,
4433 			access_rx_lookup_des_part1_unc_cor_err_cnt),
4434 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4435 			CNTR_NORMAL,
4436 			access_rx_lookup_des_part1_unc_err_cnt),
4437 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4438 			CNTR_NORMAL,
4439 			access_rx_rbuf_next_free_buf_cor_err_cnt),
4440 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4441 			CNTR_NORMAL,
4442 			access_rx_rbuf_next_free_buf_unc_err_cnt),
4443 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4444 			"RxRbufFlInitWrAddrParityErr", 0, 0,
4445 			CNTR_NORMAL,
4446 			access_rbuf_fl_init_wr_addr_parity_err_cnt),
4447 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4448 			0, CNTR_NORMAL,
4449 			access_rx_rbuf_fl_initdone_parity_err_cnt),
4450 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4451 			0, CNTR_NORMAL,
4452 			access_rx_rbuf_fl_write_addr_parity_err_cnt),
4453 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4454 			CNTR_NORMAL,
4455 			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4456 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4457 			CNTR_NORMAL,
4458 			access_rx_rbuf_empty_err_cnt),
4459 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4460 			CNTR_NORMAL,
4461 			access_rx_rbuf_full_err_cnt),
4462 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4463 			CNTR_NORMAL,
4464 			access_rbuf_bad_lookup_err_cnt),
4465 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4466 			CNTR_NORMAL,
4467 			access_rbuf_ctx_id_parity_err_cnt),
4468 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4469 			CNTR_NORMAL,
4470 			access_rbuf_csr_qeopdw_parity_err_cnt),
4471 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4472 			"RxRbufCsrQNumOfPktParityErr", 0, 0,
4473 			CNTR_NORMAL,
4474 			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4475 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4476 			"RxRbufCsrQTlPtrParityErr", 0, 0,
4477 			CNTR_NORMAL,
4478 			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4479 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4480 			0, CNTR_NORMAL,
4481 			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4482 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4483 			0, CNTR_NORMAL,
4484 			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4485 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4486 			0, 0, CNTR_NORMAL,
4487 			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4488 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4489 			0, CNTR_NORMAL,
4490 			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4491 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4492 			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
4493 			CNTR_NORMAL,
4494 			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4495 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4496 			0, CNTR_NORMAL,
4497 			access_rx_rbuf_block_list_read_cor_err_cnt),
4498 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4499 			0, CNTR_NORMAL,
4500 			access_rx_rbuf_block_list_read_unc_err_cnt),
4501 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4502 			CNTR_NORMAL,
4503 			access_rx_rbuf_lookup_des_cor_err_cnt),
4504 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4505 			CNTR_NORMAL,
4506 			access_rx_rbuf_lookup_des_unc_err_cnt),
4507 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4508 			"RxRbufLookupDesRegUncCorErr", 0, 0,
4509 			CNTR_NORMAL,
4510 			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4511 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4512 			CNTR_NORMAL,
4513 			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4514 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4515 			CNTR_NORMAL,
4516 			access_rx_rbuf_free_list_cor_err_cnt),
4517 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4518 			CNTR_NORMAL,
4519 			access_rx_rbuf_free_list_unc_err_cnt),
4520 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4521 			CNTR_NORMAL,
4522 			access_rx_rcv_fsm_encoding_err_cnt),
4523 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4524 			CNTR_NORMAL,
4525 			access_rx_dma_flag_cor_err_cnt),
4526 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4527 			CNTR_NORMAL,
4528 			access_rx_dma_flag_unc_err_cnt),
4529 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4530 			CNTR_NORMAL,
4531 			access_rx_dc_sop_eop_parity_err_cnt),
4532 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4533 			CNTR_NORMAL,
4534 			access_rx_rcv_csr_parity_err_cnt),
4535 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4536 			CNTR_NORMAL,
4537 			access_rx_rcv_qp_map_table_cor_err_cnt),
4538 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4539 			CNTR_NORMAL,
4540 			access_rx_rcv_qp_map_table_unc_err_cnt),
4541 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4542 			CNTR_NORMAL,
4543 			access_rx_rcv_data_cor_err_cnt),
4544 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4545 			CNTR_NORMAL,
4546 			access_rx_rcv_data_unc_err_cnt),
4547 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4548 			CNTR_NORMAL,
4549 			access_rx_rcv_hdr_cor_err_cnt),
4550 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4551 			CNTR_NORMAL,
4552 			access_rx_rcv_hdr_unc_err_cnt),
4553 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4554 			CNTR_NORMAL,
4555 			access_rx_dc_intf_parity_err_cnt),
4556 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4557 			CNTR_NORMAL,
4558 			access_rx_dma_csr_cor_err_cnt),
4559 /* SendPioErrStatus */
4560 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4561 			CNTR_NORMAL,
4562 			access_pio_pec_sop_head_parity_err_cnt),
4563 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4564 			CNTR_NORMAL,
4565 			access_pio_pcc_sop_head_parity_err_cnt),
4566 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4567 			0, 0, CNTR_NORMAL,
4568 			access_pio_last_returned_cnt_parity_err_cnt),
4569 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4570 			0, CNTR_NORMAL,
4571 			access_pio_current_free_cnt_parity_err_cnt),
4572 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4573 			CNTR_NORMAL,
4574 			access_pio_reserved_31_err_cnt),
4575 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4576 			CNTR_NORMAL,
4577 			access_pio_reserved_30_err_cnt),
4578 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4579 			CNTR_NORMAL,
4580 			access_pio_ppmc_sop_len_err_cnt),
4581 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4582 			CNTR_NORMAL,
4583 			access_pio_ppmc_bqc_mem_parity_err_cnt),
4584 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4585 			CNTR_NORMAL,
4586 			access_pio_vl_fifo_parity_err_cnt),
4587 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4588 			CNTR_NORMAL,
4589 			access_pio_vlf_sop_parity_err_cnt),
4590 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4591 			CNTR_NORMAL,
4592 			access_pio_vlf_v1_len_parity_err_cnt),
4593 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4594 			CNTR_NORMAL,
4595 			access_pio_block_qw_count_parity_err_cnt),
4596 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4597 			CNTR_NORMAL,
4598 			access_pio_write_qw_valid_parity_err_cnt),
4599 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4600 			CNTR_NORMAL,
4601 			access_pio_state_machine_err_cnt),
4602 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4603 			CNTR_NORMAL,
4604 			access_pio_write_data_parity_err_cnt),
4605 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4606 			CNTR_NORMAL,
4607 			access_pio_host_addr_mem_cor_err_cnt),
4608 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4609 			CNTR_NORMAL,
4610 			access_pio_host_addr_mem_unc_err_cnt),
4611 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4612 			CNTR_NORMAL,
4613 			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4614 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4615 			CNTR_NORMAL,
4616 			access_pio_init_sm_in_err_cnt),
4617 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4618 			CNTR_NORMAL,
4619 			access_pio_ppmc_pbl_fifo_err_cnt),
4620 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4621 			0, CNTR_NORMAL,
4622 			access_pio_credit_ret_fifo_parity_err_cnt),
4623 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4624 			CNTR_NORMAL,
4625 			access_pio_v1_len_mem_bank1_cor_err_cnt),
4626 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4627 			CNTR_NORMAL,
4628 			access_pio_v1_len_mem_bank0_cor_err_cnt),
4629 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4630 			CNTR_NORMAL,
4631 			access_pio_v1_len_mem_bank1_unc_err_cnt),
4632 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4633 			CNTR_NORMAL,
4634 			access_pio_v1_len_mem_bank0_unc_err_cnt),
4635 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4636 			CNTR_NORMAL,
4637 			access_pio_sm_pkt_reset_parity_err_cnt),
4638 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4639 			CNTR_NORMAL,
4640 			access_pio_pkt_evict_fifo_parity_err_cnt),
4641 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4642 			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
4643 			CNTR_NORMAL,
4644 			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4645 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4646 			CNTR_NORMAL,
4647 			access_pio_sbrdctl_crrel_parity_err_cnt),
4648 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4649 			CNTR_NORMAL,
4650 			access_pio_pec_fifo_parity_err_cnt),
4651 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4652 			CNTR_NORMAL,
4653 			access_pio_pcc_fifo_parity_err_cnt),
4654 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4655 			CNTR_NORMAL,
4656 			access_pio_sb_mem_fifo1_err_cnt),
4657 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4658 			CNTR_NORMAL,
4659 			access_pio_sb_mem_fifo0_err_cnt),
4660 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4661 			CNTR_NORMAL,
4662 			access_pio_csr_parity_err_cnt),
4663 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4664 			CNTR_NORMAL,
4665 			access_pio_write_addr_parity_err_cnt),
4666 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4667 			CNTR_NORMAL,
4668 			access_pio_write_bad_ctxt_err_cnt),
4669 /* SendDmaErrStatus */
4670 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4671 			0, CNTR_NORMAL,
4672 			access_sdma_pcie_req_tracking_cor_err_cnt),
4673 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4674 			0, CNTR_NORMAL,
4675 			access_sdma_pcie_req_tracking_unc_err_cnt),
4676 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4677 			CNTR_NORMAL,
4678 			access_sdma_csr_parity_err_cnt),
4679 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4680 			CNTR_NORMAL,
4681 			access_sdma_rpy_tag_err_cnt),
4682 /* SendEgressErrStatus */
4683 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4684 			CNTR_NORMAL,
4685 			access_tx_read_pio_memory_csr_unc_err_cnt),
4686 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4687 			0, CNTR_NORMAL,
4688 			access_tx_read_sdma_memory_csr_err_cnt),
4689 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4690 			CNTR_NORMAL,
4691 			access_tx_egress_fifo_cor_err_cnt),
4692 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4693 			CNTR_NORMAL,
4694 			access_tx_read_pio_memory_cor_err_cnt),
4695 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4696 			CNTR_NORMAL,
4697 			access_tx_read_sdma_memory_cor_err_cnt),
4698 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4699 			CNTR_NORMAL,
4700 			access_tx_sb_hdr_cor_err_cnt),
4701 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4702 			CNTR_NORMAL,
4703 			access_tx_credit_overrun_err_cnt),
4704 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4705 			CNTR_NORMAL,
4706 			access_tx_launch_fifo8_cor_err_cnt),
4707 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4708 			CNTR_NORMAL,
4709 			access_tx_launch_fifo7_cor_err_cnt),
4710 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4711 			CNTR_NORMAL,
4712 			access_tx_launch_fifo6_cor_err_cnt),
4713 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4714 			CNTR_NORMAL,
4715 			access_tx_launch_fifo5_cor_err_cnt),
4716 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4717 			CNTR_NORMAL,
4718 			access_tx_launch_fifo4_cor_err_cnt),
4719 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4720 			CNTR_NORMAL,
4721 			access_tx_launch_fifo3_cor_err_cnt),
4722 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4723 			CNTR_NORMAL,
4724 			access_tx_launch_fifo2_cor_err_cnt),
4725 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4726 			CNTR_NORMAL,
4727 			access_tx_launch_fifo1_cor_err_cnt),
4728 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4729 			CNTR_NORMAL,
4730 			access_tx_launch_fifo0_cor_err_cnt),
4731 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4732 			CNTR_NORMAL,
4733 			access_tx_credit_return_vl_err_cnt),
4734 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4735 			CNTR_NORMAL,
4736 			access_tx_hcrc_insertion_err_cnt),
4737 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4738 			CNTR_NORMAL,
4739 			access_tx_egress_fifo_unc_err_cnt),
4740 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4741 			CNTR_NORMAL,
4742 			access_tx_read_pio_memory_unc_err_cnt),
4743 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4744 			CNTR_NORMAL,
4745 			access_tx_read_sdma_memory_unc_err_cnt),
4746 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4747 			CNTR_NORMAL,
4748 			access_tx_sb_hdr_unc_err_cnt),
4749 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4750 			CNTR_NORMAL,
4751 			access_tx_credit_return_partiy_err_cnt),
4752 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4753 			0, 0, CNTR_NORMAL,
4754 			access_tx_launch_fifo8_unc_or_parity_err_cnt),
4755 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4756 			0, 0, CNTR_NORMAL,
4757 			access_tx_launch_fifo7_unc_or_parity_err_cnt),
4758 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4759 			0, 0, CNTR_NORMAL,
4760 			access_tx_launch_fifo6_unc_or_parity_err_cnt),
4761 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4762 			0, 0, CNTR_NORMAL,
4763 			access_tx_launch_fifo5_unc_or_parity_err_cnt),
4764 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4765 			0, 0, CNTR_NORMAL,
4766 			access_tx_launch_fifo4_unc_or_parity_err_cnt),
4767 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4768 			0, 0, CNTR_NORMAL,
4769 			access_tx_launch_fifo3_unc_or_parity_err_cnt),
4770 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4771 			0, 0, CNTR_NORMAL,
4772 			access_tx_launch_fifo2_unc_or_parity_err_cnt),
4773 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4774 			0, 0, CNTR_NORMAL,
4775 			access_tx_launch_fifo1_unc_or_parity_err_cnt),
4776 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4777 			0, 0, CNTR_NORMAL,
4778 			access_tx_launch_fifo0_unc_or_parity_err_cnt),
4779 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4780 			0, 0, CNTR_NORMAL,
4781 			access_tx_sdma15_disallowed_packet_err_cnt),
4782 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4783 			0, 0, CNTR_NORMAL,
4784 			access_tx_sdma14_disallowed_packet_err_cnt),
4785 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4786 			0, 0, CNTR_NORMAL,
4787 			access_tx_sdma13_disallowed_packet_err_cnt),
4788 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4789 			0, 0, CNTR_NORMAL,
4790 			access_tx_sdma12_disallowed_packet_err_cnt),
4791 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4792 			0, 0, CNTR_NORMAL,
4793 			access_tx_sdma11_disallowed_packet_err_cnt),
4794 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4795 			0, 0, CNTR_NORMAL,
4796 			access_tx_sdma10_disallowed_packet_err_cnt),
4797 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4798 			0, 0, CNTR_NORMAL,
4799 			access_tx_sdma9_disallowed_packet_err_cnt),
4800 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4801 			0, 0, CNTR_NORMAL,
4802 			access_tx_sdma8_disallowed_packet_err_cnt),
4803 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4804 			0, 0, CNTR_NORMAL,
4805 			access_tx_sdma7_disallowed_packet_err_cnt),
4806 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4807 			0, 0, CNTR_NORMAL,
4808 			access_tx_sdma6_disallowed_packet_err_cnt),
4809 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4810 			0, 0, CNTR_NORMAL,
4811 			access_tx_sdma5_disallowed_packet_err_cnt),
4812 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4813 			0, 0, CNTR_NORMAL,
4814 			access_tx_sdma4_disallowed_packet_err_cnt),
4815 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4816 			0, 0, CNTR_NORMAL,
4817 			access_tx_sdma3_disallowed_packet_err_cnt),
4818 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4819 			0, 0, CNTR_NORMAL,
4820 			access_tx_sdma2_disallowed_packet_err_cnt),
4821 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4822 			0, 0, CNTR_NORMAL,
4823 			access_tx_sdma1_disallowed_packet_err_cnt),
4824 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4825 			0, 0, CNTR_NORMAL,
4826 			access_tx_sdma0_disallowed_packet_err_cnt),
4827 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4828 			CNTR_NORMAL,
4829 			access_tx_config_parity_err_cnt),
4830 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4831 			CNTR_NORMAL,
4832 			access_tx_sbrd_ctl_csr_parity_err_cnt),
4833 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4834 			CNTR_NORMAL,
4835 			access_tx_launch_csr_parity_err_cnt),
4836 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4837 			CNTR_NORMAL,
4838 			access_tx_illegal_vl_err_cnt),
4839 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4840 			"TxSbrdCtlStateMachineParityErr", 0, 0,
4841 			CNTR_NORMAL,
4842 			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4843 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4844 			CNTR_NORMAL,
4845 			access_egress_reserved_10_err_cnt),
4846 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4847 			CNTR_NORMAL,
4848 			access_egress_reserved_9_err_cnt),
4849 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4850 			0, 0, CNTR_NORMAL,
4851 			access_tx_sdma_launch_intf_parity_err_cnt),
4852 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4853 			CNTR_NORMAL,
4854 			access_tx_pio_launch_intf_parity_err_cnt),
4855 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4856 			CNTR_NORMAL,
4857 			access_egress_reserved_6_err_cnt),
4858 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4859 			CNTR_NORMAL,
4860 			access_tx_incorrect_link_state_err_cnt),
4861 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4862 			CNTR_NORMAL,
4863 			access_tx_linkdown_err_cnt),
4864 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4865 			"EgressFifoUnderrunOrParityErr", 0, 0,
4866 			CNTR_NORMAL,
4867 			access_tx_egress_fifi_underrun_or_parity_err_cnt),
4868 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4869 			CNTR_NORMAL,
4870 			access_egress_reserved_2_err_cnt),
4871 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4872 			CNTR_NORMAL,
4873 			access_tx_pkt_integrity_mem_unc_err_cnt),
4874 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4875 			CNTR_NORMAL,
4876 			access_tx_pkt_integrity_mem_cor_err_cnt),
4877 /* SendErrStatus */
4878 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4879 			CNTR_NORMAL,
4880 			access_send_csr_write_bad_addr_err_cnt),
4881 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4882 			CNTR_NORMAL,
4883 			access_send_csr_read_bad_addr_err_cnt),
4884 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4885 			CNTR_NORMAL,
4886 			access_send_csr_parity_cnt),
4887 /* SendCtxtErrStatus */
4888 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4889 			CNTR_NORMAL,
4890 			access_pio_write_out_of_bounds_err_cnt),
4891 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4892 			CNTR_NORMAL,
4893 			access_pio_write_overflow_err_cnt),
4894 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4895 			0, 0, CNTR_NORMAL,
4896 			access_pio_write_crosses_boundary_err_cnt),
4897 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4898 			CNTR_NORMAL,
4899 			access_pio_disallowed_packet_err_cnt),
4900 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4901 			CNTR_NORMAL,
4902 			access_pio_inconsistent_sop_err_cnt),
4903 /* SendDmaEngErrStatus */
4904 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4905 			0, 0, CNTR_NORMAL,
4906 			access_sdma_header_request_fifo_cor_err_cnt),
4907 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4908 			CNTR_NORMAL,
4909 			access_sdma_header_storage_cor_err_cnt),
4910 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4911 			CNTR_NORMAL,
4912 			access_sdma_packet_tracking_cor_err_cnt),
4913 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4914 			CNTR_NORMAL,
4915 			access_sdma_assembly_cor_err_cnt),
4916 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4917 			CNTR_NORMAL,
4918 			access_sdma_desc_table_cor_err_cnt),
4919 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4920 			0, 0, CNTR_NORMAL,
4921 			access_sdma_header_request_fifo_unc_err_cnt),
4922 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4923 			CNTR_NORMAL,
4924 			access_sdma_header_storage_unc_err_cnt),
4925 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4926 			CNTR_NORMAL,
4927 			access_sdma_packet_tracking_unc_err_cnt),
4928 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4929 			CNTR_NORMAL,
4930 			access_sdma_assembly_unc_err_cnt),
4931 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4932 			CNTR_NORMAL,
4933 			access_sdma_desc_table_unc_err_cnt),
4934 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4935 			CNTR_NORMAL,
4936 			access_sdma_timeout_err_cnt),
4937 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4938 			CNTR_NORMAL,
4939 			access_sdma_header_length_err_cnt),
4940 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4941 			CNTR_NORMAL,
4942 			access_sdma_header_address_err_cnt),
4943 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4944 			CNTR_NORMAL,
4945 			access_sdma_header_select_err_cnt),
4946 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4947 			CNTR_NORMAL,
4948 			access_sdma_reserved_9_err_cnt),
4949 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4950 			CNTR_NORMAL,
4951 			access_sdma_packet_desc_overflow_err_cnt),
4952 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4953 			CNTR_NORMAL,
4954 			access_sdma_length_mismatch_err_cnt),
4955 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4956 			CNTR_NORMAL,
4957 			access_sdma_halt_err_cnt),
4958 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4959 			CNTR_NORMAL,
4960 			access_sdma_mem_read_err_cnt),
4961 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4962 			CNTR_NORMAL,
4963 			access_sdma_first_desc_err_cnt),
4964 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4965 			CNTR_NORMAL,
4966 			access_sdma_tail_out_of_bounds_err_cnt),
4967 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4968 			CNTR_NORMAL,
4969 			access_sdma_too_long_err_cnt),
4970 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4971 			CNTR_NORMAL,
4972 			access_sdma_gen_mismatch_err_cnt),
4973 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4974 			CNTR_NORMAL,
4975 			access_sdma_wrong_dw_err_cnt),
4976 };
4977 
4978 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4979 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4980 			CNTR_NORMAL),
4981 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4982 			CNTR_NORMAL),
4983 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4984 			CNTR_NORMAL),
4985 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4986 			CNTR_NORMAL),
4987 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4988 			CNTR_NORMAL),
4989 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4990 			CNTR_NORMAL),
4991 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4992 			CNTR_NORMAL),
4993 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4994 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4995 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4996 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4997 				      CNTR_SYNTH | CNTR_VL),
4998 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4999 				     CNTR_SYNTH | CNTR_VL),
5000 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5001 				      CNTR_SYNTH | CNTR_VL),
5002 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5003 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5004 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5005 			     access_sw_link_dn_cnt),
5006 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5007 			   access_sw_link_up_cnt),
5008 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5009 				 access_sw_unknown_frame_cnt),
5010 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5011 			     access_sw_xmit_discards),
5012 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5013 				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5014 				access_sw_xmit_discards),
5015 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5016 				 access_xmit_constraint_errs),
5017 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5018 				access_rcv_constraint_errs),
5019 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5020 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5021 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5022 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5023 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5024 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5025 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5026 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5027 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5028 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5029 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5030 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5031 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5032 			       access_sw_cpu_rc_acks),
5033 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5034 				access_sw_cpu_rc_qacks),
5035 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5036 				       access_sw_cpu_rc_delayed_comp),
5037 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5038 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5039 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5040 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5041 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5042 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5043 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5044 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5045 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5046 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5047 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5048 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5049 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5050 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5051 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5052 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5053 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5054 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5055 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5056 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5057 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5058 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5059 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5060 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5061 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5062 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5063 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5064 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5065 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5066 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5067 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5068 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5069 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5070 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5071 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5072 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5073 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5074 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5075 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5076 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5077 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5078 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5079 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5080 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5081 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5082 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5083 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5084 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5085 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5086 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5087 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5088 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5089 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5090 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5091 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5092 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5093 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5094 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5095 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5096 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5097 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5098 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5099 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5100 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5101 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5102 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5103 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5104 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5105 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5106 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5107 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5108 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5109 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5110 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5111 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5112 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5113 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5114 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5115 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5116 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5117 };
5118 
5119 /* ======================================================================== */
5120 
5121 /* return true if this is chip revision A */
5122 int is_ax(struct hfi1_devdata *dd)
5123 {
5124 	u8 chip_rev_minor =
5125 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5126 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5127 	return (chip_rev_minor & 0xf0) == 0;
5128 }
5129 
5130 /* return true if this is chip revision B */
5131 int is_bx(struct hfi1_devdata *dd)
5132 {
5133 	u8 chip_rev_minor =
5134 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5135 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5136 	return (chip_rev_minor & 0xf0) == 0x10;
5137 }
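/*
 * Illustrative sketch (not driver logic): the upper nibble of the minor
 * chip revision selects the stepping.  Assuming a CCE_REVISION minor
 * revision field of 0x05 or 0x15:
 *
 *	(0x05 & 0xf0) == 0x00  ->  is_ax() returns true (A-step part)
 *	(0x15 & 0xf0) == 0x10  ->  is_bx() returns true (B-step part)
 */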
5138 
5139 /*
5140  * Append string s to buffer buf.  Arguments curp and lenp point at the
5141  * current position and remaining length, respectively.
5142  *
5143  * Return 0 on success, 1 if out of room.
5144  */
5145 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5146 {
5147 	char *p = *curp;
5148 	int len = *lenp;
5149 	int result = 0; /* success */
5150 	char c;
5151 
5152 	/* add a comma, if this is not the first entry in the buffer */
5153 	if (p != buf) {
5154 		if (len == 0) {
5155 			result = 1; /* out of room */
5156 			goto done;
5157 		}
5158 		*p++ = ',';
5159 		len--;
5160 	}
5161 
5162 	/* copy the string */
5163 	while ((c = *s++) != 0) {
5164 		if (len == 0) {
5165 			result = 1; /* out of room */
5166 			goto done;
5167 		}
5168 		*p++ = c;
5169 		len--;
5170 	}
5171 
5172 done:
5173 	/* write return values */
5174 	*curp = p;
5175 	*lenp = len;
5176 
5177 	return result;
5178 }
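/*
 * Usage sketch for append_str() (illustrative only; the buffer and
 * values below are hypothetical).  The caller owns the cursor and the
 * remaining-length bookkeeping, and must add the final nul itself:
 *
 *	char buf[32];
 *	char *p = buf;
 *	int len = sizeof(buf) - 1;	reserve room for the nul
 *
 *	append_str(buf, &p, &len, "first");	buf now holds "first"
 *	append_str(buf, &p, &len, "second");	buf now holds "first,second"
 *	*p = 0;		append_str() itself never nul-terminates
 */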
5179 
5180 /*
5181  * Using the given flag table, print a comma-separated string into
5182  * the buffer.  End in '*' if the buffer is too short.
5183  */
5184 static char *flag_string(char *buf, int buf_len, u64 flags,
5185 			 struct flag_table *table, int table_size)
5186 {
5187 	char extra[32];
5188 	char *p = buf;
5189 	int len = buf_len;
5190 	int no_room = 0;
5191 	int i;
5192 
5193 	/* make sure there are at least 2 bytes so we can form "*" */
5194 	if (len < 2)
5195 		return "";
5196 
5197 	len--;	/* leave room for a nul */
5198 	for (i = 0; i < table_size; i++) {
5199 		if (flags & table[i].flag) {
5200 			no_room = append_str(buf, &p, &len, table[i].str);
5201 			if (no_room)
5202 				break;
5203 			flags &= ~table[i].flag;
5204 		}
5205 	}
5206 
5207 	/* any undocumented bits left? */
5208 	if (!no_room && flags) {
5209 		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5210 		no_room = append_str(buf, &p, &len, extra);
5211 	}
5212 
5213 	/* add '*' if we ran out of room */
5214 	if (no_room) {
5215 		/* may need to back up to add space for a '*' */
5216 		if (len == 0)
5217 			--p;
5218 		*p++ = '*';
5219 	}
5220 
5221 	/* add final nul - space already allocated above */
5222 	*p = 0;
5223 	return buf;
5224 }
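/*
 * Example behavior of flag_string() (illustrative; the flag names are
 * hypothetical).  Given a table mapping bit 0 to "ErrA" and bit 1 to
 * "ErrB":
 *
 *	flags = 0x3  ->  "ErrA,ErrB"
 *	flags = 0x9  ->  "ErrA,bits 0x8"	undocumented bit reported raw
 *	a too-small buffer yields output ending in '*' to flag truncation
 */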
5225 
5226 /* first 8 CCE error interrupt source names */
5227 static const char * const cce_misc_names[] = {
5228 	"CceErrInt",		/* 0 */
5229 	"RxeErrInt",		/* 1 */
5230 	"MiscErrInt",		/* 2 */
5231 	"Reserved3",		/* 3 */
5232 	"PioErrInt",		/* 4 */
5233 	"SDmaErrInt",		/* 5 */
5234 	"EgressErrInt",		/* 6 */
5235 	"TxeErrInt"		/* 7 */
5236 };
5237 
5238 /*
5239  * Return the miscellaneous error interrupt name.
5240  */
5241 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5242 {
5243 	if (source < ARRAY_SIZE(cce_misc_names))
5244 		snprintf(buf, bsize, "%s", cce_misc_names[source]);
5245 	else
5246 		snprintf(buf, bsize, "Reserved%u",
5247 			 source + IS_GENERAL_ERR_START);
5248 
5249 	return buf;
5250 }
5251 
5252 /*
5253  * Return the SDMA engine error interrupt name.
5254  */
5255 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5256 {
5257 	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5258 	return buf;
5259 }
5260 
5261 /*
5262  * Return the send context error interrupt name.
5263  */
5264 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5265 {
5266 	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5267 	return buf;
5268 }
5269 
5270 static const char * const various_names[] = {
5271 	"PbcInt",
5272 	"GpioAssertInt",
5273 	"Qsfp1Int",
5274 	"Qsfp2Int",
5275 	"TCritInt"
5276 };
5277 
5278 /*
5279  * Return the various interrupt name.
5280  */
5281 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5282 {
5283 	if (source < ARRAY_SIZE(various_names))
5284 		snprintf(buf, bsize, "%s", various_names[source]);
5285 	else
5286 		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5287 	return buf;
5288 }
5289 
5290 /*
5291  * Return the DC interrupt name.
5292  */
5293 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5294 {
5295 	static const char * const dc_int_names[] = {
5296 		"common",
5297 		"lcb",
5298 		"8051",
5299 		"lbm"	/* local block merge */
5300 	};
5301 
5302 	if (source < ARRAY_SIZE(dc_int_names))
5303 		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5304 	else
5305 		snprintf(buf, bsize, "DCInt%u", source);
5306 	return buf;
5307 }
5308 
5309 static const char * const sdma_int_names[] = {
5310 	"SDmaInt",
5311 	"SdmaIdleInt",
5312 	"SdmaProgressInt",
5313 };
5314 
5315 /*
5316  * Return the SDMA engine interrupt name.
5317  */
5318 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5319 {
5320 	/* what interrupt */
5321 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5322 	/* which engine */
5323 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5324 
5325 	if (likely(what < ARRAY_SIZE(sdma_int_names)))
5326 		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5327 	else
5328 		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5329 	return buf;
5330 }
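/*
 * Worked example (assuming TXE_NUM_SDMA_ENGINES == 16): source 17
 * decodes as what = 17 / 16 = 1 and which = 17 % 16 = 1, producing
 * "SdmaIdleInt1" - the idle interrupt of engine 1.
 */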
5331 
5332 /*
5333  * Return the receive available interrupt name.
5334  */
5335 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5336 {
5337 	snprintf(buf, bsize, "RcvAvailInt%u", source);
5338 	return buf;
5339 }
5340 
5341 /*
5342  * Return the receive urgent interrupt name.
5343  */
5344 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5345 {
5346 	snprintf(buf, bsize, "RcvUrgentInt%u", source);
5347 	return buf;
5348 }
5349 
5350 /*
5351  * Return the send credit interrupt name.
5352  */
5353 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5354 {
5355 	snprintf(buf, bsize, "SendCreditInt%u", source);
5356 	return buf;
5357 }
5358 
5359 /*
5360  * Return the reserved interrupt name.
5361  */
5362 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5363 {
5364 	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5365 	return buf;
5366 }
5367 
5368 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5369 {
5370 	return flag_string(buf, buf_len, flags,
5371 			   cce_err_status_flags,
5372 			   ARRAY_SIZE(cce_err_status_flags));
5373 }
5374 
5375 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5376 {
5377 	return flag_string(buf, buf_len, flags,
5378 			   rxe_err_status_flags,
5379 			   ARRAY_SIZE(rxe_err_status_flags));
5380 }
5381 
5382 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5383 {
5384 	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5385 			   ARRAY_SIZE(misc_err_status_flags));
5386 }
5387 
5388 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5389 {
5390 	return flag_string(buf, buf_len, flags,
5391 			   pio_err_status_flags,
5392 			   ARRAY_SIZE(pio_err_status_flags));
5393 }
5394 
5395 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5396 {
5397 	return flag_string(buf, buf_len, flags,
5398 			   sdma_err_status_flags,
5399 			   ARRAY_SIZE(sdma_err_status_flags));
5400 }
5401 
5402 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5403 {
5404 	return flag_string(buf, buf_len, flags,
5405 			   egress_err_status_flags,
5406 			   ARRAY_SIZE(egress_err_status_flags));
5407 }
5408 
5409 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5410 {
5411 	return flag_string(buf, buf_len, flags,
5412 			   egress_err_info_flags,
5413 			   ARRAY_SIZE(egress_err_info_flags));
5414 }
5415 
5416 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5417 {
5418 	return flag_string(buf, buf_len, flags,
5419 			   send_err_status_flags,
5420 			   ARRAY_SIZE(send_err_status_flags));
5421 }
5422 
5423 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5424 {
5425 	char buf[96];
5426 	int i = 0;
5427 
5428 	/*
5429 	 * For most of these errors, there is nothing that can be done except
5430 	 * report or record it.
5431 	 */
5432 	dd_dev_info(dd, "CCE Error: %s\n",
5433 		    cce_err_status_string(buf, sizeof(buf), reg));
5434 
5435 	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5436 	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5437 		/* this error requires a manual drop into SPC freeze mode, then a fix up */
5439 		start_freeze_handling(dd->pport, FREEZE_SELF);
5440 	}
5441 
5442 	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5443 		if (reg & (1ull << i)) {
5444 			incr_cntr64(&dd->cce_err_status_cnt[i]);
5445 			/* maintain a counter over all cce_err_status errors */
5446 			incr_cntr64(&dd->sw_cce_err_status_aggregate);
5447 		}
5448 	}
5449 }
5450 
5451 /*
5452  * Check counters for receive errors that do not have an interrupt
5453  * associated with them.
5454  */
5455 #define RCVERR_CHECK_TIME 10	/* seconds */
5456 static void update_rcverr_timer(unsigned long opaque)
5457 {
5458 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5459 	struct hfi1_pportdata *ppd = dd->pport;
5460 	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5461 
5462 	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5463 	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5464 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5465 		set_link_down_reason(
5466 			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5467 			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5468 		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5469 	}
5470 	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5471 
5472 	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5473 }
5474 
5475 static int init_rcverr(struct hfi1_devdata *dd)
5476 {
5477 	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5478 	/* Assume the hardware counter has been reset */
5479 	dd->rcv_ovfl_cnt = 0;
5480 	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5481 }
5482 
5483 static void free_rcverr(struct hfi1_devdata *dd)
5484 {
5485 	if (dd->rcverr_timer.data)
5486 		del_timer_sync(&dd->rcverr_timer);
5487 	dd->rcverr_timer.data = 0;
5488 }
5489 
5490 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5491 {
5492 	char buf[96];
5493 	int i = 0;
5494 
5495 	dd_dev_info(dd, "Receive Error: %s\n",
5496 		    rxe_err_status_string(buf, sizeof(buf), reg));
5497 
5498 	if (reg & ALL_RXE_FREEZE_ERR) {
5499 		int flags = 0;
5500 
5501 		/*
5502 		 * Freeze mode recovery is disabled for the errors
5503 		 * in RXE_FREEZE_ABORT_MASK
5504 		 */
5505 		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5506 			flags = FREEZE_ABORT;
5507 
5508 		start_freeze_handling(dd->pport, flags);
5509 	}
5510 
5511 	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5512 		if (reg & (1ull << i))
5513 			incr_cntr64(&dd->rcv_err_status_cnt[i]);
5514 	}
5515 }
5516 
5517 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518 {
5519 	char buf[96];
5520 	int i = 0;
5521 
5522 	dd_dev_info(dd, "Misc Error: %s\n",
5523 		    misc_err_status_string(buf, sizeof(buf), reg));
5524 	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5525 		if (reg & (1ull << i))
5526 			incr_cntr64(&dd->misc_err_status_cnt[i]);
5527 	}
5528 }
5529 
5530 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5531 {
5532 	char buf[96];
5533 	int i = 0;
5534 
5535 	dd_dev_info(dd, "PIO Error: %s\n",
5536 		    pio_err_status_string(buf, sizeof(buf), reg));
5537 
5538 	if (reg & ALL_PIO_FREEZE_ERR)
5539 		start_freeze_handling(dd->pport, 0);
5540 
5541 	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5542 		if (reg & (1ull << i))
5543 			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5544 	}
5545 }
5546 
5547 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5548 {
5549 	char buf[96];
5550 	int i = 0;
5551 
5552 	dd_dev_info(dd, "SDMA Error: %s\n",
5553 		    sdma_err_status_string(buf, sizeof(buf), reg));
5554 
5555 	if (reg & ALL_SDMA_FREEZE_ERR)
5556 		start_freeze_handling(dd->pport, 0);
5557 
5558 	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5559 		if (reg & (1ull << i))
5560 			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5561 	}
5562 }
5563 
5564 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5565 {
5566 	incr_cntr64(&ppd->port_xmit_discards);
5567 }
5568 
5569 static void count_port_inactive(struct hfi1_devdata *dd)
5570 {
5571 	__count_port_discards(dd->pport);
5572 }
5573 
5574 /*
5575  * We have had a "disallowed packet" error during egress. Determine the
5576  * integrity check which failed, and update relevant error counter, etc.
5577  *
5578  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5579  * bit of state per integrity check, so the reason for an egress
5580  * error can be missed when more than one packet fails the same
5581  * integrity check before the corresponding bit is read and cleared.
5582  */
5583 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5584 					int vl)
5585 {
5586 	struct hfi1_pportdata *ppd = dd->pport;
5587 	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5588 	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5589 	char buf[96];
5590 
5591 	/* clear down all observed info as quickly as possible after read */
5592 	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5593 
5594 	dd_dev_info(dd,
5595 		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5596 		    info, egress_err_info_string(buf, sizeof(buf), info), src);
5597 
5598 	/* Eventually add other counters for each bit */
5599 	if (info & PORT_DISCARD_EGRESS_ERRS) {
5600 		int weight, i;
5601 
5602 		/*
5603 		 * Count all applicable bits as individual errors and
5604 		 * attribute them to the packet that triggered this handler.
5605 		 * This may not be completely accurate due to limitations
5606 		 * on the available hardware error information.  There is
5607 		 * a single information register and any number of error
5608 		 * packets may have occurred and contributed to it before
5609 		 * this routine is called.  This means that:
5610 		 * a) If multiple packets with the same error occur before
5611 		 *    this routine is called, earlier packets are missed.
5612 		 *    There is only a single bit for each error type.
5613 		 * b) Errors may not be attributed to the correct VL.
5614 		 *    The driver is attributing all bits in the info register
5615 		 *    to the packet that triggered this call, but bits
5616 		 *    could be an accumulation of different packets with
5617 		 *    different VLs.
5618 		 * c) A single error packet may have multiple counts attached
5619 		 *    to it.  There is no way for the driver to know if
5620 		 *    multiple bits set in the info register are due to a
5621 		 *    single packet or multiple packets.  The driver assumes
5622 		 *    multiple packets.
5623 		 */
5624 		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5625 		for (i = 0; i < weight; i++) {
5626 			__count_port_discards(ppd);
5627 			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5628 				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5629 			else if (vl == 15)
5630 				incr_cntr64(&ppd->port_xmit_discards_vl[C_VL_15]);
5632 		}
5633 	}
5634 }
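/*
 * Worked example for the loop above (illustrative): if info has three
 * of the PORT_DISCARD_EGRESS_ERRS bits set and the triggering packet
 * was on data VL 2, weight is 3 and both port_xmit_discards and
 * port_xmit_discards_vl[2] are incremented three times, subject to
 * the attribution caveats (a)-(c) above.
 */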
5635 
5636 /*
5637  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5638  * register. Does it represent a 'port inactive' error?
5639  */
5640 static inline int port_inactive_err(u64 posn)
5641 {
5642 	return (posn >= SEES(TX_LINKDOWN) &&
5643 		posn <= SEES(TX_INCORRECT_LINK_STATE));
5644 }
5645 
5646 /*
5647  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5648  * register. Does it represent a 'disallowed packet' error?
5649  */
5650 static inline int disallowed_pkt_err(int posn)
5651 {
5652 	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5653 		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5654 }
5655 
5656 /*
5657  * Input value is a bit position of one of the SDMA engine disallowed
5658  * packet errors.  Return which engine.  Use of this must be guarded by
5659  * disallowed_pkt_err().
5660  */
5661 static inline int disallowed_pkt_engine(int posn)
5662 {
5663 	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5664 }
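/*
 * Example (illustrative): a SEND_EGRESS_ERR_STATUS bit at position
 * SEES(TX_SDMA3_DISALLOWED_PACKET) satisfies disallowed_pkt_err(), and
 * disallowed_pkt_engine() maps it to engine 3 because the
 * disallowed-packet bits are contiguous starting at engine 0.
 */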
5665 
5666 /*
5667  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5668  * be done.
5669  */
5670 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5671 {
5672 	struct sdma_vl_map *m;
5673 	int vl;
5674 
5675 	/* range check */
5676 	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5677 		return -1;
5678 
5679 	rcu_read_lock();
5680 	m = rcu_dereference(dd->sdma_map);
5681 	vl = m->engine_to_vl[engine];
5682 	rcu_read_unlock();
5683 
5684 	return vl;
5685 }
5686 
5687 /*
5688  * Translate the send context (software index) into a VL.  Return -1 if the
5689  * translation cannot be done.
5690  */
5691 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5692 {
5693 	struct send_context_info *sci;
5694 	struct send_context *sc;
5695 	int i;
5696 
5697 	sci = &dd->send_contexts[sw_index];
5698 
5699 	/* there is no information for user (PSM) and ack contexts */
5700 	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5701 		return -1;
5702 
5703 	sc = sci->sc;
5704 	if (!sc)
5705 		return -1;
5706 	if (dd->vld[15].sc == sc)
5707 		return 15;
5708 	for (i = 0; i < num_vls; i++)
5709 		if (dd->vld[i].sc == sc)
5710 			return i;
5711 
5712 	return -1;
5713 }
5714 
5715 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5716 {
5717 	u64 reg_copy = reg, handled = 0;
5718 	char buf[96];
5719 	int i = 0;
5720 
5721 	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5722 		start_freeze_handling(dd->pport, 0);
5723 	else if (is_ax(dd) &&
5724 		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5725 		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5726 		start_freeze_handling(dd->pport, 0);
5727 
5728 	while (reg_copy) {
5729 		int posn = fls64(reg_copy);
5730 		/* fls64() returns a 1-based offset, we want it zero based */
5731 		int shift = posn - 1;
5732 		u64 mask = 1ULL << shift;
5733 
5734 		if (port_inactive_err(shift)) {
5735 			count_port_inactive(dd);
5736 			handled |= mask;
5737 		} else if (disallowed_pkt_err(shift)) {
5738 			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5739 
5740 			handle_send_egress_err_info(dd, vl);
5741 			handled |= mask;
5742 		}
5743 		reg_copy &= ~mask;
5744 	}
5745 
5746 	reg &= ~handled;
5747 
5748 	if (reg)
5749 		dd_dev_info(dd, "Egress Error: %s\n",
5750 			    egress_err_status_string(buf, sizeof(buf), reg));
5751 
5752 	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5753 		if (reg & (1ull << i))
5754 			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5755 	}
5756 }
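/*
 * Sketch of the bit scan in handle_egress_err() above (illustrative):
 * for reg_copy = 0x12, fls64() first returns 5 (bit 4), then 2 (bit 1),
 * so error bits are visited from most to least significant and removed
 * from reg_copy until it reaches zero.
 */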
5757 
5758 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5759 {
5760 	char buf[96];
5761 	int i = 0;
5762 
5763 	dd_dev_info(dd, "Send Error: %s\n",
5764 		    send_err_status_string(buf, sizeof(buf), reg));
5765 
5766 	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5767 		if (reg & (1ull << i))
5768 			incr_cntr64(&dd->send_err_status_cnt[i]);
5769 	}
5770 }
5771 
5772 /*
5773  * The maximum number of times the error clear down will loop before
5774  * blocking a repeating error.  This value is arbitrary.
5775  */
5776 #define MAX_CLEAR_COUNT 20
5777 
5778 /*
5779  * Clear and handle an error register.  All error interrupts are funneled
5780  * through here to have a central location to correctly handle single-
5781  * or multi-shot errors.
5782  *
5783  * For non per-context registers, call this routine with a context value
5784  * of 0 so the per-context offset is zero.
5785  *
5786  * If the handler loops too many times, assume that something is wrong
5787  * and can't be fixed, so mask the error bits.
5788  */
5789 static void interrupt_clear_down(struct hfi1_devdata *dd,
5790 				 u32 context,
5791 				 const struct err_reg_info *eri)
5792 {
5793 	u64 reg;
5794 	u32 count;
5795 
5796 	/* read in a loop until no more errors are seen */
5797 	count = 0;
5798 	while (1) {
5799 		reg = read_kctxt_csr(dd, context, eri->status);
5800 		if (reg == 0)
5801 			break;
5802 		write_kctxt_csr(dd, context, eri->clear, reg);
5803 		if (likely(eri->handler))
5804 			eri->handler(dd, context, reg);
5805 		count++;
5806 		if (count > MAX_CLEAR_COUNT) {
5807 			u64 mask;
5808 
5809 			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5810 				   eri->desc, reg);
5811 			/*
5812 			 * Read-modify-write so any other masked bits
5813 			 * remain masked.
5814 			 */
5815 			mask = read_kctxt_csr(dd, context, eri->mask);
5816 			mask &= ~reg;
5817 			write_kctxt_csr(dd, context, eri->mask, mask);
5818 			break;
5819 		}
5820 	}
5821 }
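/*
 * Masking sketch for interrupt_clear_down() (illustrative values): if
 * reg = 0x5 keeps reappearing and the mask register currently reads
 * 0xf, the read-modify-write above stores 0xf & ~0x5 = 0xa, silencing
 * only the repeating bits while leaving other enables untouched.
 */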
5822 
5823 /*
5824  * CCE block "misc" interrupt.  Source is < 16.
5825  */
5826 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5827 {
5828 	const struct err_reg_info *eri = &misc_errs[source];
5829 
5830 	if (eri->handler) {
5831 		interrupt_clear_down(dd, 0, eri);
5832 	} else {
5833 		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5834 			   source);
5835 	}
5836 }
5837 
5838 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5839 {
5840 	return flag_string(buf, buf_len, flags,
5841 			   sc_err_status_flags,
5842 			   ARRAY_SIZE(sc_err_status_flags));
5843 }
5844 
5845 /*
5846  * Send context error interrupt.  Source (hw_context) is < 160.
5847  *
5848  * All send context errors cause the send context to halt.  The normal
5849  * clear-down mechanism cannot be used because we cannot clear the
5850  * error bits until several other long-running items are done first.
5851  * This is OK because with the context halted, nothing else is going
5852  * to happen on it anyway.
5853  */
5854 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5855 				unsigned int hw_context)
5856 {
5857 	struct send_context_info *sci;
5858 	struct send_context *sc;
5859 	char flags[96];
5860 	u64 status;
5861 	u32 sw_index;
5862 	int i = 0;
5863 
5864 	sw_index = dd->hw_to_sw[hw_context];
5865 	if (sw_index >= dd->num_send_contexts) {
5866 		dd_dev_err(dd,
5867 			   "out of range sw index %u for send context %u\n",
5868 			   sw_index, hw_context);
5869 		return;
5870 	}
5871 	sci = &dd->send_contexts[sw_index];
5872 	sc = sci->sc;
5873 	if (!sc) {
5874 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5875 			   sw_index, hw_context);
5876 		return;
5877 	}
5878 
5879 	/* tell the software that a halt has begun */
5880 	sc_stop(sc, SCF_HALTED);
5881 
5882 	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5883 
5884 	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5885 		    send_context_err_status_string(flags, sizeof(flags),
5886 						   status));
5887 
5888 	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5889 		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5890 
5891 	/*
5892 	 * Automatically restart halted kernel contexts out of interrupt
5893 	 * context.  User contexts must ask the driver to restart the context.
5894 	 */
5895 	if (sc->type != SC_USER)
5896 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5897 
5898 	/*
5899 	 * Update the counters for the corresponding status bits.
5900 	 * Note that these particular counters are aggregated over all
5901 	 * 160 contexts.
5902 	 */
5903 	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5904 		if (status & (1ull << i))
5905 			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5906 	}
5907 }
5908 
5909 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5910 				unsigned int source, u64 status)
5911 {
5912 	struct sdma_engine *sde;
5913 	int i = 0;
5914 
5915 	sde = &dd->per_sdma[source];
5916 #ifdef CONFIG_SDMA_VERBOSITY
5917 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5918 		   slashstrip(__FILE__), __LINE__, __func__);
5919 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5920 		   sde->this_idx, source, (unsigned long long)status);
5921 #endif
5922 	sde->err_cnt++;
5923 	sdma_engine_error(sde, status);
5924 
5925 	/*
5926 	 * Update the counters for the corresponding status bits.
5927 	 * Note that these particular counters are aggregated over
5928 	 * all 16 DMA engines.
5929 	 */
5930 	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5931 		if (status & (1ull << i))
5932 			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5933 	}
5934 }
5935 
5936 /*
5937  * CCE block SDMA error interrupt.  Source is < 16.
5938  */
5939 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5940 {
5941 #ifdef CONFIG_SDMA_VERBOSITY
5942 	struct sdma_engine *sde = &dd->per_sdma[source];
5943 
5944 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5945 		   slashstrip(__FILE__), __LINE__, __func__);
5946 	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5947 		   source);
5948 	sdma_dumpstate(sde);
5949 #endif
5950 	interrupt_clear_down(dd, source, &sdma_eng_err);
5951 }
5952 
5953 /*
5954  * CCE block "various" interrupt.  Source is < 8.
5955  */
5956 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5957 {
5958 	const struct err_reg_info *eri = &various_err[source];
5959 
5960 	/*
5961 	 * TCritInt cannot go through interrupt_clear_down()
5962 	 * because it is not a second tier interrupt. The handler
5963 	 * because it is not a second-tier interrupt.  The handler
5964 	 */
5965 	if (source == TCRIT_INT_SOURCE)
5966 		handle_temp_err(dd);
5967 	else if (eri->handler)
5968 		interrupt_clear_down(dd, 0, eri);
5969 	else
5970 		dd_dev_info(dd,
5971 			    "%s: Unimplemented/reserved interrupt %d\n",
5972 			    __func__, source);
5973 }
5974 
5975 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5976 {
5977 	/* src_ctx is always zero */
5978 	struct hfi1_pportdata *ppd = dd->pport;
5979 	unsigned long flags;
5980 	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5981 
5982 	if (reg & QSFP_HFI0_MODPRST_N) {
5983 		if (!qsfp_mod_present(ppd)) {
5984 			dd_dev_info(dd, "%s: QSFP module removed\n",
5985 				    __func__);
5986 
5987 			ppd->driver_link_ready = 0;
5988 			/*
5989 			 * Cable removed, reset all our information about the
5990 			 * cache and cable capabilities
5991 			 */
5992 
5993 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5994 			/*
5995 			 * We don't set cache_refresh_required here as we expect
5996 			 * an interrupt when a cable is inserted
5997 			 */
5998 			ppd->qsfp_info.cache_valid = 0;
5999 			ppd->qsfp_info.reset_needed = 0;
6000 			ppd->qsfp_info.limiting_active = 0;
6001 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6002 					       flags);
6003 			/* Invert the ModPresent pin now to detect plug-in */
6004 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6005 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6006 
6007 			if ((ppd->offline_disabled_reason >
6008 			  HFI1_ODR_MASK(
6009 			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6010 			  (ppd->offline_disabled_reason ==
6011 			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6012 				ppd->offline_disabled_reason =
6013 				HFI1_ODR_MASK(
6014 				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6015 
6016 			if (ppd->host_link_state == HLS_DN_POLL) {
6017 				/*
6018 				 * The link is still in POLL. This means
6019 				 * that the normal link down processing
6020 				 * will not happen. We have to do it here
6021 				 * before turning the DC off.
6022 				 */
6023 				queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6024 			}
6025 		} else {
6026 			dd_dev_info(dd, "%s: QSFP module inserted\n",
6027 				    __func__);
6028 
6029 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6030 			ppd->qsfp_info.cache_valid = 0;
6031 			ppd->qsfp_info.cache_refresh_required = 1;
6032 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6033 					       flags);
6034 
6035 			/*
6036 			 * Stop inversion of ModPresent pin to detect
6037 			 * removal of the cable
6038 			 */
6039 			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6040 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6041 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6042 
6043 			ppd->offline_disabled_reason =
6044 				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6045 		}
6046 	}
6047 
6048 	if (reg & QSFP_HFI0_INT_N) {
6049 		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6050 			    __func__);
6051 		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6052 		ppd->qsfp_info.check_interrupt_flags = 1;
6053 		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6054 	}
6055 
6056 	/* Schedule the QSFP work only if there is a cable attached. */
6057 	if (qsfp_mod_present(ppd))
6058 		queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6059 }
6060 
6061 static int request_host_lcb_access(struct hfi1_devdata *dd)
6062 {
6063 	int ret;
6064 
6065 	ret = do_8051_command(dd, HCMD_MISC,
6066 			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6067 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6068 	if (ret != HCMD_SUCCESS) {
6069 		dd_dev_err(dd, "%s: command failed with error %d\n",
6070 			   __func__, ret);
6071 	}
6072 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6073 }
6074 
6075 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6076 {
6077 	int ret;
6078 
6079 	ret = do_8051_command(dd, HCMD_MISC,
6080 			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6081 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6082 	if (ret != HCMD_SUCCESS) {
6083 		dd_dev_err(dd, "%s: command failed with error %d\n",
6084 			   __func__, ret);
6085 	}
6086 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6087 }
6088 
6089 /*
6090  * Set the LCB selector - allow host access.  The DCC selector always
6091  * points to the host.
6092  */
6093 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6094 {
6095 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6096 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6097 		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6098 }
6099 
6100 /*
6101  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6102  * points to the host.
6103  */
6104 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6105 {
6106 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6107 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6108 }
6109 
6110 /*
6111  * Acquire LCB access from the 8051.  If the host already has access,
6112  * just increment a counter.  Otherwise, inform the 8051 that the
6113  * host is taking access.
6114  *
6115  * Returns:
6116  *	0 on success
6117  *	-EBUSY if the 8051 has control and cannot be disturbed
6118  *	-errno if unable to acquire access from the 8051
6119  */
6120 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6121 {
6122 	struct hfi1_pportdata *ppd = dd->pport;
6123 	int ret = 0;
6124 
6125 	/*
6126 	 * Use the host link state lock so the operation of this routine
6127 	 * { link state check, selector change, count increment } can occur
6128 	 * as a unit against a link state change.  Otherwise there is a
6129 	 * race between the state change and the count increment.
6130 	 */
6131 	if (sleep_ok) {
6132 		mutex_lock(&ppd->hls_lock);
6133 	} else {
6134 		while (!mutex_trylock(&ppd->hls_lock))
6135 			udelay(1);
6136 	}
6137 
6138 	/* this access is valid only when the link is up */
6139 	if (ppd->host_link_state & HLS_DOWN) {
6140 		dd_dev_info(dd, "%s: link state %s not up\n",
6141 			    __func__, link_state_name(ppd->host_link_state));
6142 		ret = -EBUSY;
6143 		goto done;
6144 	}
6145 
6146 	if (dd->lcb_access_count == 0) {
6147 		ret = request_host_lcb_access(dd);
6148 		if (ret) {
6149 			dd_dev_err(dd,
6150 				   "%s: unable to acquire LCB access, err %d\n",
6151 				   __func__, ret);
6152 			goto done;
6153 		}
6154 		set_host_lcb_access(dd);
6155 	}
6156 	dd->lcb_access_count++;
6157 done:
6158 	mutex_unlock(&ppd->hls_lock);
6159 	return ret;
6160 }
6161 
6162 /*
6163  * Release LCB access by decrementing the use count.  If the count is moving
6164  * from 1 to 0, inform the 8051 that it has control back.
6165  *
6166  * Returns:
6167  *	0 on success
6168  *	-errno if unable to release access to the 8051
6169  */
6170 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6171 {
6172 	int ret = 0;
6173 
6174 	/*
6175 	 * Use the host link state lock because the acquire needed it.
6176 	 * Here, we only need to keep { selector change, count decrement }
6177 	 * as a unit.
6178 	 */
6179 	if (sleep_ok) {
6180 		mutex_lock(&dd->pport->hls_lock);
6181 	} else {
6182 		while (!mutex_trylock(&dd->pport->hls_lock))
6183 			udelay(1);
6184 	}
6185 
6186 	if (dd->lcb_access_count == 0) {
6187 		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6188 			   __func__);
6189 		goto done;
6190 	}
6191 
6192 	if (dd->lcb_access_count == 1) {
6193 		set_8051_lcb_access(dd);
6194 		ret = request_8051_lcb_access(dd);
6195 		if (ret) {
6196 			dd_dev_err(dd,
6197 				   "%s: unable to release LCB access, err %d\n",
6198 				   __func__, ret);
6199 			/* restore host access if the grant didn't work */
6200 			set_host_lcb_access(dd);
6201 			goto done;
6202 		}
6203 	}
6204 	dd->lcb_access_count--;
6205 done:
6206 	mutex_unlock(&dd->pport->hls_lock);
6207 	return ret;
6208 }
6209 
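/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the intended pairing of the two routines above when the host needs
 * to touch an LCB CSR.  Assumes a sleeping context (sleep_ok = 1) and
 * that the link is up, as acquire_lcb_access() requires.
 */
static inline int example_read_lcb_csr(struct hfi1_devdata *dd, u32 addr,
				       u64 *data)
{
	int ret = acquire_lcb_access(dd, 1);	/* may sleep on hls_lock */

	if (ret)
		return ret;		/* link not up, or 8051 refused */
	*data = read_csr(dd, addr);	/* host owns the LCB selector here */
	release_lcb_access(dd, 1);	/* hand control back to the 8051 */
	return 0;
}
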
6210 /*
6211  * Initialize LCB access variables and state.  Called during driver load,
6212  * after most of the initialization is finished.
6213  *
6214  * The DC default is LCB access on for the host.  The driver defaults to
6215  * leaving access to the 8051.  Assign access now - this constrains the call
6216  * to this routine to be after all LCB set-up is done.  In particular, after
6217  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6218  */
6219 static void init_lcb_access(struct hfi1_devdata *dd)
6220 {
6221 	dd->lcb_access_count = 0;
6222 }
6223 
6224 /*
6225  * Write a response back to an 8051 request.
6226  */
6227 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6228 {
6229 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6230 		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6231 		  (u64)return_code <<
6232 		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6233 		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6234 }
6235 
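/*
 * Worked example (illustrative values only): hreq_response(dd,
 * HREQ_SUCCESS, 0x1234) performs a single CSR write with the COMPLETED
 * bit set, HREQ_SUCCESS in the RETURN_CODE field, and 0x1234 in the
 * RSP_DATA field, signaling completion of the 8051's request.
 */
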
6236 /*
6237  * Handle host requests from the 8051.
6238  */
6239 static void handle_8051_request(struct hfi1_pportdata *ppd)
6240 {
6241 	struct hfi1_devdata *dd = ppd->dd;
6242 	u64 reg;
6243 	u16 data = 0;
6244 	u8 type;
6245 
6246 	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6247 	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6248 		return;	/* no request */
6249 
6250 	/* zero out COMPLETED so the response is seen */
6251 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6252 
6253 	/* extract request details */
6254 	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6255 			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6256 	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6257 			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6258 
6259 	switch (type) {
6260 	case HREQ_LOAD_CONFIG:
6261 	case HREQ_SAVE_CONFIG:
6262 	case HREQ_READ_CONFIG:
6263 	case HREQ_SET_TX_EQ_ABS:
6264 	case HREQ_SET_TX_EQ_REL:
6265 	case HREQ_ENABLE:
6266 		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6267 			    type);
6268 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6269 		break;
6270 	case HREQ_CONFIG_DONE:
6271 		hreq_response(dd, HREQ_SUCCESS, 0);
6272 		break;
6273 
6274 	case HREQ_INTERFACE_TEST:
6275 		hreq_response(dd, HREQ_SUCCESS, data);
6276 		break;
6277 	default:
6278 		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6279 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6280 		break;
6281 	}
6282 }
6283 
6284 static void write_global_credit(struct hfi1_devdata *dd,
6285 				u8 vau, u16 total, u16 shared)
6286 {
6287 	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6288 		  ((u64)total <<
6289 		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6290 		  ((u64)shared <<
6291 		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6292 		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6293 }
6294 
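/*
 * Worked example (illustrative values only): write_global_credit(dd,
 * 3, 0x400, 0x40) packs AU encoding 3, a total credit limit of 0x400,
 * and a shared limit of 0x40 into their SEND_CM_GLOBAL_CREDIT fields
 * with a single CSR write; the field positions come from the *_SHIFT
 * constants above.
 */
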
6295 /*
6296  * Set up initial VL15 credits of the remote.  Assumes the rest of
6297  * the CM credit registers are zero from a previous global or credit reset.
6298  */
6299 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6300 {
6301 	/* leave shared count at zero for both global and VL15 */
6302 	write_global_credit(dd, vau, vl15buf, 0);
6303 
6304 	/* We may need some credits for another VL when sending packets
6305 	 * with the snoop interface. Dividing it down the middle for VL15
6306 	 * and VL0 should suffice.
6307 	 */
6308 	if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6309 		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6310 		    << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6311 		write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6312 		    << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6313 	} else {
6314 		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6315 			<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6316 	}
6317 }
6318 
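/*
 * Worked example (illustrative value only): with vl15buf = 0x880 and
 * snoop mode active, VL15 and VL0 each get a dedicated limit of
 * 0x880 >> 1 = 0x440 credits; otherwise the full 0x880 goes to VL15.
 */
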
6319 /*
6320  * Zero all credit details from the previous connection and
6321  * reset the CM manager's internal counters.
6322  */
6323 void reset_link_credits(struct hfi1_devdata *dd)
6324 {
6325 	int i;
6326 
6327 	/* remove all previous VL credit limits */
6328 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
6329 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6330 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6331 	write_global_credit(dd, 0, 0, 0);
6332 	/* reset the CM block */
6333 	pio_send_control(dd, PSC_CM_RESET);
6334 }
6335 
6336 /* convert a vCU to a CU */
6337 static u32 vcu_to_cu(u8 vcu)
6338 {
6339 	return 1 << vcu;
6340 }
6341 
6342 /* convert a CU to a vCU */
6343 static u8 cu_to_vcu(u32 cu)
6344 {
6345 	return ilog2(cu);
6346 }
6347 
6348 /* convert a vAU to an AU */
6349 static u32 vau_to_au(u8 vau)
6350 {
6351 	return 8 * (1 << vau);
6352 }
6353 
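/*
 * Worked examples (values follow from the encodings above):
 * vcu_to_cu(2) = 1 << 2 = 4 and cu_to_vcu(4) = ilog2(4) = 2, so the
 * two conversions round-trip; vau_to_au(3) = 8 * (1 << 3) = 64, i.e.
 * a vAU of 3 describes a 64-byte allocation unit.
 */
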
6354 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6355 {
6356 	ppd->sm_trap_qp = 0x0;
6357 	ppd->sa_qp = 0x1;
6358 }
6359 
6360 /*
6361  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6362  */
6363 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6364 {
6365 	u64 reg;
6366 
6367 	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
6368 	write_csr(dd, DC_LCB_CFG_RUN, 0);
6369 	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6370 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6371 		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6372 	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6373 	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6374 	reg = read_csr(dd, DCC_CFG_RESET);
6375 	write_csr(dd, DCC_CFG_RESET, reg |
6376 		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6377 		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6378 	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6379 	if (!abort) {
6380 		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6381 		write_csr(dd, DCC_CFG_RESET, reg);
6382 		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6383 	}
6384 }
6385 
6386 /*
6387  * This routine should be called after the link has been transitioned to
6388  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6389  * reset).
6390  *
6391  * The expectation is that the caller of this routine would have taken
6392  * care of properly transitioning the link into the correct state.
6393  */
6394 static void dc_shutdown(struct hfi1_devdata *dd)
6395 {
6396 	unsigned long flags;
6397 
6398 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6399 	if (dd->dc_shutdown) {
6400 		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6401 		return;
6402 	}
6403 	dd->dc_shutdown = 1;
6404 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6405 	/* Shutdown the LCB */
6406 	lcb_shutdown(dd, 1);
6407 	/*
6408 	 * Going to OFFLINE would have caused the 8051 to put the
6409 	 * SerDes into reset already. Just need to shut down the 8051
6410 	 * itself.
6411 	 */
6412 	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6413 }
6414 
6415 /*
6416  * Calling this after the DC has been brought out of reset should not
6417  * do any damage.
6418  */
6419 static void dc_start(struct hfi1_devdata *dd)
6420 {
6421 	unsigned long flags;
6422 	int ret;
6423 
6424 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6425 	if (!dd->dc_shutdown)
6426 		goto done;
6427 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6428 	/* Take the 8051 out of reset */
6429 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6430 	/* Wait until 8051 is ready */
6431 	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6432 	if (ret) {
6433 		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6434 			   __func__);
6435 	}
6436 	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6437 	write_csr(dd, DCC_CFG_RESET, 0x10);
6438 	/* lcb_shutdown() with abort=1 does not restore these */
6439 	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6440 	spin_lock_irqsave(&dd->dc8051_lock, flags);
6441 	dd->dc_shutdown = 0;
6442 done:
6443 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6444 }
6445 
6446 /*
6447  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6448  */
6449 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6450 {
6451 	u64 rx_radr, tx_radr;
6452 	u32 version;
6453 
6454 	if (dd->icode != ICODE_FPGA_EMULATION)
6455 		return;
6456 
6457 	/*
6458 	 * These LCB defaults on emulator _s are good, nothing to do here:
6459 	 *	LCB_CFG_TX_FIFOS_RADR
6460 	 *	LCB_CFG_RX_FIFOS_RADR
6461 	 *	LCB_CFG_LN_DCLK
6462 	 *	LCB_CFG_IGNORE_LOST_RCLK
6463 	 */
6464 	if (is_emulator_s(dd))
6465 		return;
6466 	/* else this is _p */
6467 
6468 	version = emulator_rev(dd);
6469 	if (!is_ax(dd))
6470 		version = 0x2d;	/* all B0 use 0x2d or higher settings */
6471 
6472 	if (version <= 0x12) {
6473 		/* release 0x12 and below */
6474 
6475 		/*
6476 		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6477 		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6478 		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6479 		 */
6480 		rx_radr =
6481 		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6482 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6483 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6484 		/*
6485 		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6486 		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6487 		 */
6488 		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6489 	} else if (version <= 0x18) {
6490 		/* release 0x13 up to 0x18 */
6491 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6492 		rx_radr =
6493 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6494 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6495 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6496 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6497 	} else if (version == 0x19) {
6498 		/* release 0x19 */
6499 		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6500 		rx_radr =
6501 		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6502 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6503 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6504 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6505 	} else if (version == 0x1a) {
6506 		/* release 0x1a */
6507 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6508 		rx_radr =
6509 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6510 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6511 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6512 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6513 		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6514 	} else {
6515 		/* release 0x1b and higher */
6516 		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6517 		rx_radr =
6518 		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6519 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6520 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6521 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6522 	}
6523 
6524 	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6525 	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6526 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6527 		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6528 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6529 }
6530 
6531 /*
6532  * Handle a SMA idle message
6533  *
6534  * This is a work-queue function outside of the interrupt.
6535  */
6536 void handle_sma_message(struct work_struct *work)
6537 {
6538 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6539 							sma_message_work);
6540 	struct hfi1_devdata *dd = ppd->dd;
6541 	u64 msg;
6542 	int ret;
6543 
6544 	/*
6545 	 * msg is bytes 1-4 of the 40-bit idle message - the command code
6546 	 * is stripped off
6547 	 */
6548 	ret = read_idle_sma(dd, &msg);
6549 	if (ret)
6550 		return;
6551 	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6552 	/*
6553 	 * React to the SMA message.  Byte[1] (0 for us) is the command.
6554 	 */
6555 	switch (msg & 0xff) {
6556 	case SMA_IDLE_ARM:
6557 		/*
6558 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6559 		 * State Transitions
6560 		 *
6561 		 * Only expected in INIT or ARMED, discard otherwise.
6562 		 */
6563 		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6564 			ppd->neighbor_normal = 1;
6565 		break;
6566 	case SMA_IDLE_ACTIVE:
6567 		/*
6568 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6569 		 * State Transitions
6570 		 *
6571 		 * Can activate the node.  Discard otherwise.
6572 		 */
6573 		if (ppd->host_link_state == HLS_UP_ARMED &&
6574 		    ppd->is_active_optimize_enabled) {
6575 			ppd->neighbor_normal = 1;
6576 			ret = set_link_state(ppd, HLS_UP_ACTIVE);
6577 			if (ret)
6578 				dd_dev_err(
6579 					dd,
6580 					"%s: received Active SMA idle message, couldn't set link to Active\n",
6581 					__func__);
6582 		}
6583 		break;
6584 	default:
6585 		dd_dev_err(dd,
6586 			   "%s: received unexpected SMA idle message 0x%llx\n",
6587 			   __func__, msg);
6588 		break;
6589 	}
6590 }
6591 
6592 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6593 {
6594 	u64 rcvctrl;
6595 	unsigned long flags;
6596 
6597 	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6598 	rcvctrl = read_csr(dd, RCV_CTRL);
6599 	rcvctrl |= add;
6600 	rcvctrl &= ~clear;
6601 	write_csr(dd, RCV_CTRL, rcvctrl);
6602 	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6603 }
6604 
6605 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6606 {
6607 	adjust_rcvctrl(dd, add, 0);
6608 }
6609 
6610 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6611 {
6612 	adjust_rcvctrl(dd, 0, clear);
6613 }
6614 
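/*
 * Usage sketch (illustrative only): the wrappers above serialize every
 * read-modify-write of RCV_CTRL behind rcvctrl_lock, so e.g.
 *
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 *
 * set and clear the port enable bit without racing other adjusters.
 */
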
6615 /*
6616  * Called from all interrupt handlers to start handling an SPC freeze.
6617  */
6618 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6619 {
6620 	struct hfi1_devdata *dd = ppd->dd;
6621 	struct send_context *sc;
6622 	int i;
6623 
6624 	if (flags & FREEZE_SELF)
6625 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6626 
6627 	/* enter frozen mode */
6628 	dd->flags |= HFI1_FROZEN;
6629 
6630 	/* notify all SDMA engines that they are going into a freeze */
6631 	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6632 
6633 	/* do halt pre-handling on all enabled send contexts */
6634 	for (i = 0; i < dd->num_send_contexts; i++) {
6635 		sc = dd->send_contexts[i].sc;
6636 		if (sc && (sc->flags & SCF_ENABLED))
6637 			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6638 	}
6639 
6640 	/* Send contexts are frozen. Notify user space */
6641 	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6642 
6643 	if (flags & FREEZE_ABORT) {
6644 		dd_dev_err(dd,
6645 			   "Aborted freeze recovery. Please REBOOT system\n");
6646 		return;
6647 	}
6648 	/* queue non-interrupt handler */
6649 	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6650 }
6651 
6652 /*
6653  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6654  * depending on the "freeze" parameter.
6655  *
6656  * No need to return an error if it times out, our only option
6657  * is to proceed anyway.
6658  */
6659 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6660 {
6661 	unsigned long timeout;
6662 	u64 reg;
6663 
6664 	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6665 	while (1) {
6666 		reg = read_csr(dd, CCE_STATUS);
6667 		if (freeze) {
6668 			/* waiting until all indicators are set */
6669 			if ((reg & ALL_FROZE) == ALL_FROZE)
6670 				return;	/* all done */
6671 		} else {
6672 			/* waiting until all indicators are clear */
6673 			if ((reg & ALL_FROZE) == 0)
6674 				return; /* all done */
6675 		}
6676 
6677 		if (time_after(jiffies, timeout)) {
6678 			dd_dev_err(dd,
6679 				   "Timed out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6680 				   freeze ? "" : "un", reg & ALL_FROZE,
6681 				   freeze ? ALL_FROZE : 0ull);
6682 			return;
6683 		}
6684 		usleep_range(80, 120);
6685 	}
6686 }
6687 
6688 /*
6689  * Do all freeze handling for the RXE block.
6690  */
6691 static void rxe_freeze(struct hfi1_devdata *dd)
6692 {
6693 	int i;
6694 
6695 	/* disable port */
6696 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6697 
6698 	/* disable all receive contexts */
6699 	for (i = 0; i < dd->num_rcv_contexts; i++)
6700 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6701 }
6702 
6703 /*
6704  * Unfreeze handling for the RXE block - kernel contexts only.
6705  * This will also enable the port.  User contexts will do unfreeze
6706  * handling on a per-context basis as they call into the driver.
6707  *
6708  */
6709 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6710 {
6711 	u32 rcvmask;
6712 	int i;
6713 
6714 	/* enable all kernel contexts */
6715 	for (i = 0; i < dd->n_krcv_queues; i++) {
6716 		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6717 		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6718 		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6719 			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6720 		hfi1_rcvctrl(dd, rcvmask, i);
6721 	}
6722 
6723 	/* enable port */
6724 	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6725 }
6726 
6727 /*
6728  * Non-interrupt SPC freeze handling.
6729  *
6730  * This is a work-queue function outside of the triggering interrupt.
6731  */
6732 void handle_freeze(struct work_struct *work)
6733 {
6734 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6735 								freeze_work);
6736 	struct hfi1_devdata *dd = ppd->dd;
6737 
6738 	/* wait for freeze indicators on all affected blocks */
6739 	wait_for_freeze_status(dd, 1);
6740 
6741 	/* SPC is now frozen */
6742 
6743 	/* do send PIO freeze steps */
6744 	pio_freeze(dd);
6745 
6746 	/* do send DMA freeze steps */
6747 	sdma_freeze(dd);
6748 
6749 	/* do send egress freeze steps - nothing to do */
6750 
6751 	/* do receive freeze steps */
6752 	rxe_freeze(dd);
6753 
6754 	/*
6755 	 * Unfreeze the hardware - clear the freeze, wait for each
6756 	 * block's frozen bit to clear, then clear the frozen flag.
6757 	 */
6758 	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6759 	wait_for_freeze_status(dd, 0);
6760 
6761 	if (is_ax(dd)) {
6762 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6763 		wait_for_freeze_status(dd, 1);
6764 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6765 		wait_for_freeze_status(dd, 0);
6766 	}
6767 
6768 	/* do send PIO unfreeze steps for kernel contexts */
6769 	pio_kernel_unfreeze(dd);
6770 
6771 	/* do send DMA unfreeze steps */
6772 	sdma_unfreeze(dd);
6773 
6774 	/* do send egress unfreeze steps - nothing to do */
6775 
6776 	/* do receive unfreeze steps for kernel contexts */
6777 	rxe_kernel_unfreeze(dd);
6778 
6779 	/*
6780 	 * The unfreeze procedure touches global device registers when
6781 	 * it disables and re-enables RXE. Mark the device unfrozen
6782 	 * after all that is done so other parts of the driver waiting
6783 	 * for the device to unfreeze don't do things out of order.
6784 	 *
6785 	 * The above implies that the meaning of HFI1_FROZEN flag is
6786 	 * "Device has gone into freeze mode and freeze mode handling
6787 	 * is still in progress."
6788 	 *
6789 	 * The flag will be removed when freeze mode processing has
6790 	 * completed.
6791 	 */
6792 	dd->flags &= ~HFI1_FROZEN;
6793 	wake_up(&dd->event_queue);
6794 
6795 	/* no longer frozen */
6796 }
6797 
6798 /*
6799  * Handle a link up interrupt from the 8051.
6800  *
6801  * This is a work-queue function outside of the interrupt.
6802  */
6803 void handle_link_up(struct work_struct *work)
6804 {
6805 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6806 						  link_up_work);
6807 	set_link_state(ppd, HLS_UP_INIT);
6808 
6809 	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6810 	read_ltp_rtt(ppd->dd);
6811 	/*
6812 	 * OPA specifies that certain counters are cleared on a transition
6813 	 * to link up, so do that.
6814 	 */
6815 	clear_linkup_counters(ppd->dd);
6816 	/*
6817 	 * And (re)set link up default values.
6818 	 */
6819 	set_linkup_defaults(ppd);
6820 
6821 	/* enforce link speed enabled */
6822 	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6823 		/* oops - current speed is not enabled, bounce */
6824 		dd_dev_err(ppd->dd,
6825 			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6826 			   ppd->link_speed_active, ppd->link_speed_enabled);
6827 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6828 				     OPA_LINKDOWN_REASON_SPEED_POLICY);
6829 		set_link_state(ppd, HLS_DN_OFFLINE);
6830 		start_link(ppd);
6831 	}
6832 }
6833 
6834 /*
6835  * Several pieces of LNI information were cached for SMA in ppd.
6836  * Reset these on link down
6837  */
6838 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6839 {
6840 	ppd->neighbor_guid = 0;
6841 	ppd->neighbor_port_number = 0;
6842 	ppd->neighbor_type = 0;
6843 	ppd->neighbor_fm_security = 0;
6844 }
6845 
6846 static const char * const link_down_reason_strs[] = {
6847 	[OPA_LINKDOWN_REASON_NONE] = "None",
6848 	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6849 	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6850 	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6851 	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6852 	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6853 	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6854 	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6855 	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6856 	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6857 	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6858 	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6859 	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6860 	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6861 	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6862 	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6863 	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6864 	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6865 	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6866 	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6867 	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6868 	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6869 	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6870 	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6871 	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6872 	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6873 	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6874 	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6875 	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6876 	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6877 	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6878 	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6879 	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6880 					"Excessive buffer overrun",
6881 	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6882 	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6883 	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6884 	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6885 	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6886 	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6887 	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6888 	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6889 					"Local media not installed",
6890 	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6891 	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6892 	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6893 					"End to end not installed",
6894 	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6895 	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6896 	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6897 	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6898 	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6899 	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6900 };
6901 
6902 /* return the neighbor link down reason string */
6903 static const char *link_down_reason_str(u8 reason)
6904 {
6905 	const char *str = NULL;
6906 
6907 	if (reason < ARRAY_SIZE(link_down_reason_strs))
6908 		str = link_down_reason_strs[reason];
6909 	if (!str)
6910 		str = "(invalid)";
6911 
6912 	return str;
6913 }
6914 
6915 /*
6916  * Handle a link down interrupt from the 8051.
6917  *
6918  * This is a work-queue function outside of the interrupt.
6919  */
6920 void handle_link_down(struct work_struct *work)
6921 {
6922 	u8 lcl_reason, neigh_reason = 0;
6923 	u8 link_down_reason;
6924 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6925 						  link_down_work);
6926 	int was_up;
6927 	static const char ldr_str[] = "Link down reason: ";
6928 
6929 	if ((ppd->host_link_state &
6930 	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6931 	     ppd->port_type == PORT_TYPE_FIXED)
6932 		ppd->offline_disabled_reason =
6933 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6934 
6935 	/* Go offline first, then deal with reading/writing through 8051 */
6936 	was_up = !!(ppd->host_link_state & HLS_UP);
6937 	set_link_state(ppd, HLS_DN_OFFLINE);
6938 
6939 	if (was_up) {
6940 		lcl_reason = 0;
6941 		/* link down reason is only valid if the link was up */
6942 		read_link_down_reason(ppd->dd, &link_down_reason);
6943 		switch (link_down_reason) {
6944 		case LDR_LINK_TRANSFER_ACTIVE_LOW:
6945 			/* the link went down, no idle message reason */
6946 			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6947 				    ldr_str);
6948 			break;
6949 		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6950 			/*
6951 			 * The neighbor reason is only valid if an idle message
6952 			 * was received for it.
6953 			 */
6954 			read_planned_down_reason_code(ppd->dd, &neigh_reason);
6955 			dd_dev_info(ppd->dd,
6956 				    "%sNeighbor link down message %d, %s\n",
6957 				    ldr_str, neigh_reason,
6958 				    link_down_reason_str(neigh_reason));
6959 			break;
6960 		case LDR_RECEIVED_HOST_OFFLINE_REQ:
6961 			dd_dev_info(ppd->dd,
6962 				    "%sHost requested link to go offline\n",
6963 				    ldr_str);
6964 			break;
6965 		default:
6966 			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6967 				    ldr_str, link_down_reason);
6968 			break;
6969 		}
6970 
6971 		/*
6972 		 * If no reason, assume peer-initiated but missed
6973 		 * LinkGoingDown idle flits.
6974 		 */
6975 		if (neigh_reason == 0)
6976 			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6977 	} else {
6978 		/* went down while polling or going up */
6979 		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6980 	}
6981 
6982 	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6983 
6984 	/* inform the SMA when the link transitions from up to down */
6985 	if (was_up && ppd->local_link_down_reason.sma == 0 &&
6986 	    ppd->neigh_link_down_reason.sma == 0) {
6987 		ppd->local_link_down_reason.sma =
6988 					ppd->local_link_down_reason.latest;
6989 		ppd->neigh_link_down_reason.sma =
6990 					ppd->neigh_link_down_reason.latest;
6991 	}
6992 
6993 	reset_neighbor_info(ppd);
6994 
6995 	/* disable the port */
6996 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6997 
6998 	/*
6999 	 * If there is no cable attached, turn the DC off. Otherwise,
7000 	 * start the link bring up.
7001 	 */
7002 	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7003 		dc_shutdown(ppd->dd);
7004 	else
7005 		start_link(ppd);
7006 }
7007 
7008 void handle_link_bounce(struct work_struct *work)
7009 {
7010 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7011 							link_bounce_work);
7012 
7013 	/*
7014 	 * Only do something if the link is currently up.
7015 	 */
7016 	if (ppd->host_link_state & HLS_UP) {
7017 		set_link_state(ppd, HLS_DN_OFFLINE);
7018 		start_link(ppd);
7019 	} else {
7020 		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7021 			    __func__, link_state_name(ppd->host_link_state));
7022 	}
7023 }
7024 
7025 /*
7026  * Mask conversion: Capability exchange to Port LTP.  The capability
7027  * exchange has an implicit 16b CRC that is mandatory.
7028  */
7029 static int cap_to_port_ltp(int cap)
7030 {
7031 	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7032 
7033 	if (cap & CAP_CRC_14B)
7034 		port_ltp |= PORT_LTP_CRC_MODE_14;
7035 	if (cap & CAP_CRC_48B)
7036 		port_ltp |= PORT_LTP_CRC_MODE_48;
7037 	if (cap & CAP_CRC_12B_16B_PER_LANE)
7038 		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7039 
7040 	return port_ltp;
7041 }
7042 
7043 /*
7044  * Convert an OPA Port LTP mask to capability mask
7045  */
7046 int port_ltp_to_cap(int port_ltp)
7047 {
7048 	int cap_mask = 0;
7049 
7050 	if (port_ltp & PORT_LTP_CRC_MODE_14)
7051 		cap_mask |= CAP_CRC_14B;
7052 	if (port_ltp & PORT_LTP_CRC_MODE_48)
7053 		cap_mask |= CAP_CRC_48B;
7054 	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7055 		cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7056 
7057 	return cap_mask;
7058 }
7059 
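/*
 * Worked example (illustrative value only): for cap = CAP_CRC_14B |
 * CAP_CRC_48B, cap_to_port_ltp() yields PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48, and feeding that back
 * through port_ltp_to_cap() recovers exactly the two capability bits:
 * the mandatory 16b mode has no corresponding capability flag.
 */
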
7060 /*
7061  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7062  */
7063 static int lcb_to_port_ltp(int lcb_crc)
7064 {
7065 	int port_ltp = 0;
7066 
7067 	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7068 		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7069 	else if (lcb_crc == LCB_CRC_48B)
7070 		port_ltp = PORT_LTP_CRC_MODE_48;
7071 	else if (lcb_crc == LCB_CRC_14B)
7072 		port_ltp = PORT_LTP_CRC_MODE_14;
7073 	else
7074 		port_ltp = PORT_LTP_CRC_MODE_16;
7075 
7076 	return port_ltp;
7077 }
7078 
7079 /*
7080  * Our neighbor has indicated that we are allowed to act as a fabric
7081  * manager, so place the full management partition key in the second
7082  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7083  * that we should already have the limited management partition key in
7084  * array element 1, and also that the port is not yet up when
7085  * add_full_mgmt_pkey() is invoked.
7086  */
7087 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7088 {
7089 	struct hfi1_devdata *dd = ppd->dd;
7090 
7091 	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7092 	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7093 		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7094 			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7095 	ppd->pkeys[2] = FULL_MGMT_P_KEY;
7096 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7097 	hfi1_event_pkey_change(ppd->dd, ppd->port);
7098 }
7099 
7100 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7101 {
7102 	if (ppd->pkeys[2] != 0) {
7103 		ppd->pkeys[2] = 0;
7104 		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7105 		hfi1_event_pkey_change(ppd->dd, ppd->port);
7106 	}
7107 }
7108 
7109 /*
7110  * Convert the given link width to the OPA link width bitmask.
7111  */
7112 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7113 {
7114 	switch (width) {
7115 	case 0:
7116 		/*
7117 		 * Simulator and quick linkup do not set the width.
7118 		 * Just set it to 4x without complaint.
7119 		 */
7120 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7121 			return OPA_LINK_WIDTH_4X;
7122 		return 0; /* no lanes up */
7123 	case 1: return OPA_LINK_WIDTH_1X;
7124 	case 2: return OPA_LINK_WIDTH_2X;
7125 	case 3: return OPA_LINK_WIDTH_3X;
7126 	default:
7127 		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7128 			    __func__, width);
7129 		/* fall through */
7130 	case 4: return OPA_LINK_WIDTH_4X;
7131 	}
7132 }
7133 
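/*
 * Worked examples: link_width_to_bits(dd, 2) returns OPA_LINK_WIDTH_2X;
 * a width of 0 returns OPA_LINK_WIDTH_4X only on the simulator or for
 * quick linkup, and 0 (no lanes up) otherwise.
 */
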
7134 /*
7135  * Do a population count on the bottom nibble.
7136  */
7137 static const u8 bit_counts[16] = {
7138 	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7139 };
7140 
7141 static inline u8 nibble_to_count(u8 nibble)
7142 {
7143 	return bit_counts[nibble & 0xf];
7144 }
7145 
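/*
 * Worked example: an enable_lane nibble of 0xb (binary 1011) has three
 * bits set, so nibble_to_count(0xb) = 3 active lanes; bit_counts[] is
 * just a table of population counts for the 16 possible nibble values.
 */
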
7146 /*
7147  * Read the active lane information from the 8051 registers and return
7148  * their widths.
7149  *
7150  * Active lane information is found in these 8051 registers:
7151  *	enable_lane_tx
7152  *	enable_lane_rx
7153  */
7154 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7155 			    u16 *rx_width)
7156 {
7157 	u16 tx, rx;
7158 	u8 enable_lane_rx;
7159 	u8 enable_lane_tx;
7160 	u8 tx_polarity_inversion;
7161 	u8 rx_polarity_inversion;
7162 	u8 max_rate;
7163 
7164 	/* read the active lanes */
7165 	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7166 			 &rx_polarity_inversion, &max_rate);
7167 	read_local_lni(dd, &enable_lane_rx);
7168 
7169 	/* convert to counts */
7170 	tx = nibble_to_count(enable_lane_tx);
7171 	rx = nibble_to_count(enable_lane_rx);
7172 
7173 	/*
7174 	 * Set link_speed_active here, overriding what was set in
7175 	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7176 	 * set the max_rate field in handle_verify_cap until v0.19.
7177 	 */
7178 	if ((dd->icode == ICODE_RTL_SILICON) &&
7179 	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
7180 		/* max_rate: 0 = 12.5G, 1 = 25G */
7181 		switch (max_rate) {
7182 		case 0:
7183 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7184 			break;
7185 		default:
7186 			dd_dev_err(dd,
7187 				   "%s: unexpected max rate %d, using 25Gb\n",
7188 				   __func__, (int)max_rate);
7189 			/* fall through */
7190 		case 1:
7191 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7192 			break;
7193 		}
7194 	}
7195 
7196 	dd_dev_info(dd,
7197 		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7198 		    enable_lane_tx, tx, enable_lane_rx, rx);
7199 	*tx_width = link_width_to_bits(dd, tx);
7200 	*rx_width = link_width_to_bits(dd, rx);
7201 }
7202 
7203 /*
7204  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7205  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7206  * after link up.  I.e. look elsewhere for downgrade information.
7207  *
7208  * Bits are:
7209  *	+ bits [7:4] contain the number of active transmitters
7210  *	+ bits [3:0] contain the number of active receivers
7211  * These are numbers 1 through 4 and can be different values if the
7212  * link is asymmetric.
7213  *
7214  * verify_cap_local_fm_link_width[0] retains its original value.
7215  */
7216 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7217 			      u16 *rx_width)
7218 {
7219 	u16 widths, tx, rx;
7220 	u8 misc_bits, local_flags;
7221 	u16 active_tx, active_rx;
7222 
7223 	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7224 	tx = widths >> 12;
7225 	rx = (widths >> 8) & 0xf;
7226 
7227 	*tx_width = link_width_to_bits(dd, tx);
7228 	*rx_width = link_width_to_bits(dd, rx);
7229 
7230 	/* print the active widths */
7231 	get_link_widths(dd, &active_tx, &active_rx);
7232 }
7233 
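/*
 * Worked example (illustrative value only): widths = 0x4400 decodes as
 * tx = 0x4400 >> 12 = 4 active transmitters and rx = (0x4400 >> 8) &
 * 0xf = 4 active receivers, i.e. a symmetric 4x link.
 */
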
7234 /*
7235  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7236  * hardware information when the link first comes up.
7237  *
7238  * The link width is not available until after VerifyCap.AllFramesReceived
7239  * (the trigger for handle_verify_cap), so this is outside that routine
7240  * and should be called when the 8051 signals linkup.
7241  */
7242 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7243 {
7244 	u16 tx_width, rx_width;
7245 
7246 	/* get end-of-LNI link widths */
7247 	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7248 
7249 	/* use tx_width as the link is supposed to be symmetric on link up */
7250 	ppd->link_width_active = tx_width;
7251 	/* link width downgrade active (LWD.A) starts out matching LW.A */
7252 	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7253 	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7254 	/* per OPA spec, on link up LWD.E resets to LWD.S */
7255 	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7256 	/* cache the active egress rate (units [10^6 bits/sec]) */
7257 	ppd->current_egress_rate = active_egress_rate(ppd);
7258 }
7259 
7260 /*
7261  * Handle a verify capabilities interrupt from the 8051.
7262  *
7263  * This is a work-queue function outside of the interrupt.
7264  */
7265 void handle_verify_cap(struct work_struct *work)
7266 {
7267 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7268 								link_vc_work);
7269 	struct hfi1_devdata *dd = ppd->dd;
7270 	u64 reg;
7271 	u8 power_management;
7272 	u8 continuous;
7273 	u8 vcu;
7274 	u8 vau;
7275 	u8 z;
7276 	u16 vl15buf;
7277 	u16 link_widths;
7278 	u16 crc_mask;
7279 	u16 crc_val;
7280 	u16 device_id;
7281 	u16 active_tx, active_rx;
7282 	u8 partner_supported_crc;
7283 	u8 remote_tx_rate;
7284 	u8 device_rev;
7285 
7286 	set_link_state(ppd, HLS_VERIFY_CAP);
7287 
7288 	lcb_shutdown(dd, 0);
7289 	adjust_lcb_for_fpga_serdes(dd);
7290 
7291 	/*
7292 	 * These are now valid:
7293 	 *	remote VerifyCap fields in the general LNI config
7294 	 *	CSR DC8051_STS_REMOTE_GUID
7295 	 *	CSR DC8051_STS_REMOTE_NODE_TYPE
7296 	 *	CSR DC8051_STS_REMOTE_FM_SECURITY
7297 	 *	CSR DC8051_STS_REMOTE_PORT_NO
7298 	 */
7299 
7300 	read_vc_remote_phy(dd, &power_management, &continuous);
7301 	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7302 			      &partner_supported_crc);
7303 	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7304 	read_remote_device_id(dd, &device_id, &device_rev);
7305 	/*
7306 	 * And the 'MgmtAllowed' information, which is exchanged during
7307 	 * LNI, is also available at this point.
7308 	 */
7309 	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7310 	/* print the active widths */
7311 	get_link_widths(dd, &active_tx, &active_rx);
7312 	dd_dev_info(dd,
7313 		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7314 		    (int)power_management, (int)continuous);
7315 	dd_dev_info(dd,
7316 		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7317 		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7318 		    (int)partner_supported_crc);
7319 	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7320 		    (u32)remote_tx_rate, (u32)link_widths);
7321 	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7322 		    (u32)device_id, (u32)device_rev);
7323 	/*
7324 	 * The peer vAU value just read is the peer receiver value.  HFI does
7325 	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
7326 	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
7327 	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7328 	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7329 	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7330 	 * subject to the Z value exception.
7331 	 */
7332 	if (vau == 0)
7333 		vau = 1;
7334 	set_up_vl15(dd, vau, vl15buf);
7335 
7336 	/* set up the LCB CRC mode */
7337 	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7338 
7339 	/* order is important: use the lowest bit in common */
7340 	if (crc_mask & CAP_CRC_14B)
7341 		crc_val = LCB_CRC_14B;
7342 	else if (crc_mask & CAP_CRC_48B)
7343 		crc_val = LCB_CRC_48B;
7344 	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7345 		crc_val = LCB_CRC_12B_16B_PER_LANE;
7346 	else
7347 		crc_val = LCB_CRC_16B;
7348 
7349 	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7350 	write_csr(dd, DC_LCB_CFG_CRC_MODE,
7351 		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7352 
7353 	/* set (14b only) or clear sideband credit */
7354 	reg = read_csr(dd, SEND_CM_CTRL);
7355 	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7356 		write_csr(dd, SEND_CM_CTRL,
7357 			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7358 	} else {
7359 		write_csr(dd, SEND_CM_CTRL,
7360 			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7361 	}
7362 
7363 	ppd->link_speed_active = 0;	/* invalid value */
7364 	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7365 		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7366 		switch (remote_tx_rate) {
7367 		case 0:
7368 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7369 			break;
7370 		case 1:
7371 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7372 			break;
7373 		}
7374 	} else {
7375 		/* actual rate is highest bit of the ANDed rates */
7376 		u8 rate = remote_tx_rate & ppd->local_tx_rate;
7377 
7378 		if (rate & 2)
7379 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7380 		else if (rate & 1)
7381 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7382 	}
7383 	if (ppd->link_speed_active == 0) {
7384 		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7385 			   __func__, (int)remote_tx_rate);
7386 		ppd->link_speed_active = OPA_LINK_SPEED_25G;
7387 	}
7388 
7389 	/*
7390 	 * Cache the values of the supported, enabled, and active
7391 	 * LTP CRC modes to return in 'portinfo' queries. But the bit
7392 	 * flags that are returned in the portinfo query differ from
7393 	 * what's in the link_crc_mask, crc_sizes, and crc_val
7394 	 * variables. Convert these here.
7395 	 */
7396 	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7397 		/* supported crc modes */
7398 	ppd->port_ltp_crc_mode |=
7399 		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7400 		/* enabled crc modes */
7401 	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7402 		/* active crc mode */
7403 
7404 	/* set up the remote credit return table */
7405 	assign_remote_cm_au_table(dd, vcu);
7406 
7407 	/*
7408 	 * The LCB is reset on entry to handle_verify_cap(), so this must
7409 	 * be applied on every link up.
7410 	 *
7411 	 * Adjust LCB error kill enable to kill the link if
7412 	 * these RBUF errors are seen:
7413 	 *	REPLAY_BUF_MBE_SMASK
7414 	 *	FLIT_INPUT_BUF_MBE_SMASK
7415 	 */
7416 	if (is_ax(dd)) {			/* fixed in B0 */
7417 		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7418 		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7419 			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7420 		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7421 	}
7422 
7423 	/* pull LCB fifos out of reset - all fifo clocks must be stable */
7424 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7425 
7426 	/* give 8051 access to the LCB CSRs */
7427 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7428 	set_8051_lcb_access(dd);
7429 
7430 	ppd->neighbor_guid =
7431 		read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7432 	ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7433 					DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7434 	ppd->neighbor_type =
7435 		read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7436 		DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7437 	ppd->neighbor_fm_security =
7438 		read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7439 		DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7440 	dd_dev_info(dd,
7441 		    "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7442 		    ppd->neighbor_guid, ppd->neighbor_type,
7443 		    ppd->mgmt_allowed, ppd->neighbor_fm_security);
7444 	if (ppd->mgmt_allowed)
7445 		add_full_mgmt_pkey(ppd);
7446 
7447 	/* tell the 8051 to go to LinkUp */
7448 	set_link_state(ppd, HLS_GOING_UP);
7449 }
7450 
7451 /*
7452  * Apply the link width downgrade enabled policy against the current active
7453  * link widths.
7454  *
7455  * Called when the enabled policy changes or the active link widths change.
7456  */
7457 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7458 {
7459 	int do_bounce = 0;
7460 	int tries;
7461 	u16 lwde;
7462 	u16 tx, rx;
7463 
7464 	/* use the hls lock to avoid a race with actual link up */
7465 	tries = 0;
7466 retry:
7467 	mutex_lock(&ppd->hls_lock);
7468 	/* only apply if the link is up */
7469 	if (ppd->host_link_state & HLS_DOWN) {
7470 		/* still going up... wait and retry */
7471 		if (ppd->host_link_state & HLS_GOING_UP) {
7472 			if (++tries < 1000) {
7473 				mutex_unlock(&ppd->hls_lock);
7474 				usleep_range(100, 120); /* arbitrary */
7475 				goto retry;
7476 			}
7477 			dd_dev_err(ppd->dd,
7478 				   "%s: giving up waiting for link state change\n",
7479 				   __func__);
7480 		}
7481 		goto done;
7482 	}
7483 
7484 	lwde = ppd->link_width_downgrade_enabled;
7485 
7486 	if (refresh_widths) {
7487 		get_link_widths(ppd->dd, &tx, &rx);
7488 		ppd->link_width_downgrade_tx_active = tx;
7489 		ppd->link_width_downgrade_rx_active = rx;
7490 	}
7491 
7492 	if (ppd->link_width_downgrade_tx_active == 0 ||
7493 	    ppd->link_width_downgrade_rx_active == 0) {
7494 		/* the 8051 reported a dead link as a downgrade */
7495 		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7496 	} else if (lwde == 0) {
7497 		/* downgrade is disabled */
7498 
7499 		/* bounce if not at starting active width */
7500 		if ((ppd->link_width_active !=
7501 		     ppd->link_width_downgrade_tx_active) ||
7502 		    (ppd->link_width_active !=
7503 		     ppd->link_width_downgrade_rx_active)) {
7504 			dd_dev_err(ppd->dd,
7505 				   "Link downgrade is disabled and link has downgraded, downing link\n");
7506 			dd_dev_err(ppd->dd,
7507 				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7508 				   ppd->link_width_active,
7509 				   ppd->link_width_downgrade_tx_active,
7510 				   ppd->link_width_downgrade_rx_active);
7511 			do_bounce = 1;
7512 		}
7513 	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7514 		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7515 		/* Tx or Rx is outside the enabled policy */
7516 		dd_dev_err(ppd->dd,
7517 			   "Link is outside of downgrade allowed, downing link\n");
7518 		dd_dev_err(ppd->dd,
7519 			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7520 			   lwde, ppd->link_width_downgrade_tx_active,
7521 			   ppd->link_width_downgrade_rx_active);
7522 		do_bounce = 1;
7523 	}
7524 
7525 done:
7526 	mutex_unlock(&ppd->hls_lock);
7527 
7528 	if (do_bounce) {
7529 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7530 				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7531 		set_link_state(ppd, HLS_DN_OFFLINE);
7532 		start_link(ppd);
7533 	}
7534 }
7535 
7536 /*
7537  * Handle a link downgrade interrupt from the 8051.
7538  *
7539  * This is a work-queue function outside of the interrupt.
7540  */
7541 void handle_link_downgrade(struct work_struct *work)
7542 {
7543 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7544 							link_downgrade_work);
7545 
7546 	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7547 	apply_link_downgrade_policy(ppd, 1);
7548 }
7549 
7550 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7551 {
7552 	return flag_string(buf, buf_len, flags, dcc_err_flags,
7553 		ARRAY_SIZE(dcc_err_flags));
7554 }
7555 
7556 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7557 {
7558 	return flag_string(buf, buf_len, flags, lcb_err_flags,
7559 		ARRAY_SIZE(lcb_err_flags));
7560 }
7561 
7562 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7563 {
7564 	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7565 		ARRAY_SIZE(dc8051_err_flags));
7566 }
7567 
7568 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7569 {
7570 	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7571 		ARRAY_SIZE(dc8051_info_err_flags));
7572 }
7573 
7574 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7575 {
7576 	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7577 		ARRAY_SIZE(dc8051_info_host_msg_flags));
7578 }
7579 
7580 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7581 {
7582 	struct hfi1_pportdata *ppd = dd->pport;
7583 	u64 info, err, host_msg;
7584 	int queue_link_down = 0;
7585 	char buf[96];
7586 
7587 	/* look at the flags */
7588 	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7589 		/* 8051 information set by firmware */
7590 		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7591 		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7592 		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7593 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7594 		host_msg = (info >>
7595 			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7596 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7597 
7598 		/*
7599 		 * Handle error flags.
7600 		 */
7601 		if (err & FAILED_LNI) {
7602 			/*
7603 			 * LNI error indications are cleared by the 8051
7604 			 * only when starting polling.  Only pay attention
7605 			 * to them when in the states that occur during
7606 			 * LNI.
7607 			 */
7608 			if (ppd->host_link_state
7609 			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7610 				queue_link_down = 1;
7611 				dd_dev_info(dd, "Link error: %s\n",
7612 					    dc8051_info_err_string(buf,
7613 								   sizeof(buf),
7614 								   err &
7615 								   FAILED_LNI));
7616 			}
7617 			err &= ~(u64)FAILED_LNI;
7618 		}
7619 		/* unknown frames can happen during LNI, just count */
7620 		if (err & UNKNOWN_FRAME) {
7621 			ppd->unknown_frame_count++;
7622 			err &= ~(u64)UNKNOWN_FRAME;
7623 		}
7624 		if (err) {
7625 			/* report remaining errors, but do not do anything */
7626 			dd_dev_err(dd, "8051 info error: %s\n",
7627 				   dc8051_info_err_string(buf, sizeof(buf),
7628 							  err));
7629 		}
7630 
7631 		/*
7632 		 * Handle host message flags.
7633 		 */
7634 		if (host_msg & HOST_REQ_DONE) {
7635 			/*
7636 			 * Presently, the driver does a busy wait for
7637 			 * host requests to complete.  This is only an
7638 			 * informational message.
7639 			 * NOTE: The 8051 clears the host message
7640 			 * information *on the next 8051 command*.
7641 			 * Therefore, when linkup is achieved,
7642 			 * this flag will still be set.
7643 			 */
7644 			host_msg &= ~(u64)HOST_REQ_DONE;
7645 		}
7646 		if (host_msg & BC_SMA_MSG) {
7647 			queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7648 			host_msg &= ~(u64)BC_SMA_MSG;
7649 		}
7650 		if (host_msg & LINKUP_ACHIEVED) {
7651 			dd_dev_info(dd, "8051: Link up\n");
7652 			queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7653 			host_msg &= ~(u64)LINKUP_ACHIEVED;
7654 		}
7655 		if (host_msg & EXT_DEVICE_CFG_REQ) {
7656 			handle_8051_request(ppd);
7657 			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7658 		}
7659 		if (host_msg & VERIFY_CAP_FRAME) {
7660 			queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7661 			host_msg &= ~(u64)VERIFY_CAP_FRAME;
7662 		}
7663 		if (host_msg & LINK_GOING_DOWN) {
7664 			const char *extra = "";
7665 			/* no downgrade action needed if going down */
7666 			if (host_msg & LINK_WIDTH_DOWNGRADED) {
7667 				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7668 				extra = " (ignoring downgrade)";
7669 			}
7670 			dd_dev_info(dd, "8051: Link down%s\n", extra);
7671 			queue_link_down = 1;
7672 			host_msg &= ~(u64)LINK_GOING_DOWN;
7673 		}
7674 		if (host_msg & LINK_WIDTH_DOWNGRADED) {
7675 			queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7676 			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7677 		}
7678 		if (host_msg) {
7679 			/* report remaining messages, but do not do anything */
7680 			dd_dev_info(dd, "8051 info host message: %s\n",
7681 				    dc8051_info_host_msg_string(buf,
7682 								sizeof(buf),
7683 								host_msg));
7684 		}
7685 
7686 		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7687 	}
7688 	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7689 		/*
7690 		 * Lost the 8051 heartbeat.  If this happens, we
7691 		 * receive constant interrupts about it.  Disable
7692 		 * the interrupt after the first.
7693 		 */
7694 		dd_dev_err(dd, "Lost 8051 heartbeat\n");
7695 		write_csr(dd, DC_DC8051_ERR_EN,
7696 			  read_csr(dd, DC_DC8051_ERR_EN) &
7697 			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7698 
7699 		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7700 	}
7701 	if (reg) {
7702 		/* report the error, but do not do anything */
7703 		dd_dev_err(dd, "8051 error: %s\n",
7704 			   dc8051_err_string(buf, sizeof(buf), reg));
7705 	}
7706 
7707 	if (queue_link_down) {
7708 		/*
7709 		 * if the link is already going down or disabled, do not
7710 		 * queue another
7711 		 */
7712 		if ((ppd->host_link_state &
7713 		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7714 		    ppd->link_enabled == 0) {
7715 			dd_dev_info(dd, "%s: not queuing link down\n",
7716 				    __func__);
7717 		} else {
7718 			queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7719 		}
7720 	}
7721 }
7722 
7723 static const char * const fm_config_txt[] = {
7724 [0] =
7725 	"BadHeadDist: Distance violation between two head flits",
7726 [1] =
7727 	"BadTailDist: Distance violation between two tail flits",
7728 [2] =
7729 	"BadCtrlDist: Distance violation between two credit control flits",
7730 [3] =
7731 	"BadCrdAck: Credits return for unsupported VL",
7732 [4] =
7733 	"UnsupportedVLMarker: Received VL Marker",
7734 [5] =
7735 	"BadPreempt: Exceeded the preemption nesting level",
7736 [6] =
7737 	"BadControlFlit: Received unsupported control flit",
7738 /* no 7 */
7739 [8] =
7740 	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7741 };
7742 
7743 static const char * const port_rcv_txt[] = {
7744 [1] =
7745 	"BadPktLen: Illegal PktLen",
7746 [2] =
7747 	"PktLenTooLong: Packet longer than PktLen",
7748 [3] =
7749 	"PktLenTooShort: Packet shorter than PktLen",
7750 [4] =
7751 	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7752 [5] =
7753 	"BadDLID: Illegal DLID (0, doesn't match HFI)",
7754 [6] =
7755 	"BadL2: Illegal L2 opcode",
7756 [7] =
7757 	"BadSC: Unsupported SC",
7758 [9] =
7759 	"BadRC: Illegal RC",
7760 [11] =
7761 	"PreemptError: Preempting with same VL",
7762 [12] =
7763 	"PreemptVL15: Preempting a VL15 packet",
7764 };
7765 
7766 #define OPA_LDR_FMCONFIG_OFFSET 16
7767 #define OPA_LDR_PORTRCV_OFFSET 0
7768 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7769 {
7770 	u64 info, hdr0, hdr1;
7771 	const char *extra;
7772 	char buf[96];
7773 	struct hfi1_pportdata *ppd = dd->pport;
7774 	u8 lcl_reason = 0;
7775 	int do_bounce = 0;
7776 
7777 	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7778 		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7779 			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7780 			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7781 			/* set status bit */
7782 			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7783 		}
7784 		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7785 	}
7786 
7787 	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7789 		/* this counter saturates at (2^32) - 1 */
7790 		if (ppd->link_downed < (u32)UINT_MAX)
7791 			ppd->link_downed++;
7792 		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7793 	}
7794 
7795 	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7796 		u8 reason_valid = 1;
7797 
7798 		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7799 		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7800 			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7801 			/* set status bit */
7802 			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7803 		}
7804 		switch (info) {
7805 		case 0:
7806 		case 1:
7807 		case 2:
7808 		case 3:
7809 		case 4:
7810 		case 5:
7811 		case 6:
7812 			extra = fm_config_txt[info];
7813 			break;
7814 		case 8:
7815 			extra = fm_config_txt[info];
7816 			if (ppd->port_error_action &
7817 			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7818 				do_bounce = 1;
7819 				/*
7820 				 * lcl_reason cannot be derived from info
7821 				 * for this error
7822 				 */
7823 				lcl_reason =
7824 				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7825 			}
7826 			break;
7827 		default:
7828 			reason_valid = 0;
7829 			snprintf(buf, sizeof(buf), "reserved%lld", info);
7830 			extra = buf;
7831 			break;
7832 		}
7833 
7834 		if (reason_valid && !do_bounce) {
7835 			do_bounce = ppd->port_error_action &
7836 					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7837 			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7838 		}
7839 
7840 		/* just report this */
7841 		dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7842 		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7843 	}
7844 
7845 	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7846 		u8 reason_valid = 1;
7847 
7848 		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7849 		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7850 		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7851 		if (!(dd->err_info_rcvport.status_and_code &
7852 		      OPA_EI_STATUS_SMASK)) {
7853 			dd->err_info_rcvport.status_and_code =
7854 				info & OPA_EI_CODE_SMASK;
7855 			/* set status bit */
7856 			dd->err_info_rcvport.status_and_code |=
7857 				OPA_EI_STATUS_SMASK;
7858 			/*
7859 			 * save first 2 flits in the packet that caused
7860 			 * the error
7861 			 */
7862 			dd->err_info_rcvport.packet_flit1 = hdr0;
7863 			dd->err_info_rcvport.packet_flit2 = hdr1;
7864 		}
7865 		switch (info) {
7866 		case 1:
7867 		case 2:
7868 		case 3:
7869 		case 4:
7870 		case 5:
7871 		case 6:
7872 		case 7:
7873 		case 9:
7874 		case 11:
7875 		case 12:
7876 			extra = port_rcv_txt[info];
7877 			break;
7878 		default:
7879 			reason_valid = 0;
7880 			snprintf(buf, sizeof(buf), "reserved%lld", info);
7881 			extra = buf;
7882 			break;
7883 		}
7884 
7885 		if (reason_valid && !do_bounce) {
7886 			do_bounce = ppd->port_error_action &
7887 					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
7888 			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7889 		}
7890 
7891 		/* just report this */
7892 		dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7893 		dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7894 			    hdr0, hdr1);
7895 
7896 		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7897 	}
7898 
7899 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7900 		/* informative only */
7901 		dd_dev_info(dd, "8051 access to LCB blocked\n");
7902 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7903 	}
7904 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7905 		/* informative only */
7906 		dd_dev_info(dd, "host access to LCB blocked\n");
7907 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7908 	}
7909 
7910 	/* report any remaining errors */
7911 	if (reg)
7912 		dd_dev_info(dd, "DCC Error: %s\n",
7913 			    dcc_err_string(buf, sizeof(buf), reg));
7914 
7915 	if (lcl_reason == 0)
7916 		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7917 
7918 	if (do_bounce) {
7919 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7920 		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7921 		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7922 	}
7923 }
7924 
7925 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7926 {
7927 	char buf[96];
7928 
7929 	dd_dev_info(dd, "LCB Error: %s\n",
7930 		    lcb_err_string(buf, sizeof(buf), reg));
7931 }
7932 
7933 /*
7934  * CCE block DC interrupt.  Source is < 8.
7935  */
7936 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7937 {
7938 	const struct err_reg_info *eri = &dc_errs[source];
7939 
7940 	if (eri->handler) {
7941 		interrupt_clear_down(dd, 0, eri);
7942 	} else if (source == 3 /* dc_lbm_int */) {
7943 		/*
7944 		 * This indicates that a parity error has occurred on the
7945 		 * address/control lines presented to the LBM.  The error
7946 		 * is a single pulse, there is no associated error flag,
7947 		 * and it is non-maskable.  This is because if a parity
7948 		 * error occurs on the request the request is dropped.
7949 		 * This should never occur, but it is nice to know if it
7950 		 * ever does.
7951 		 */
7952 		dd_dev_err(dd, "Parity error in DC LBM block\n");
7953 	} else {
7954 		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7955 	}
7956 }
7957 
7958 /*
7959  * TX block send credit interrupt.  Source is < 160.
7960  */
7961 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7962 {
7963 	sc_group_release_update(dd, source);
7964 }
7965 
7966 /*
7967  * TX block SDMA interrupt.  Source is < 48.
7968  *
7969  * SDMA interrupts are grouped by type:
7970  *
7971  *	 0 -  N-1 = SDma
7972  *	 N - 2N-1 = SDmaProgress
7973  *	2N - 3N-1 = SDmaIdle
7974  */
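/*
 * Illustrative decode of the grouping above (the engine count is an
 * assumption for the example: TXE_NUM_SDMA_ENGINES == 16): source 17
 * splits into
 *   what  = 17 / 16 = 1  ->  SDmaProgress
 *   which = 17 % 16 = 1  ->  engine 1
 * i.e. a progress interrupt for engine 1.
 */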
7975 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7976 {
7977 	/* what interrupt */
7978 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7979 	/* which engine */
7980 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7981 
7982 #ifdef CONFIG_SDMA_VERBOSITY
7983 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7984 		   slashstrip(__FILE__), __LINE__, __func__);
7985 	sdma_dumpstate(&dd->per_sdma[which]);
7986 #endif
7987 
7988 	if (likely(what < 3 && which < dd->num_sdma)) {
7989 		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7990 	} else {
7991 		/* should not happen */
7992 		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7993 	}
7994 }
7995 
7996 /*
7997  * RX block receive available interrupt.  Source is < 160.
7998  */
7999 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8000 {
8001 	struct hfi1_ctxtdata *rcd;
8002 	char *err_detail;
8003 
8004 	if (likely(source < dd->num_rcv_contexts)) {
8005 		rcd = dd->rcd[source];
8006 		if (rcd) {
8007 			if (source < dd->first_user_ctxt)
8008 				rcd->do_interrupt(rcd, 0);
8009 			else
8010 				handle_user_interrupt(rcd);
8011 			return;	/* OK */
8012 		}
8013 		/* received an interrupt, but no rcd */
8014 		err_detail = "dataless";
8015 	} else {
8016 		/* received an interrupt, but are not using that context */
8017 		err_detail = "out of range";
8018 	}
8019 	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8020 		   err_detail, source);
8021 }
8022 
8023 /*
8024  * RX block receive urgent interrupt.  Source is < 160.
8025  */
8026 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8027 {
8028 	struct hfi1_ctxtdata *rcd;
8029 	char *err_detail;
8030 
8031 	if (likely(source < dd->num_rcv_contexts)) {
8032 		rcd = dd->rcd[source];
8033 		if (rcd) {
8034 			/* only pay attention to user urgent interrupts */
8035 			if (source >= dd->first_user_ctxt)
8036 				handle_user_interrupt(rcd);
8037 			return;	/* OK */
8038 		}
8039 		/* received an interrupt, but no rcd */
8040 		err_detail = "dataless";
8041 	} else {
8042 		/* received an interrupt, but are not using that context */
8043 		err_detail = "out of range";
8044 	}
8045 	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8046 		   err_detail, source);
8047 }
8048 
8049 /*
8050  * Reserved range interrupt.  Should not be called in normal operation.
8051  */
8052 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8053 {
8054 	char name[64];
8055 
8056 	dd_dev_err(dd, "unexpected %s interrupt\n",
8057 		   is_reserved_name(name, sizeof(name), source));
8058 }
8059 
8060 static const struct is_table is_table[] = {
8061 /*
8062  * start		 end
8063  *				name func		interrupt func
8064  */
8065 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8066 				is_misc_err_name,	is_misc_err_int },
8067 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8068 				is_sdma_eng_err_name,	is_sdma_eng_err_int },
8069 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8070 				is_sendctxt_err_name,	is_sendctxt_err_int },
8071 { IS_SDMA_START,	     IS_SDMA_END,
8072 				is_sdma_eng_name,	is_sdma_eng_int },
8073 { IS_VARIOUS_START,	     IS_VARIOUS_END,
8074 				is_various_name,	is_various_int },
8075 { IS_DC_START,	     IS_DC_END,
8076 				is_dc_name,		is_dc_int },
8077 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8078 				is_rcv_avail_name,	is_rcv_avail_int },
8079 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8080 				is_rcv_urgent_name,	is_rcv_urgent_int },
8081 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8082 				is_send_credit_name,	is_send_credit_int},
8083 { IS_RESERVED_START,     IS_RESERVED_END,
8084 				is_reserved_name,	is_reserved_int},
8085 };
8086 
8087 /*
8088  * Interrupt source interrupt - called when the given source has an interrupt.
8089  * Source is a bit index into an array of 64-bit integers.
8090  */
8091 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8092 {
8093 	const struct is_table *entry;
8094 
8095 	/* avoids a double compare by walking the table in order */
8096 	for (entry = &is_table[0]; entry->is_name; entry++) {
8097 		if (source < entry->end) {
8098 			trace_hfi1_interrupt(dd, entry, source);
8099 			entry->is_int(dd, source - entry->start);
8100 			return;
8101 		}
8102 	}
8103 	/* fell off the end */
8104 	dd_dev_err(dd, "invalid interrupt source %u\n", source);
8105 }
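/*
 * Dispatch sketch (illustrative, using the table above): a source in
 * [IS_SDMA_START, IS_SDMA_END) matches the IS_SDMA entry, so
 * is_interrupt() calls is_sdma_eng_int(dd, source - IS_SDMA_START),
 * handing each handler an index relative to the start of its range.
 */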
8106 
8107 /*
8108  * General interrupt handler.  This is able to correctly handle
8109  * all interrupts in case INTx is used.
8110  */
8111 static irqreturn_t general_interrupt(int irq, void *data)
8112 {
8113 	struct hfi1_devdata *dd = data;
8114 	u64 regs[CCE_NUM_INT_CSRS];
8115 	u32 bit;
8116 	int i;
8117 
8118 	this_cpu_inc(*dd->int_counter);
8119 
8120 	/* phase 1: scan and clear all handled interrupts */
8121 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8122 		if (dd->gi_mask[i] == 0) {
8123 			regs[i] = 0;	/* used later */
8124 			continue;
8125 		}
8126 		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8127 				dd->gi_mask[i];
8128 		/* only clear if anything is set */
8129 		if (regs[i])
8130 			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8131 	}
8132 
8133 	/* phase 2: call the appropriate handler */
8134 	for_each_set_bit(bit, (unsigned long *)&regs[0],
8135 			 CCE_NUM_INT_CSRS * 64) {
8136 		is_interrupt(dd, bit);
8137 	}
8138 
8139 	return IRQ_HANDLED;
8140 }
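/*
 * Bit-to-source mapping used above (illustrative): regs[] is scanned as
 * one long bitmap, so bit b of CSR word i corresponds to interrupt
 * source i * 64 + b.  E.g., bit 3 set in regs[2] dispatches as
 * is_interrupt(dd, 2 * 64 + 3), i.e. source 131.
 */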
8141 
8142 static irqreturn_t sdma_interrupt(int irq, void *data)
8143 {
8144 	struct sdma_engine *sde = data;
8145 	struct hfi1_devdata *dd = sde->dd;
8146 	u64 status;
8147 
8148 #ifdef CONFIG_SDMA_VERBOSITY
8149 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8150 		   slashstrip(__FILE__), __LINE__, __func__);
8151 	sdma_dumpstate(sde);
8152 #endif
8153 
8154 	this_cpu_inc(*dd->int_counter);
8155 
8156 	/* This read_csr is really bad in the hot path */
8157 	status = read_csr(dd,
8158 			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8159 			  & sde->imask;
8160 	if (likely(status)) {
8161 		/* clear the interrupt(s) */
8162 		write_csr(dd,
8163 			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8164 			  status);
8165 
8166 		/* handle the interrupt(s) */
8167 		sdma_engine_interrupt(sde, status);
8168 	} else
8169 		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8170 			   sde->this_idx);
8171 
8172 	return IRQ_HANDLED;
8173 }
8174 
8175 /*
8176  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8177  * to ensure that the write completed.  This does NOT guarantee that
8178  * queued DMA writes to memory from the chip are pushed.
8179  */
8180 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8181 {
8182 	struct hfi1_devdata *dd = rcd->dd;
8183 	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8184 
8185 	mmiowb();	/* make sure everything before is written */
8186 	write_csr(dd, addr, rcd->imask);
8187 	/* force the above write on the chip and get a value back */
8188 	(void)read_csr(dd, addr);
8189 }
8190 
8191 /* force the receive interrupt */
8192 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8193 {
8194 	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8195 }
8196 
8197 /*
8198  * Return non-zero if a packet is present.
8199  *
8200  * This routine is called when rechecking for packets after the RcvAvail
8201  * interrupt has been cleared down.  First, do a quick check of memory for
8202  * a packet present.  If not found, use an expensive CSR read of the context
8203  * tail to determine the actual tail.  The CSR read is necessary because there
8204  * is no method to push pending DMAs to memory other than an interrupt and we
8205  * are trying to determine if we need to force an interrupt.
8206  */
8207 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8208 {
8209 	u32 tail;
8210 	int present;
8211 
8212 	if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8213 		present = (rcd->seq_cnt ==
8214 				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8215 	else /* is RDMA rtail */
8216 		present = (rcd->head != get_rcvhdrtail(rcd));
8217 
8218 	if (present)
8219 		return 1;
8220 
8221 	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8222 	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8223 	return rcd->head != tail;
8224 }
8225 
8226 /*
8227  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8228  * This routine will try to handle packets immediately (latency), but if
8229  * it finds too many, it will invoke the thread handler (bandwidth).  The
8230  * chip receive interrupt is *not* cleared down until this or the thread (if
8231  * invoked) is finished.  The intent is to avoid extra interrupts while we
8232  * are processing packets anyway.
8233  */
8234 static irqreturn_t receive_context_interrupt(int irq, void *data)
8235 {
8236 	struct hfi1_ctxtdata *rcd = data;
8237 	struct hfi1_devdata *dd = rcd->dd;
8238 	int disposition;
8239 	int present;
8240 
8241 	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8242 	this_cpu_inc(*dd->int_counter);
8243 	aspm_ctx_disable(rcd);
8244 
8245 	/* receive interrupt remains blocked while processing packets */
8246 	disposition = rcd->do_interrupt(rcd, 0);
8247 
8248 	/*
8249 	 * Too many packets were seen while processing packets in this
8250 	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
8251 	 * remains blocked.
8252 	 */
8253 	if (disposition == RCV_PKT_LIMIT)
8254 		return IRQ_WAKE_THREAD;
8255 
8256 	/*
8257 	 * The packet processor detected no more packets.  Clear the receive
8258 	 * interrupt and recheck for a packet that may have arrived
8259 	 * after the previous check and interrupt clear.  If a packet arrived,
8260 	 * force another interrupt.
8261 	 */
8262 	clear_recv_intr(rcd);
8263 	present = check_packet_present(rcd);
8264 	if (present)
8265 		force_recv_intr(rcd);
8266 
8267 	return IRQ_HANDLED;
8268 }
8269 
8270 /*
8271  * Receive packet thread handler.  This expects to be invoked with the
8272  * receive interrupt still blocked.
8273  */
8274 static irqreturn_t receive_context_thread(int irq, void *data)
8275 {
8276 	struct hfi1_ctxtdata *rcd = data;
8277 	int present;
8278 
8279 	/* receive interrupt is still blocked from the IRQ handler */
8280 	(void)rcd->do_interrupt(rcd, 1);
8281 
8282 	/*
8283 	 * The packet processor will only return if it detected no more
8284 	 * packets.  Hold IRQs here so we can safely clear the interrupt and
8285 	 * recheck for a packet that may have arrived after the previous
8286 	 * check and the interrupt clear.  If a packet arrived, force another
8287 	 * interrupt.
8288 	 */
8289 	local_irq_disable();
8290 	clear_recv_intr(rcd);
8291 	present = check_packet_present(rcd);
8292 	if (present)
8293 		force_recv_intr(rcd);
8294 	local_irq_enable();
8295 
8296 	return IRQ_HANDLED;
8297 }
8298 
8299 /* ========================================================================= */
8300 
8301 u32 read_physical_state(struct hfi1_devdata *dd)
8302 {
8303 	u64 reg;
8304 
8305 	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8306 	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8307 				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
8308 }
8309 
8310 u32 read_logical_state(struct hfi1_devdata *dd)
8311 {
8312 	u64 reg;
8313 
8314 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8315 	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8316 				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8317 }
8318 
8319 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8320 {
8321 	u64 reg;
8322 
8323 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8324 	/* clear current state, set new state */
8325 	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8326 	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8327 	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8328 }
8329 
8330 /*
8331  * Use the 8051 to read a LCB CSR.
8332  */
8333 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8334 {
8335 	u32 regno;
8336 	int ret;
8337 
8338 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8339 		if (acquire_lcb_access(dd, 0) == 0) {
8340 			*data = read_csr(dd, addr);
8341 			release_lcb_access(dd, 0);
8342 			return 0;
8343 		}
8344 		return -EBUSY;
8345 	}
8346 
8347 	/* register is an index of LCB registers: (offset - base) / 8 */
8348 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8349 	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8350 	if (ret != HCMD_SUCCESS)
8351 		return -EBUSY;
8352 	return 0;
8353 }
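/*
 * Example of the index math above (illustrative; the 0x18 offset is made
 * up for the example): an LCB CSR at DC_LCB_CFG_RUN + 0x18 is presented
 * to the 8051 as register index 0x18 >> 3 == 3.
 */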
8354 
8355 /*
8356  * Read an LCB CSR.  Access may not be in host control, so check.
8357  * Return 0 on success, -EBUSY on failure.
8358  */
8359 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8360 {
8361 	struct hfi1_pportdata *ppd = dd->pport;
8362 
8363 	/* if up, go through the 8051 for the value */
8364 	if (ppd->host_link_state & HLS_UP)
8365 		return read_lcb_via_8051(dd, addr, data);
8366 	/* if going up or down, no access */
8367 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8368 		return -EBUSY;
8369 	/* otherwise, host has access */
8370 	*data = read_csr(dd, addr);
8371 	return 0;
8372 }
8373 
8374 /*
8375  * Use the 8051 to write a LCB CSR.
8376  */
8377 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8378 {
8379 	u32 regno;
8380 	int ret;
8381 
8382 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8383 	    (dd->dc8051_ver < dc8051_ver(0, 20))) {
8384 		if (acquire_lcb_access(dd, 0) == 0) {
8385 			write_csr(dd, addr, data);
8386 			release_lcb_access(dd, 0);
8387 			return 0;
8388 		}
8389 		return -EBUSY;
8390 	}
8391 
8392 	/* register is an index of LCB registers: (offset - base) / 8 */
8393 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8394 	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8395 	if (ret != HCMD_SUCCESS)
8396 		return -EBUSY;
8397 	return 0;
8398 }
8399 
8400 /*
8401  * Write an LCB CSR.  Access may not be in host control, so check.
8402  * Return 0 on success, -EBUSY on failure.
8403  */
8404 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8405 {
8406 	struct hfi1_pportdata *ppd = dd->pport;
8407 
8408 	/* if up, go through the 8051 for the value */
8409 	if (ppd->host_link_state & HLS_UP)
8410 		return write_lcb_via_8051(dd, addr, data);
8411 	/* if going up or down, no access */
8412 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8413 		return -EBUSY;
8414 	/* otherwise, host has access */
8415 	write_csr(dd, addr, data);
8416 	return 0;
8417 }
8418 
8419 /*
8420  * Returns:
8421  *	< 0 = Linux error, not able to get access
8422  *	> 0 = 8051 command RETURN_CODE
8423  */
8424 static int do_8051_command(
8425 	struct hfi1_devdata *dd,
8426 	u32 type,
8427 	u64 in_data,
8428 	u64 *out_data)
8429 {
8430 	u64 reg, completed;
8431 	int return_code;
8432 	unsigned long flags;
8433 	unsigned long timeout;
8434 
8435 	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8436 
8437 	/*
8438 	 * Alternative to holding the lock for a long time:
8439 	 * - keep busy wait - have other users bounce off
8440 	 */
8441 	spin_lock_irqsave(&dd->dc8051_lock, flags);
8442 
8443 	/* We can't send any commands to the 8051 if it's in reset */
8444 	if (dd->dc_shutdown) {
8445 		return_code = -ENODEV;
8446 		goto fail;
8447 	}
8448 
8449 	/*
8450 	 * If an 8051 host command timed out previously, then the 8051 is
8451 	 * stuck.
8452 	 *
8453 	 * On first timeout, attempt to reset and restart the entire DC
8454 	 * block (including 8051). (Is this too big of a hammer?)
8455 	 *
8456 	 * If the 8051 times out a second time, the reset did not bring it
8457 	 * back to healthy life. In that case, fail any subsequent commands.
8458 	 */
8459 	if (dd->dc8051_timed_out) {
8460 		if (dd->dc8051_timed_out > 1) {
8461 			dd_dev_err(dd,
8462 				   "Previous 8051 host command timed out, skipping command %u\n",
8463 				   type);
8464 			return_code = -ENXIO;
8465 			goto fail;
8466 		}
8467 		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8468 		dc_shutdown(dd);
8469 		dc_start(dd);
8470 		spin_lock_irqsave(&dd->dc8051_lock, flags);
8471 	}
8472 
8473 	/*
8474 	 * If there is no timeout, then the 8051 command interface is
8475 	 * waiting for a command.
8476 	 */
8477 
8478 	/*
8479 	 * When writing an LCB CSR, out_data contains the full value
8480 	 * to be written, while in_data contains the relative LCB
8481 	 * address in 7:0.  Do the work here, rather than in the caller,
8482 	 * of distributing the write data to where it needs to go:
8483 	 *
8484 	 * Write data
8485 	 *   39:00 -> in_data[47:8]
8486 	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8487 	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8488 	 */
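	/*
	 * Worked example of the distribution above (illustrative value):
	 * writing 0xABCDEF0123456789 to relative LCB address 0x10 gives
	 *   in_data = 0x10 | (0x0123456789 << 8)   (write data bits 39:0)
	 *   RETURN_CODE field = 0xEF               (write data bits 47:40)
	 *   RSP_DATA field    = 0xABCD             (write data bits 63:48)
	 */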
8489 	if (type == HCMD_WRITE_LCB_CSR) {
8490 		in_data |= ((*out_data) & 0xffffffffffull) << 8;
8491 		reg = ((((*out_data) >> 40) & 0xff) <<
8492 				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8493 		      | ((((*out_data) >> 48) & 0xffff) <<
8494 				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8495 		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8496 	}
8497 
8498 	/*
8499 	 * Do two writes: the first to stabilize the type and req_data, the
8500 	 * second to activate.
8501 	 */
8502 	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8503 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8504 		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8505 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8506 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8507 	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8508 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8509 
8510 	/* wait for completion, alternate: interrupt */
8511 	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8512 	while (1) {
8513 		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8514 		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8515 		if (completed)
8516 			break;
8517 		if (time_after(jiffies, timeout)) {
8518 			dd->dc8051_timed_out++;
8519 			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8520 			if (out_data)
8521 				*out_data = 0;
8522 			return_code = -ETIMEDOUT;
8523 			goto fail;
8524 		}
8525 		udelay(2);
8526 	}
8527 
8528 	if (out_data) {
8529 		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8530 				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8531 		if (type == HCMD_READ_LCB_CSR) {
8532 			/* top 16 bits are in a different register */
8533 			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8534 				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8535 				<< (48
8536 				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8537 		}
8538 	}
8539 	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8540 				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8541 	dd->dc8051_timed_out = 0;
8542 	/*
8543 	 * Clear command for next user.
8544 	 */
8545 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8546 
8547 fail:
8548 	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8549 
8550 	return return_code;
8551 }
8552 
8553 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8554 {
8555 	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8556 }
8557 
8558 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8559 		     u8 lane_id, u32 config_data)
8560 {
8561 	u64 data;
8562 	int ret;
8563 
8564 	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8565 		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8566 		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
8567 	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8568 	if (ret != HCMD_SUCCESS) {
8569 		dd_dev_err(dd,
8570 			   "load 8051 config: field id %d, lane %d, err %d\n",
8571 			   (int)field_id, (int)lane_id, ret);
8572 	}
8573 	return ret;
8574 }
8575 
8576 /*
8577  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8578  * set the result, even on error.
8579  * Return 0 on success, -errno on failure
8580  */
8581 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8582 		     u32 *result)
8583 {
8584 	u64 big_data;
8585 	u32 addr;
8586 	int ret;
8587 
8588 	/* address start depends on the lane_id */
8589 	if (lane_id < 4)
8590 		addr = (4 * NUM_GENERAL_FIELDS)
8591 			+ (lane_id * 4 * NUM_LANE_FIELDS);
8592 	else
8593 		addr = 0;
8594 	addr += field_id * 4;
8595 
8596 	/* read is in 8-byte chunks, hardware will truncate the address down */
8597 	ret = read_8051_data(dd, addr, 8, &big_data);
8598 
8599 	if (ret == 0) {
8600 		/* extract the 4 bytes we want */
8601 		if (addr & 0x4)
8602 			*result = (u32)(big_data >> 32);
8603 		else
8604 			*result = (u32)big_data;
8605 	} else {
8606 		*result = 0;
8607 		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8608 			   __func__, lane_id, field_id);
8609 	}
8610 
8611 	return ret;
8612 }
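/*
 * Address example for the layout above (illustrative; assumes the
 * NUM_GENERAL_FIELDS/NUM_LANE_FIELDS values from the driver headers):
 * lane 2, field 3 reads from
 *   addr = 4 * NUM_GENERAL_FIELDS + 2 * 4 * NUM_LANE_FIELDS + 3 * 4
 * The 8-byte read returns both halves of the chunk; addr & 0x4 selects
 * the upper 32 bits, otherwise the lower 32 bits are used.
 */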
8613 
8614 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8615 			      u8 continuous)
8616 {
8617 	u32 frame;
8618 
8619 	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8620 		| power_management << POWER_MANAGEMENT_SHIFT;
8621 	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8622 				GENERAL_CONFIG, frame);
8623 }
8624 
8625 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8626 				 u16 vl15buf, u8 crc_sizes)
8627 {
8628 	u32 frame;
8629 
8630 	frame = (u32)vau << VAU_SHIFT
8631 		| (u32)z << Z_SHIFT
8632 		| (u32)vcu << VCU_SHIFT
8633 		| (u32)vl15buf << VL15BUF_SHIFT
8634 		| (u32)crc_sizes << CRC_SIZES_SHIFT;
8635 	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8636 				GENERAL_CONFIG, frame);
8637 }
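/*
 * Frame packing sketch for the helper above (illustrative values, not
 * from the driver): vau = 3, z = 1, vcu = 2, vl15buf = 0x110 and
 * crc_sizes = 0x7 are each shifted to their *_SHIFT position and OR'd
 * into the single 32-bit GENERAL_CONFIG frame that load_8051_config()
 * hands to the 8051.
 */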
8638 
8639 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8640 				     u8 *flag_bits, u16 *link_widths)
8641 {
8642 	u32 frame;
8643 
8644 	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8645 			 &frame);
8646 	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8647 	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8648 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8649 }
8650 
8651 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8652 				     u8 misc_bits,
8653 				     u8 flag_bits,
8654 				     u16 link_widths)
8655 {
8656 	u32 frame;
8657 
8658 	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8659 		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8660 		| (u32)link_widths << LINK_WIDTH_SHIFT;
8661 	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8662 		     frame);
8663 }
8664 
8665 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8666 				 u8 device_rev)
8667 {
8668 	u32 frame;
8669 
8670 	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8671 		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8672 	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8673 }
8674 
8675 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8676 				  u8 *device_rev)
8677 {
8678 	u32 frame;
8679 
8680 	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8681 	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8682 	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8683 			& REMOTE_DEVICE_REV_MASK;
8684 }
8685 
8686 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8687 {
8688 	u32 frame;
8689 
8690 	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8691 	*ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8692 	*ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8693 }
8694 
8695 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8696 			       u8 *continuous)
8697 {
8698 	u32 frame;
8699 
8700 	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8701 	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8702 					& POWER_MANAGEMENT_MASK;
8703 	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8704 					& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8705 }
8706 
8707 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8708 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8709 {
8710 	u32 frame;
8711 
8712 	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8713 	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
8714 	*z = (frame >> Z_SHIFT) & Z_MASK;
8715 	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8716 	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8717 	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8718 }
8719 
8720 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8721 				      u8 *remote_tx_rate,
8722 				      u16 *link_widths)
8723 {
8724 	u32 frame;
8725 
8726 	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8727 			 &frame);
8728 	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8729 				& REMOTE_TX_RATE_MASK;
8730 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8731 }
8732 
8733 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8734 {
8735 	u32 frame;
8736 
8737 	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8738 	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8739 }
8740 
8741 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8742 {
8743 	u32 frame;
8744 
8745 	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8746 	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8747 }
8748 
8749 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8750 {
8751 	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8752 }
8753 
8754 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8755 {
8756 	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8757 }
8758 
8759 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8760 {
8761 	u32 frame;
8762 	int ret;
8763 
8764 	*link_quality = 0;
8765 	if (dd->pport->host_link_state & HLS_UP) {
8766 		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8767 				       &frame);
8768 		if (ret == 0)
8769 			*link_quality = (frame >> LINK_QUALITY_SHIFT)
8770 						& LINK_QUALITY_MASK;
8771 	}
8772 }
8773 
8774 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8775 {
8776 	u32 frame;
8777 
8778 	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8779 	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8780 }
8781 
8782 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8783 {
8784 	u32 frame;
8785 
8786 	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8787 	*ldr = (frame & 0xff);
8788 }
8789 
8790 static int read_tx_settings(struct hfi1_devdata *dd,
8791 			    u8 *enable_lane_tx,
8792 			    u8 *tx_polarity_inversion,
8793 			    u8 *rx_polarity_inversion,
8794 			    u8 *max_rate)
8795 {
8796 	u32 frame;
8797 	int ret;
8798 
8799 	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8800 	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8801 				& ENABLE_LANE_TX_MASK;
8802 	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8803 				& TX_POLARITY_INVERSION_MASK;
8804 	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8805 				& RX_POLARITY_INVERSION_MASK;
8806 	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8807 	return ret;
8808 }
8809 
8810 static int write_tx_settings(struct hfi1_devdata *dd,
8811 			     u8 enable_lane_tx,
8812 			     u8 tx_polarity_inversion,
8813 			     u8 rx_polarity_inversion,
8814 			     u8 max_rate)
8815 {
8816 	u32 frame;
8817 
8818 	/* no need to mask, all variable sizes match field widths */
8819 	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8820 		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8821 		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8822 		| max_rate << MAX_RATE_SHIFT;
8823 	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8824 }
8825 
8826 /*
8827  * Read an idle LCB message.
8828  *
8829  * Returns 0 on success, -EINVAL on error
8830  */
8831 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8832 {
8833 	int ret;
8834 
8835 	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8836 	if (ret != HCMD_SUCCESS) {
8837 		dd_dev_err(dd, "read idle message: type %d, err %d\n",
8838 			   (u32)type, ret);
8839 		return -EINVAL;
8840 	}
8841 	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8842 	/* return only the payload as we already know the type */
8843 	*data_out >>= IDLE_PAYLOAD_SHIFT;
8844 	return 0;
8845 }
8846 
8847 /*
8848  * Read an idle SMA message.  To be done in response to a notification from
8849  * the 8051.
8850  *
8851  * Returns 0 on success, -EINVAL on error
8852  */
8853 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8854 {
8855 	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8856 				 data);
8857 }
8858 
8859 /*
8860  * Send an idle LCB message.
8861  *
8862  * Returns 0 on success, -EINVAL on error
8863  */
8864 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8865 {
8866 	int ret;
8867 
8868 	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8869 	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8870 	if (ret != HCMD_SUCCESS) {
8871 		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8872 			   data, ret);
8873 		return -EINVAL;
8874 	}
8875 	return 0;
8876 }
8877 
8878 /*
8879  * Send an idle SMA message.
8880  *
8881  * Returns 0 on success, -EINVAL on error
8882  */
8883 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8884 {
8885 	u64 data;
8886 
8887 	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8888 		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8889 	return send_idle_message(dd, data);
8890 }
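/*
 * Idle SMA round trip (illustrative): send_idle_sma(dd, msg) packs
 * (msg & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT together with the
 * IDLE_SMA type; read_idle_sma() reverses this by shifting the received
 * value right by IDLE_PAYLOAD_SHIFT, returning only the payload.
 */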
8891 
8892 /*
8893  * Initialize the LCB then do a quick link up.  This may or may not be
8894  * in loopback.
8895  *
8896  * return 0 on success, -errno on error
8897  */
8898 static int do_quick_linkup(struct hfi1_devdata *dd)
8899 {
8900 	u64 reg;
8901 	unsigned long timeout;
8902 	int ret;
8903 
8904 	lcb_shutdown(dd, 0);
8905 
8906 	if (loopback) {
8907 		/* LCB_CFG_LOOPBACK.VAL = 2 */
8908 		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
8909 		write_csr(dd, DC_LCB_CFG_LOOPBACK,
8910 			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8911 		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8912 	}
8913 
8914 	/* start the LCBs */
8915 	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8916 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8917 
8918 	/* simulator only loopback steps */
8919 	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8920 		/* LCB_CFG_RUN.EN = 1 */
8921 		write_csr(dd, DC_LCB_CFG_RUN,
8922 			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8923 
8924 		/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8925 		timeout = jiffies + msecs_to_jiffies(10);
8926 		while (1) {
8927 			reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8928 			if (reg)
8929 				break;
8930 			if (time_after(jiffies, timeout)) {
8931 				dd_dev_err(dd,
8932 					   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8933 				return -ETIMEDOUT;
8934 			}
8935 			udelay(2);
8936 		}
8937 
8938 		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8939 			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8940 	}
8941 
8942 	if (!loopback) {
8943 		/*
8944 		 * When doing quick linkup and not in loopback, both
8945 		 * sides must be done with LCB set-up before either
8946 		 * starts the quick linkup.  Put a delay here so that
8947 		 * both sides can be started and have a chance to be
8948 		 * done with LCB set up before resuming.
8949 		 */
8950 		dd_dev_err(dd,
8951 			   "Pausing for peer to be finished with LCB set up\n");
8952 		msleep(5000);
8953 		dd_dev_err(dd, "Continuing with quick linkup\n");
8954 	}
8955 
8956 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8957 	set_8051_lcb_access(dd);
8958 
8959 	/*
8960 	 * State "quick" LinkUp request sets the physical link state to
8961 	 * LinkUp without a verify capability sequence.
8962 	 * This state is in simulator v37 and later.
8963 	 */
8964 	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8965 	if (ret != HCMD_SUCCESS) {
8966 		dd_dev_err(dd,
8967 			   "%s: set physical link state to quick LinkUp failed with return %d\n",
8968 			   __func__, ret);
8969 
8970 		set_host_lcb_access(dd);
8971 		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8972 
8973 		if (ret >= 0)
8974 			ret = -EINVAL;
8975 		return ret;
8976 	}
8977 
8978 	return 0; /* success */
8979 }
8980 
8981 /*
8982  * Set the SerDes to internal loopback mode.
8983  * Returns 0 on success, -errno on error.
8984  */
8985 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8986 {
8987 	int ret;
8988 
8989 	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8990 	if (ret == HCMD_SUCCESS)
8991 		return 0;
8992 	dd_dev_err(dd,
8993 		   "Set physical link state to SerDes Loopback failed with return %d\n",
8994 		   ret);
8995 	if (ret >= 0)
8996 		ret = -EINVAL;
8997 	return ret;
8998 }
8999 
9000 /*
9001  * Do all special steps to set up loopback.
9002  */
9003 static int init_loopback(struct hfi1_devdata *dd)
9004 {
9005 	dd_dev_info(dd, "Entering loopback mode\n");
9006 
9007 	/* all loopbacks should disable self GUID check */
9008 	write_csr(dd, DC_DC8051_CFG_MODE,
9009 		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9010 
9011 	/*
9012 	 * The simulator has only one loopback option - LCB.  Switch
9013 	 * to that option, which includes quick link up.
9014 	 *
9015 	 * Accept all valid loopback values.
9016 	 */
9017 	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9018 	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9019 	     loopback == LOOPBACK_CABLE)) {
9020 		loopback = LOOPBACK_LCB;
9021 		quick_linkup = 1;
9022 		return 0;
9023 	}
9024 
9025 	/* handle serdes loopback */
9026 	if (loopback == LOOPBACK_SERDES) {
9027 		/* internal serdes loopback needs quick linkup on RTL */
9028 		if (dd->icode == ICODE_RTL_SILICON)
9029 			quick_linkup = 1;
9030 		return set_serdes_loopback_mode(dd);
9031 	}
9032 
9033 	/* LCB loopback - handled at poll time */
9034 	if (loopback == LOOPBACK_LCB) {
9035 		quick_linkup = 1; /* LCB is always quick linkup */
9036 
9037 		/* not supported in emulation due to emulation RTL changes */
9038 		if (dd->icode == ICODE_FPGA_EMULATION) {
9039 			dd_dev_err(dd,
9040 				   "LCB loopback not supported in emulation\n");
9041 			return -EINVAL;
9042 		}
9043 		return 0;
9044 	}
9045 
9046 	/* external cable loopback requires no extra steps */
9047 	if (loopback == LOOPBACK_CABLE)
9048 		return 0;
9049 
9050 	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9051 	return -EINVAL;
9052 }
9053 
9054 /*
9055  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9056  * used in the Verify Capability link width attribute.
9057  */
9058 static u16 opa_to_vc_link_widths(u16 opa_widths)
9059 {
9060 	int i;
9061 	u16 result = 0;
9062 
9063 	static const struct link_bits {
9064 		u16 from;
9065 		u16 to;
9066 	} opa_link_xlate[] = {
9067 		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9068 		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9069 		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9070 		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9071 	};
9072 
9073 	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9074 		if (opa_widths & opa_link_xlate[i].from)
9075 			result |= opa_link_xlate[i].to;
9076 	}
9077 	return result;
9078 }
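/*
 * Translation example (illustrative): opa_widths with OPA_LINK_WIDTH_1X
 * and OPA_LINK_WIDTH_4X set returns 0b1001 -- bit 0 for 1X and bit 3
 * for 4X in the Verify Capability link width encoding.
 */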
9079 
9080 /*
9081  * Set link attributes before moving to polling.
9082  */
9083 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9084 {
9085 	struct hfi1_devdata *dd = ppd->dd;
9086 	u8 enable_lane_tx;
9087 	u8 tx_polarity_inversion;
9088 	u8 rx_polarity_inversion;
9089 	int ret;
9090 
9091 	/* reset our fabric serdes to clear any lingering problems */
9092 	fabric_serdes_reset(dd);
9093 
9094 	/* set the local tx rate - need to read-modify-write */
9095 	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9096 			       &rx_polarity_inversion, &ppd->local_tx_rate);
9097 	if (ret)
9098 		goto set_local_link_attributes_fail;
9099 
9100 	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9101 		/* set the tx rate to the fastest enabled */
9102 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9103 			ppd->local_tx_rate = 1;
9104 		else
9105 			ppd->local_tx_rate = 0;
9106 	} else {
9107 		/* set the tx rate to all enabled */
9108 		ppd->local_tx_rate = 0;
9109 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9110 			ppd->local_tx_rate |= 2;
9111 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9112 			ppd->local_tx_rate |= 1;
9113 	}
9114 
9115 	enable_lane_tx = 0xF; /* enable all four lanes */
9116 	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9117 				rx_polarity_inversion, ppd->local_tx_rate);
9118 	if (ret != HCMD_SUCCESS)
9119 		goto set_local_link_attributes_fail;
9120 
9121 	/*
9122 	 * DC supports continuous updates.
9123 	 */
9124 	ret = write_vc_local_phy(dd,
9125 				 0 /* no power management */,
9126 				 1 /* continuous updates */);
9127 	if (ret != HCMD_SUCCESS)
9128 		goto set_local_link_attributes_fail;
9129 
9130 	/* z=1 in the next call: AU of 0 is not supported by the hardware */
9131 	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9132 				    ppd->port_crc_mode_enabled);
9133 	if (ret != HCMD_SUCCESS)
9134 		goto set_local_link_attributes_fail;
9135 
9136 	ret = write_vc_local_link_width(dd, 0, 0,
9137 					opa_to_vc_link_widths(
9138 						ppd->link_width_enabled));
9139 	if (ret != HCMD_SUCCESS)
9140 		goto set_local_link_attributes_fail;
9141 
9142 	/* let peer know who we are */
9143 	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9144 	if (ret == HCMD_SUCCESS)
9145 		return 0;
9146 
9147 set_local_link_attributes_fail:
9148 	dd_dev_err(dd,
9149 		   "Failed to set local link attributes, return 0x%x\n",
9150 		   ret);
9151 	return ret;
9152 }
9153 
9154 /*
9155  * Call this to start the link.
9156  * Do not do anything if the link is disabled.
9157  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9158  */
9159 int start_link(struct hfi1_pportdata *ppd)
9160 {
9161 	/*
9162 	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9163 	 * error rate.  Needs to be done before starting the link.
9164 	 */
9165 	tune_serdes(ppd);
9166 
9167 	if (!ppd->link_enabled) {
9168 		dd_dev_info(ppd->dd,
9169 			    "%s: stopping link start because link is disabled\n",
9170 			    __func__);
9171 		return 0;
9172 	}
9173 	if (!ppd->driver_link_ready) {
9174 		dd_dev_info(ppd->dd,
9175 			    "%s: stopping link start because driver is not ready\n",
9176 			    __func__);
9177 		return 0;
9178 	}
9179 
9180 	/*
9181 	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9182 	 * pkey table can be configured properly if the HFI unit is connected
9183 	 * to a switch port with MgmtAllowed=NO.
9184 	 */
9185 	clear_full_mgmt_pkey(ppd);
9186 
9187 	return set_link_state(ppd, HLS_DN_POLL);
9188 }
9189 
9190 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9191 {
9192 	struct hfi1_devdata *dd = ppd->dd;
9193 	u64 mask;
9194 	unsigned long timeout;
9195 
9196 	/*
9197 	 * Some QSFP cables have a quirk that asserts the IntN line as a side
9198 	 * effect of power up on plug-in. We ignore this false positive
9199 	 * interrupt until the module has finished powering up by waiting for
9200 	 * a minimum timeout of the module inrush initialization time of
9201 	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9202 	 * module have stabilized.
9203 	 */
9204 	msleep(500);
9205 
9206 	/*
9207 	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9208 	 */
9209 	timeout = jiffies + msecs_to_jiffies(2000);
9210 	while (1) {
9211 		mask = read_csr(dd, dd->hfi1_id ?
9212 				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9213 		if (!(mask & QSFP_HFI0_INT_N))
9214 			break;
9215 		if (time_after(jiffies, timeout)) {
9216 			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9217 				    __func__);
9218 			break;
9219 		}
9220 		udelay(2);
9221 	}
9222 }
9223 
9224 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9225 {
9226 	struct hfi1_devdata *dd = ppd->dd;
9227 	u64 mask;
9228 
9229 	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9230 	if (enable) {
9231 		/*
9232 		 * Clear the status register to avoid an immediate interrupt
9233 		 * when we re-enable the IntN pin
9234 		 */
9235 		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9236 			  QSFP_HFI0_INT_N);
9237 		mask |= (u64)QSFP_HFI0_INT_N;
9238 	} else {
9239 		mask &= ~(u64)QSFP_HFI0_INT_N;
9240 	}
9241 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9242 }
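/*
 * Usage note (illustrative): reset_qsfp() below brackets the reset with
 * set_qsfp_int_n(ppd, 0) / set_qsfp_int_n(ppd, 1) so the INT_N glitch
 * produced by the reset itself never reaches the interrupt handler.
 */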
9243 
9244 void reset_qsfp(struct hfi1_pportdata *ppd)
9245 {
9246 	struct hfi1_devdata *dd = ppd->dd;
9247 	u64 mask, qsfp_mask;
9248 
9249 	/* Disable INT_N from triggering QSFP interrupts */
9250 	set_qsfp_int_n(ppd, 0);
9251 
9252 	/* Reset the QSFP */
9253 	mask = (u64)QSFP_HFI0_RESET_N;
9254 
9255 	qsfp_mask = read_csr(dd,
9256 			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9257 	qsfp_mask &= ~mask;
9258 	write_csr(dd,
9259 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9260 
9261 	udelay(10);
9262 
9263 	qsfp_mask |= mask;
9264 	write_csr(dd,
9265 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9266 
9267 	wait_for_qsfp_init(ppd);
9268 
9269 	/*
9270 	 * Allow INT_N to trigger the QSFP interrupt to watch
9271 	 * for alarms and warnings
9272 	 */
9273 	set_qsfp_int_n(ppd, 1);
9274 }
9275 
9276 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9277 					u8 *qsfp_interrupt_status)
9278 {
9279 	struct hfi1_devdata *dd = ppd->dd;
9280 
9281 	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9282 	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9283 		dd_dev_info(dd, "%s: QSFP cable on fire\n",
9284 			    __func__);
9285 
9286 	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9287 	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9288 		dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9289 			    __func__);
9290 
9291 	/*
9292 	 * The remaining alarms/warnings don't matter if the link is down.
9293 	 */
9294 	if (ppd->host_link_state & HLS_DOWN)
9295 		return 0;
9296 
9297 	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9298 	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9299 		dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9300 			    __func__);
9301 
9302 	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9303 	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9304 		dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9305 			    __func__);
9306 
9307 	/* Byte 2 is vendor specific */
9308 
9309 	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9310 	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9311 		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9312 			    __func__);
9313 
9314 	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9315 	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9316 		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9317 			    __func__);
9318 
9319 	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9320 	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9321 		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9322 			    __func__);
9323 
9324 	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9325 	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9326 		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9327 			    __func__);
9328 
9329 	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9330 	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9331 		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9332 			    __func__);
9333 
9334 	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9335 	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9336 		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9337 			    __func__);
9338 
9339 	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9340 	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9341 		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9342 			    __func__);
9343 
9344 	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9345 	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9346 		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9347 			    __func__);
9348 
9349 	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9350 	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9351 		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9352 			    __func__);
9353 
9354 	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9355 	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9356 		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9357 			    __func__);
9358 
9359 	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9360 	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9361 		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9362 			    __func__);
9363 
9364 	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9365 	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9366 		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9367 			    __func__);
9368 
9369 	/* Bytes 9-10 and 11-12 are reserved */
9370 	/* Bytes 13-15 are vendor specific */
9371 
9372 	return 0;
9373 }
9374 
9375 /* This routine will only be scheduled if the QSFP module-present signal is asserted */
9376 void qsfp_event(struct work_struct *work)
9377 {
9378 	struct qsfp_data *qd;
9379 	struct hfi1_pportdata *ppd;
9380 	struct hfi1_devdata *dd;
9381 
9382 	qd = container_of(work, struct qsfp_data, qsfp_work);
9383 	ppd = qd->ppd;
9384 	dd = ppd->dd;
9385 
9386 	/* Sanity check */
9387 	if (!qsfp_mod_present(ppd))
9388 		return;
9389 
9390 	/*
9391 	 * Turn DC back on after cable has been re-inserted. Up until
9392 	 * now, the DC has been in reset to save power.
9393 	 */
9394 	dc_start(dd);
9395 
9396 	if (qd->cache_refresh_required) {
9397 		set_qsfp_int_n(ppd, 0);
9398 
9399 		wait_for_qsfp_init(ppd);
9400 
9401 		/*
9402 		 * Allow INT_N to trigger the QSFP interrupt to watch
9403 		 * for alarms and warnings
9404 		 */
9405 		set_qsfp_int_n(ppd, 1);
9406 
9407 		start_link(ppd);
9408 	}
9409 
9410 	if (qd->check_interrupt_flags) {
9411 		u8 qsfp_interrupt_status[16] = {0,};
9412 
9413 		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9414 				  &qsfp_interrupt_status[0], 16) != 16) {
9415 			dd_dev_info(dd,
9416 				    "%s: Failed to read status of QSFP module\n",
9417 				    __func__);
9418 		} else {
9419 			unsigned long flags;
9420 
9421 			handle_qsfp_error_conditions(
9422 					ppd, qsfp_interrupt_status);
9423 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9424 			ppd->qsfp_info.check_interrupt_flags = 0;
9425 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9426 					       flags);
9427 		}
9428 	}
9429 }
9430 
9431 static void init_qsfp_int(struct hfi1_devdata *dd)
9432 {
9433 	struct hfi1_pportdata *ppd = dd->pport;
9434 	u64 qsfp_mask, cce_int_mask;
9435 	const int qsfp1_int_smask = QSFP1_INT % 64;
9436 	const int qsfp2_int_smask = QSFP2_INT % 64;
9437 
9438 	/*
9439 	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9440 	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9441 	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9442 	 * the index of the appropriate CSR in the CCEIntMask CSR array
9443 	 */
9444 	cce_int_mask = read_csr(dd, CCE_INT_MASK +
9445 				(8 * (QSFP1_INT / 64)));
9446 	if (dd->hfi1_id) {
9447 		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9448 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9449 			  cce_int_mask);
9450 	} else {
9451 		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9452 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9453 			  cce_int_mask);
9454 	}
9455 
9456 	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9457 	/* Clear current status to avoid spurious interrupts */
9458 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9459 		  qsfp_mask);
9460 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9461 		  qsfp_mask);
9462 
9463 	set_qsfp_int_n(ppd, 0);
9464 
9465 	/* Handle active low nature of INT_N and MODPRST_N pins */
9466 	if (qsfp_mod_present(ppd))
9467 		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9468 	write_csr(dd,
9469 		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9470 		  qsfp_mask);
9471 }
9472 
9473 /*
9474  * Do a one-time initialize of the LCB block.
9475  */
9476 static void init_lcb(struct hfi1_devdata *dd)
9477 {
9478 	/* simulator does not correctly handle LCB cclk loopback, skip */
9479 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9480 		return;
9481 
9482 	/* the DC has been reset earlier in the driver load */
9483 
9484 	/* set LCB for cclk loopback on the port */
9485 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9486 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9487 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9488 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9489 	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9490 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9491 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9492 }
9493 
9494 /*
9495  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9496  * on error.
9497  */
9498 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9499 {
9500 	int ret;
9501 	u8 status;
9502 
9503 	/* report success if not a QSFP */
9504 	if (ppd->port_type != PORT_TYPE_QSFP)
9505 		return 0;
9506 
9507 	/* read byte 2, the status byte */
9508 	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9509 	if (ret < 0)
9510 		return ret;
9511 	if (ret != 1)
9512 		return -EIO;
9513 
9514 	return 0; /* success */
9515 }
9516 
9517 /*
9518  * Values for QSFP retry.
9519  *
9520  * Give up after 10s (20 x 500ms).  The overall timeout was
9521  * determined empirically on a large cluster.
9522  */
9523 #define MAX_QSFP_RETRIES 20
9524 #define QSFP_RETRY_WAIT 500 /* msec */
9525 
9526 /*
9527  * Try a QSFP read.  If it fails, schedule a retry for later.
9528  * Called on first link activation after driver load.
9529  */
9530 static void try_start_link(struct hfi1_pportdata *ppd)
9531 {
9532 	if (test_qsfp_read(ppd)) {
9533 		/* read failed */
9534 		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9535 			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9536 			return;
9537 		}
9538 		dd_dev_info(ppd->dd,
9539 			    "QSFP not responding, waiting and retrying %d\n",
9540 			    (int)ppd->qsfp_retry_count);
9541 		ppd->qsfp_retry_count++;
9542 		queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9543 				   msecs_to_jiffies(QSFP_RETRY_WAIT));
9544 		return;
9545 	}
9546 	ppd->qsfp_retry_count = 0;
9547 
9548 	start_link(ppd);
9549 }
9550 
9551 /*
9552  * Workqueue function to start the link after a delay.
9553  */
9554 void handle_start_link(struct work_struct *work)
9555 {
9556 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9557 						  start_link_work.work);
9558 	try_start_link(ppd);
9559 }
9560 
9561 int bringup_serdes(struct hfi1_pportdata *ppd)
9562 {
9563 	struct hfi1_devdata *dd = ppd->dd;
9564 	u64 guid;
9565 	int ret;
9566 
9567 	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9568 		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9569 
9570 	guid = ppd->guid;
9571 	if (!guid) {
9572 		if (dd->base_guid)
9573 			guid = dd->base_guid + ppd->port - 1;
9574 		ppd->guid = guid;
9575 	}
9576 
9577 	/* Set linkinit_reason on power up per OPA spec */
9578 	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9579 
9580 	/* one-time init of the LCB */
9581 	init_lcb(dd);
9582 
9583 	if (loopback) {
9584 		ret = init_loopback(dd);
9585 		if (ret < 0)
9586 			return ret;
9587 	}
9588 
9589 	get_port_type(ppd);
9590 	if (ppd->port_type == PORT_TYPE_QSFP) {
9591 		set_qsfp_int_n(ppd, 0);
9592 		wait_for_qsfp_init(ppd);
9593 		set_qsfp_int_n(ppd, 1);
9594 	}
9595 
9596 	try_start_link(ppd);
9597 	return 0;
9598 }
9599 
9600 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9601 {
9602 	struct hfi1_devdata *dd = ppd->dd;
9603 
9604 	/*
9605 	 * Shut down the link and keep it down.  First clear the flag that
9606 	 * says the driver wants to allow the link to be up
9607 	 * (driver_link_ready).  Then make sure the link is not
9608 	 * automatically restarted (link_enabled).  Cancel any pending
9609 	 * restart.  Finally, go offline.
9610 	 */
9611 	ppd->driver_link_ready = 0;
9612 	ppd->link_enabled = 0;
9613 
9614 	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9615 	flush_delayed_work(&ppd->start_link_work);
9616 	cancel_delayed_work_sync(&ppd->start_link_work);
9617 
9618 	ppd->offline_disabled_reason =
9619 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9620 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9621 			     OPA_LINKDOWN_REASON_SMA_DISABLED);
9622 	set_link_state(ppd, HLS_DN_OFFLINE);
9623 
9624 	/* disable the port */
9625 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9626 }
9627 
9628 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9629 {
9630 	struct hfi1_pportdata *ppd;
9631 	int i;
9632 
9633 	ppd = (struct hfi1_pportdata *)(dd + 1);
9634 	for (i = 0; i < dd->num_pports; i++, ppd++) {
9635 		ppd->ibport_data.rvp.rc_acks = NULL;
9636 		ppd->ibport_data.rvp.rc_qacks = NULL;
9637 		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9638 		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9639 		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9640 		if (!ppd->ibport_data.rvp.rc_acks ||
9641 		    !ppd->ibport_data.rvp.rc_delayed_comp ||
9642 		    !ppd->ibport_data.rvp.rc_qacks)
9643 			return -ENOMEM;
9644 	}
9645 
9646 	return 0;
9647 }
9648 
9649 static const char * const pt_names[] = {
9650 	"expected",
9651 	"eager",
9652 	"invalid"
9653 };
9654 
9655 static const char *pt_name(u32 type)
9656 {
9657 	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9658 }
9659 
9660 /*
9661  * index is the index into the receive array
9662  */
9663 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9664 		  u32 type, unsigned long pa, u16 order)
9665 {
9666 	u64 reg;
9667 	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9668 			      (dd->kregbase + RCV_ARRAY));
9669 
9670 	if (!(dd->flags & HFI1_PRESENT))
9671 		goto done;
9672 
9673 	if (type == PT_INVALID) {
9674 		pa = 0;
9675 	} else if (type > PT_INVALID) {
9676 		dd_dev_err(dd,
9677 			   "unexpected receive array type %u for index %u, not handled\n",
9678 			   type, index);
9679 		goto done;
9680 	}
9681 
9682 	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9683 		  pt_name(type), index, pa, (unsigned long)order);
9684 
9685 #define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
9686 	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9687 		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9688 		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9689 					<< RCV_ARRAY_RT_ADDR_SHIFT;
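	/*
	 * Worked example (illustrative address only): for a 4 KB-aligned
	 * pa of 0x12345000, pa >> RT_ADDR_SHIFT = 0x12345 lands in the
	 * address field, 'order' fills the buffer-size field, and the
	 * write-enable bit commits the entry.
	 */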
9690 	writeq(reg, base + (index * 8));
9691 
9692 	if (type == PT_EAGER)
9693 		/*
9694 		 * Eager entries are written one-by-one so we have to push them
9695 		 * after we write the entry.
9696 		 */
9697 		flush_wc();
9698 done:
9699 	return;
9700 }
9701 
9702 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9703 {
9704 	struct hfi1_devdata *dd = rcd->dd;
9705 	u32 i;
9706 
9707 	/* this could be optimized */
9708 	for (i = rcd->eager_base; i < rcd->eager_base +
9709 		     rcd->egrbufs.alloced; i++)
9710 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9711 
9712 	for (i = rcd->expected_base;
9713 			i < rcd->expected_base + rcd->expected_count; i++)
9714 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9715 }
9716 
9717 struct ib_header *hfi1_get_msgheader(
9718 	struct hfi1_devdata *dd, __le32 *rhf_addr)
9719 {
9720 	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9721 
9722 	return (struct ib_header *)
9723 		(rhf_addr - dd->rhf_offset + offset);
9724 }
9725 
9726 static const char * const ib_cfg_name_strings[] = {
9727 	"HFI1_IB_CFG_LIDLMC",
9728 	"HFI1_IB_CFG_LWID_DG_ENB",
9729 	"HFI1_IB_CFG_LWID_ENB",
9730 	"HFI1_IB_CFG_LWID",
9731 	"HFI1_IB_CFG_SPD_ENB",
9732 	"HFI1_IB_CFG_SPD",
9733 	"HFI1_IB_CFG_RXPOL_ENB",
9734 	"HFI1_IB_CFG_LREV_ENB",
9735 	"HFI1_IB_CFG_LINKLATENCY",
9736 	"HFI1_IB_CFG_HRTBT",
9737 	"HFI1_IB_CFG_OP_VLS",
9738 	"HFI1_IB_CFG_VL_HIGH_CAP",
9739 	"HFI1_IB_CFG_VL_LOW_CAP",
9740 	"HFI1_IB_CFG_OVERRUN_THRESH",
9741 	"HFI1_IB_CFG_PHYERR_THRESH",
9742 	"HFI1_IB_CFG_LINKDEFAULT",
9743 	"HFI1_IB_CFG_PKEYS",
9744 	"HFI1_IB_CFG_MTU",
9745 	"HFI1_IB_CFG_LSTATE",
9746 	"HFI1_IB_CFG_VL_HIGH_LIMIT",
9747 	"HFI1_IB_CFG_PMA_TICKS",
9748 	"HFI1_IB_CFG_PORT"
9749 };
9750 
9751 static const char *ib_cfg_name(int which)
9752 {
9753 	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9754 		return "invalid";
9755 	return ib_cfg_name_strings[which];
9756 }
9757 
9758 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9759 {
9760 	struct hfi1_devdata *dd = ppd->dd;
9761 	int val = 0;
9762 
9763 	switch (which) {
9764 	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9765 		val = ppd->link_width_enabled;
9766 		break;
9767 	case HFI1_IB_CFG_LWID: /* currently active Link-width */
9768 		val = ppd->link_width_active;
9769 		break;
9770 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9771 		val = ppd->link_speed_enabled;
9772 		break;
9773 	case HFI1_IB_CFG_SPD: /* current Link speed */
9774 		val = ppd->link_speed_active;
9775 		break;
9776 
9777 	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9778 	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9779 	case HFI1_IB_CFG_LINKLATENCY:
9780 		goto unimplemented;
9781 
9782 	case HFI1_IB_CFG_OP_VLS:
9783 		val = ppd->vls_operational;
9784 		break;
9785 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9786 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9787 		break;
9788 	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9789 		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9790 		break;
9791 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9792 		val = ppd->overrun_threshold;
9793 		break;
9794 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9795 		val = ppd->phy_error_threshold;
9796 		break;
9797 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9798 		val = dd->link_default;
9799 		break;
9800 
9801 	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9802 	case HFI1_IB_CFG_PMA_TICKS:
9803 	default:
9804 unimplemented:
9805 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9806 			dd_dev_info(
9807 				dd,
9808 				"%s: which %s: not implemented\n",
9809 				__func__,
9810 				ib_cfg_name(which));
9811 		break;
9812 	}
9813 
9814 	return val;
9815 }
9816 
9817 /*
9818  * The largest MAD packet size.
9819  */
9820 #define MAX_MAD_PACKET 2048
9821 
9822 /*
9823  * Return the maximum header bytes that can go on the _wire_
9824  * for this device. This count includes the ICRC which is
9825  * not part of the packet held in memory but is appended
9826  * by the HW.
9827  * This is dependent on the device's receive header entry size.
9828  * HFI allows this to be set per-receive context, but the
9829  * driver presently enforces a global value.
9830  */
9831 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9832 {
9833 	/*
9834 	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9835 	 * the Receive Header Entry Size minus the PBC (or RHF) size
9836 	 * plus one DW for the ICRC appended by HW.
9837 	 *
9838 	 * dd->rcd[0]->rcvhdrqentsize is in DW.
9839 	 * We use rcd[0] as all contexts will have the same value. Also,
9840 	 * the first kernel context would have been allocated by now so
9841 	 * we are guaranteed a valid value.
9842 	 */
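	/*
	 * Worked example (assuming a typical 32 DW entry size): the
	 * result is (32 - 2 + 1) << 2 = 124 bytes of wire header.
	 */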
9843 	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9844 }
9845 
9846 /*
9847  * Set Send Length
9848  * @ppd - per port data
9849  *
9850  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9851  * registers compare against LRH.PktLen, so use the max bytes included
9852  * in the LRH.
9853  *
9854  * This routine changes all VL values except VL15, which it maintains at
9855  * the same value.
9856  */
9857 static void set_send_length(struct hfi1_pportdata *ppd)
9858 {
9859 	struct hfi1_devdata *dd = ppd->dd;
9860 	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9861 	u32 maxvlmtu = dd->vld[15].mtu;
9862 	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9863 			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9864 		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9865 	int i, j;
9866 	u32 thres;
9867 
9868 	for (i = 0; i < ppd->vls_supported; i++) {
9869 		if (dd->vld[i].mtu > maxvlmtu)
9870 			maxvlmtu = dd->vld[i].mtu;
9871 		if (i <= 3)
9872 			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9873 				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9874 				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9875 		else
9876 			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9877 				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9878 				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9879 	}
9880 	write_csr(dd, SEND_LEN_CHECK0, len1);
9881 	write_csr(dd, SEND_LEN_CHECK1, len2);
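	/*
	 * Worked example (hypothetical sizes): an 8192 B MTU with 124 B
	 * of max header allows (8192 + 124) >> 2 = 2079 DWs in
	 * LRH.PktLen for that VL.
	 */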
9882 	/* adjust kernel credit return thresholds based on new MTUs */
9883 	/* all kernel receive contexts have the same hdrqentsize */
9884 	for (i = 0; i < ppd->vls_supported; i++) {
9885 		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9886 			    sc_mtu_to_threshold(dd->vld[i].sc,
9887 						dd->vld[i].mtu,
9888 						dd->rcd[0]->rcvhdrqentsize));
9889 		for (j = 0; j < INIT_SC_PER_VL; j++)
9890 			sc_set_cr_threshold(
9891 					pio_select_send_context_vl(dd, j, i),
9892 					thres);
9893 	}
9894 	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9895 		    sc_mtu_to_threshold(dd->vld[15].sc,
9896 					dd->vld[15].mtu,
9897 					dd->rcd[0]->rcvhdrqentsize));
9898 	sc_set_cr_threshold(dd->vld[15].sc, thres);
9899 
9900 	/* Adjust maximum MTU for the port in DC */
9901 	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9902 		(ilog2(maxvlmtu >> 8) + 1);
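	/*
	 * Illustrative encodings: maxvlmtu 2048 -> ilog2(8) + 1 = 4,
	 * 4096 -> 5, 8192 -> 6.  10240 is not a power of two, so it
	 * uses its own dedicated encoding above.
	 */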
9903 	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9904 	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9905 	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9906 		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9907 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9908 }
9909 
9910 static void set_lidlmc(struct hfi1_pportdata *ppd)
9911 {
9912 	int i;
9913 	u64 sreg = 0;
9914 	struct hfi1_devdata *dd = ppd->dd;
9915 	u32 mask = ~((1U << ppd->lmc) - 1);
9916 	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9917 
9918 	if (dd->hfi1_snoop.mode_flag)
9919 		dd_dev_info(dd, "Set lid/lmc while snooping\n");
9920 
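	/*
	 * Example: lmc = 2 gives mask = ~0x3, so the DLID/SLID checks
	 * below match any of the four LIDs in the port's LMC range.
	 */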
9921 	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9922 		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9923 	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9924 			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9925 	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9926 			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9927 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9928 
9929 	/*
9930 	 * Iterate over all the send contexts and set their SLID check
9931 	 */
9932 	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9933 			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9934 	       (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9935 			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9936 
9937 	for (i = 0; i < dd->chip_send_contexts; i++) {
9938 		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9939 			  i, (u32)sreg);
9940 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9941 	}
9942 
9943 	/* Now we have to do the same thing for the sdma engines */
9944 	sdma_update_lmc(dd, mask, ppd->lid);
9945 }
9946 
9947 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9948 {
9949 	unsigned long timeout;
9950 	u32 curr_state;
9951 
9952 	timeout = jiffies + msecs_to_jiffies(msecs);
9953 	while (1) {
9954 		curr_state = read_physical_state(dd);
9955 		if (curr_state == state)
9956 			break;
9957 		if (time_after(jiffies, timeout)) {
9958 			dd_dev_err(dd,
9959 				   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9960 				   state, curr_state);
9961 			return -ETIMEDOUT;
9962 		}
9963 		usleep_range(1950, 2050); /* sleep 2ms-ish */
9964 	}
9965 
9966 	return 0;
9967 }
9968 
9969 static const char *state_completed_string(u32 completed)
9970 {
9971 	static const char * const state_completed[] = {
9972 		"EstablishComm",
9973 		"OptimizeEQ",
9974 		"VerifyCap"
9975 	};
9976 
9977 	if (completed < ARRAY_SIZE(state_completed))
9978 		return state_completed[completed];
9979 
9980 	return "unknown";
9981 }
9982 
9983 static const char all_lanes_dead_timeout_expired[] =
9984 	"All lanes were inactive - was the interconnect media removed?";
9985 static const char tx_out_of_policy[] =
9986 	"Passing lanes on local port do not meet the local link width policy";
9987 static const char no_state_complete[] =
9988 	"State timeout occurred before link partner completed the state";
9989 static const char * const state_complete_reasons[] = {
9990 	[0x00] = "Reason unknown",
9991 	[0x01] = "Link was halted by driver, refer to LinkDownReason",
9992 	[0x02] = "Link partner reported failure",
9993 	[0x10] = "Unable to achieve frame sync on any lane",
9994 	[0x11] =
9995 	  "Unable to find a common bit rate with the link partner",
9996 	[0x12] =
9997 	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9998 	[0x13] =
9999 	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10000 	[0x14] = no_state_complete,
10001 	[0x15] =
10002 	  "State timeout occurred before link partner identified equalization presets",
10003 	[0x16] =
10004 	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10005 	[0x17] = tx_out_of_policy,
10006 	[0x20] = all_lanes_dead_timeout_expired,
10007 	[0x21] =
10008 	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10009 	[0x22] = no_state_complete,
10010 	[0x23] =
10011 	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10012 	[0x24] = tx_out_of_policy,
10013 	[0x30] = all_lanes_dead_timeout_expired,
10014 	[0x31] =
10015 	  "State timeout occurred waiting for host to process received frames",
10016 	[0x32] = no_state_complete,
10017 	[0x33] =
10018 	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10019 	[0x34] = tx_out_of_policy,
10020 };
10021 
10022 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10023 						     u32 code)
10024 {
10025 	const char *str = NULL;
10026 
10027 	if (code < ARRAY_SIZE(state_complete_reasons))
10028 		str = state_complete_reasons[code];
10029 
10030 	if (str)
10031 		return str;
10032 	return "Reserved";
10033 }
10034 
10035 /* describe the given last state complete frame */
10036 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10037 				  const char *prefix)
10038 {
10039 	struct hfi1_devdata *dd = ppd->dd;
10040 	u32 success;
10041 	u32 state;
10042 	u32 reason;
10043 	u32 lanes;
10044 
10045 	/*
10046 	 * Decode frame:
10047 	 *  [ 0: 0] - success
10048 	 *  [ 3: 1] - state
10049 	 *  [ 7: 4] - next state timeout
10050 	 *  [15: 8] - reason code
10051 	 *  [31:16] - lanes
10052 	 */
10053 	success = frame & 0x1;
10054 	state = (frame >> 1) & 0x7;
10055 	reason = (frame >> 8) & 0xff;
10056 	lanes = (frame >> 16) & 0xffff;
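	/*
	 * Worked example (hypothetical frame value): 0x00ff1405 decodes
	 * to success = 1, state = 2 (VerifyCap), reason = 0x14, and a
	 * passing lane mask of 0x00ff.
	 */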
10057 
10058 	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10059 		   prefix, frame);
10060 	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10061 		   state_completed_string(state), state);
10062 	dd_dev_err(dd, "    state successfully completed: %s\n",
10063 		   success ? "yes" : "no");
10064 	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10065 		   reason, state_complete_reason_code_string(ppd, reason));
10066 	dd_dev_err(dd, "    passing lane mask: 0x%x\n", lanes);
10067 }
10068 
10069 /*
10070  * Read the last state complete frames and explain them.  This routine
10071  * expects to be called if the link went down during link negotiation
10072  * and initialization (LNI).  That is, anywhere between polling and link up.
10073  */
10074 static void check_lni_states(struct hfi1_pportdata *ppd)
10075 {
10076 	u32 last_local_state;
10077 	u32 last_remote_state;
10078 
10079 	read_last_local_state(ppd->dd, &last_local_state);
10080 	read_last_remote_state(ppd->dd, &last_remote_state);
10081 
10082 	/*
10083 	 * Don't report anything if there is nothing to report.  A value of
10084 	 * 0 means the link was taken down while polling and there was no
10085 	 * training in-process.
10086 	 * training in progress.
10087 	if (last_local_state == 0 && last_remote_state == 0)
10088 		return;
10089 
10090 	decode_state_complete(ppd, last_local_state, "transmitted");
10091 	decode_state_complete(ppd, last_remote_state, "received");
10092 }
10093 
10094 /*
10095  * Helper for set_link_state().  Do not call except from that routine.
10096  * Expects ppd->hls_mutex to be held.
10097  *
10098  * @rem_reason value to be sent to the neighbor
10099  *
10100  * LinkDownReasons only set if transition succeeds.
10101  */
10102 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10103 {
10104 	struct hfi1_devdata *dd = ppd->dd;
10105 	u32 pstate, previous_state;
10106 	int ret;
10107 	int do_transition;
10108 	int do_wait;
10109 
10110 	previous_state = ppd->host_link_state;
10111 	ppd->host_link_state = HLS_GOING_OFFLINE;
10112 	pstate = read_physical_state(dd);
10113 	if (pstate == PLS_OFFLINE) {
10114 		do_transition = 0;	/* in right state */
10115 		do_wait = 0;		/* ...no need to wait */
10116 	} else if ((pstate & 0xff) == PLS_OFFLINE) {
10117 		do_transition = 0;	/* in an offline transient state */
10118 		do_wait = 1;		/* ...wait for it to settle */
10119 	} else {
10120 		do_transition = 1;	/* need to move to offline */
10121 		do_wait = 1;		/* ...will need to wait */
10122 	}
10123 
10124 	if (do_transition) {
10125 		ret = set_physical_link_state(dd,
10126 					      (rem_reason << 8) | PLS_OFFLINE);
10127 
10128 		if (ret != HCMD_SUCCESS) {
10129 			dd_dev_err(dd,
10130 				   "Failed to transition to Offline link state, return %d\n",
10131 				   ret);
10132 			return -EINVAL;
10133 		}
10134 		if (ppd->offline_disabled_reason ==
10135 				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10136 			ppd->offline_disabled_reason =
10137 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10138 	}
10139 
10140 	if (do_wait) {
10141 		/* it can take a while for the link to go down */
10142 		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
10143 		if (ret < 0)
10144 			return ret;
10145 	}
10146 
10147 	/* make sure the logical state is also down */
10148 	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10149 
10150 	/*
10151 	 * Now in charge of LCB - must be after the physical state is
10152 	 * offline.quiet and before host_link_state is changed.
10153 	 */
10154 	set_host_lcb_access(dd);
10155 	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10156 	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10157 
10158 	if (ppd->port_type == PORT_TYPE_QSFP &&
10159 	    ppd->qsfp_info.limiting_active &&
10160 	    qsfp_mod_present(ppd)) {
10161 		int ret;
10162 
10163 		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10164 		if (ret == 0) {
10165 			set_qsfp_tx(ppd, 0);
10166 			release_chip_resource(dd, qsfp_resource(dd));
10167 		} else {
10168 			/* not fatal, but should warn */
10169 			dd_dev_err(dd,
10170 				   "Unable to acquire lock to turn off QSFP TX\n");
10171 		}
10172 	}
10173 
10174 	/*
10175 	 * The LNI has a mandatory wait time after the physical state
10176 	 * moves to Offline.Quiet.  The wait time may be different
10177 	 * depending on how the link went down.  The 8051 firmware
10178 	 * will observe the needed wait time and only move to ready
10179 	 * when that is completed.  The largest of the quiet timeouts
10180 	 * is 6s, so wait that long and then at least 0.5s more for
10181 	 * other transitions, and another 0.5s for a buffer.
10182 	 */
10183 	ret = wait_fm_ready(dd, 7000);
10184 	if (ret) {
10185 		dd_dev_err(dd,
10186 			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10187 		/* state is really offline, so make it so */
10188 		ppd->host_link_state = HLS_DN_OFFLINE;
10189 		return ret;
10190 	}
10191 
10192 	/*
10193 	 * The state is now offline and the 8051 is ready to accept host
10194 	 * requests.
10195 	 *	- change our state
10196 	 *	- notify others if we were previously in a linkup state
10197 	 */
10198 	ppd->host_link_state = HLS_DN_OFFLINE;
10199 	if (previous_state & HLS_UP) {
10200 		/* went down while link was up */
10201 		handle_linkup_change(dd, 0);
10202 	} else if (previous_state
10203 			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10204 		/* went down while attempting link up */
10205 		check_lni_states(ppd);
10206 	}
10207 
10208 	/* the active link width (downgrade) is 0 on link down */
10209 	ppd->link_width_active = 0;
10210 	ppd->link_width_downgrade_tx_active = 0;
10211 	ppd->link_width_downgrade_rx_active = 0;
10212 	ppd->current_egress_rate = 0;
10213 	return 0;
10214 }
10215 
10216 /* return the link state name */
10217 static const char *link_state_name(u32 state)
10218 {
10219 	const char *name;
10220 	int n = ilog2(state);
10221 	static const char * const names[] = {
10222 		[__HLS_UP_INIT_BP]	 = "INIT",
10223 		[__HLS_UP_ARMED_BP]	 = "ARMED",
10224 		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
10225 		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
10226 		[__HLS_DN_POLL_BP]	 = "POLL",
10227 		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
10228 		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
10229 		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
10230 		[__HLS_GOING_UP_BP]	 = "GOING_UP",
10231 		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10232 		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10233 	};
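
	/* HLS_* states are one-hot bit masks; ilog2() recovers the index */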
10234 
10235 	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10236 	return name ? name : "unknown";
10237 }
10238 
10239 /* return the link state reason name */
10240 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10241 {
10242 	if (state == HLS_UP_INIT) {
10243 		switch (ppd->linkinit_reason) {
10244 		case OPA_LINKINIT_REASON_LINKUP:
10245 			return "(LINKUP)";
10246 		case OPA_LINKINIT_REASON_FLAPPING:
10247 			return "(FLAPPING)";
10248 		case OPA_LINKINIT_OUTSIDE_POLICY:
10249 			return "(OUTSIDE_POLICY)";
10250 		case OPA_LINKINIT_QUARANTINED:
10251 			return "(QUARANTINED)";
10252 		case OPA_LINKINIT_INSUFIC_CAPABILITY:
10253 			return "(INSUFIC_CAPABILITY)";
10254 		default:
10255 			break;
10256 		}
10257 	}
10258 	return "";
10259 }
10260 
10261 /*
10262  * driver_physical_state - convert the driver's notion of a port's
10263  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10264  * Return -1 (converted to a u32) to indicate error.
10265  */
10266 u32 driver_physical_state(struct hfi1_pportdata *ppd)
10267 {
10268 	switch (ppd->host_link_state) {
10269 	case HLS_UP_INIT:
10270 	case HLS_UP_ARMED:
10271 	case HLS_UP_ACTIVE:
10272 		return IB_PORTPHYSSTATE_LINKUP;
10273 	case HLS_DN_POLL:
10274 		return IB_PORTPHYSSTATE_POLLING;
10275 	case HLS_DN_DISABLE:
10276 		return IB_PORTPHYSSTATE_DISABLED;
10277 	case HLS_DN_OFFLINE:
10278 		return OPA_PORTPHYSSTATE_OFFLINE;
10279 	case HLS_VERIFY_CAP:
10280 		return IB_PORTPHYSSTATE_POLLING;
10281 	case HLS_GOING_UP:
10282 		return IB_PORTPHYSSTATE_POLLING;
10283 	case HLS_GOING_OFFLINE:
10284 		return OPA_PORTPHYSSTATE_OFFLINE;
10285 	case HLS_LINK_COOLDOWN:
10286 		return OPA_PORTPHYSSTATE_OFFLINE;
10287 	case HLS_DN_DOWNDEF:
10288 	default:
10289 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10290 			   ppd->host_link_state);
10291 		return -1;
10292 	}
10293 }
10294 
10295 /*
10296  * driver_logical_state - convert the driver's notion of a port's
10297  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10298  * (converted to a u32) to indicate error.
10299  */
10300 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10301 {
10302 	if (ppd->host_link_state & HLS_DOWN)
10303 		return IB_PORT_DOWN;
10304 
10305 	switch (ppd->host_link_state & HLS_UP) {
10306 	case HLS_UP_INIT:
10307 		return IB_PORT_INIT;
10308 	case HLS_UP_ARMED:
10309 		return IB_PORT_ARMED;
10310 	case HLS_UP_ACTIVE:
10311 		return IB_PORT_ACTIVE;
10312 	default:
10313 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10314 			   ppd->host_link_state);
10315 		return -1;
10316 	}
10317 }
10318 
10319 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10320 			  u8 neigh_reason, u8 rem_reason)
10321 {
10322 	if (ppd->local_link_down_reason.latest == 0 &&
10323 	    ppd->neigh_link_down_reason.latest == 0) {
10324 		ppd->local_link_down_reason.latest = lcl_reason;
10325 		ppd->neigh_link_down_reason.latest = neigh_reason;
10326 		ppd->remote_link_down_reason = rem_reason;
10327 	}
10328 }
10329 
10330 /*
10331  * Change the physical and/or logical link state.
10332  *
10333  * Do not call this routine while inside an interrupt.  It contains
10334  * calls to routines that can take multiple seconds to finish.
10335  *
10336  * Returns 0 on success, -errno on failure.
10337  */
10338 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10339 {
10340 	struct hfi1_devdata *dd = ppd->dd;
10341 	struct ib_event event = {.device = NULL};
10342 	int ret1, ret = 0;
10343 	int orig_new_state, poll_bounce;
10344 
10345 	mutex_lock(&ppd->hls_lock);
10346 
10347 	orig_new_state = state;
10348 	if (state == HLS_DN_DOWNDEF)
10349 		state = dd->link_default;
10350 
10351 	/* interpret poll -> poll as a link bounce */
10352 	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10353 		      state == HLS_DN_POLL;
10354 
10355 	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10356 		    link_state_name(ppd->host_link_state),
10357 		    link_state_name(orig_new_state),
10358 		    poll_bounce ? "(bounce) " : "",
10359 		    link_state_reason_name(ppd, state));
10360 
10361 	/*
10362 	 * If we're going to a (HLS_*) link state that implies the logical
10363 	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10364 	 * reset is_sm_config_started to 0.
10365 	 */
10366 	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10367 		ppd->is_sm_config_started = 0;
10368 
10369 	/*
10370 	 * Do nothing if the states match.  Let a poll to poll link bounce
10371 	 * go through.
10372 	 */
10373 	if (ppd->host_link_state == state && !poll_bounce)
10374 		goto done;
10375 
10376 	switch (state) {
10377 	case HLS_UP_INIT:
10378 		if (ppd->host_link_state == HLS_DN_POLL &&
10379 		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10380 			/*
10381 			 * Quick link up jumps from polling to here.
10382 			 *
10383 			 * Whether in normal or loopback mode, the
10384 			 * simulator jumps from polling to link up.
10385 			 * Accept that here.
10386 			 */
10387 			/* OK */
10388 		} else if (ppd->host_link_state != HLS_GOING_UP) {
10389 			goto unexpected;
10390 		}
10391 
10392 		ppd->host_link_state = HLS_UP_INIT;
10393 		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10394 		if (ret) {
10395 			/* logical state didn't change, stay at going_up */
10396 			ppd->host_link_state = HLS_GOING_UP;
10397 			dd_dev_err(dd,
10398 				   "%s: logical state did not change to INIT\n",
10399 				   __func__);
10400 		} else {
10401 			/* clear old transient LINKINIT_REASON code */
10402 			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10403 				ppd->linkinit_reason =
10404 					OPA_LINKINIT_REASON_LINKUP;
10405 
10406 			/* enable the port */
10407 			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10408 
10409 			handle_linkup_change(dd, 1);
10410 		}
10411 		break;
10412 	case HLS_UP_ARMED:
10413 		if (ppd->host_link_state != HLS_UP_INIT)
10414 			goto unexpected;
10415 
10416 		ppd->host_link_state = HLS_UP_ARMED;
10417 		set_logical_state(dd, LSTATE_ARMED);
10418 		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10419 		if (ret) {
10420 			/* logical state didn't change, stay at init */
10421 			ppd->host_link_state = HLS_UP_INIT;
10422 			dd_dev_err(dd,
10423 				   "%s: logical state did not change to ARMED\n",
10424 				   __func__);
10425 		}
10426 		/*
10427 		 * The simulator does not currently implement SMA messages,
10428 		 * so neighbor_normal is not set.  Set it here when we first
10429 		 * move to Armed.
10430 		 */
10431 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10432 			ppd->neighbor_normal = 1;
10433 		break;
10434 	case HLS_UP_ACTIVE:
10435 		if (ppd->host_link_state != HLS_UP_ARMED)
10436 			goto unexpected;
10437 
10438 		ppd->host_link_state = HLS_UP_ACTIVE;
10439 		set_logical_state(dd, LSTATE_ACTIVE);
10440 		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10441 		if (ret) {
10442 			/* logical state didn't change, stay at armed */
10443 			ppd->host_link_state = HLS_UP_ARMED;
10444 			dd_dev_err(dd,
10445 				   "%s: logical state did not change to ACTIVE\n",
10446 				   __func__);
10447 		} else {
10448 			/* tell all engines to go running */
10449 			sdma_all_running(dd);
10450 
10451 			/* Signal the IB layer that the port has gone active */
10452 			event.device = &dd->verbs_dev.rdi.ibdev;
10453 			event.element.port_num = ppd->port;
10454 			event.event = IB_EVENT_PORT_ACTIVE;
10455 		}
10456 		break;
10457 	case HLS_DN_POLL:
10458 		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10459 		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10460 		    dd->dc_shutdown)
10461 			dc_start(dd);
10462 		/* Hand LED control to the DC */
10463 		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10464 
10465 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10466 			u8 tmp = ppd->link_enabled;
10467 
10468 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10469 			if (ret) {
10470 				ppd->link_enabled = tmp;
10471 				break;
10472 			}
10473 			ppd->remote_link_down_reason = 0;
10474 
10475 			if (ppd->driver_link_ready)
10476 				ppd->link_enabled = 1;
10477 		}
10478 
10479 		set_all_slowpath(ppd->dd);
10480 		ret = set_local_link_attributes(ppd);
10481 		if (ret)
10482 			break;
10483 
10484 		ppd->port_error_action = 0;
10485 		ppd->host_link_state = HLS_DN_POLL;
10486 
10487 		if (quick_linkup) {
10488 			/* quick linkup does not go into polling */
10489 			ret = do_quick_linkup(dd);
10490 		} else {
10491 			ret1 = set_physical_link_state(dd, PLS_POLLING);
10492 			if (ret1 != HCMD_SUCCESS) {
10493 				dd_dev_err(dd,
10494 					   "Failed to transition to Polling link state, return 0x%x\n",
10495 					   ret1);
10496 				ret = -EINVAL;
10497 			}
10498 		}
10499 		ppd->offline_disabled_reason =
10500 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10501 		/*
10502 		 * If an error occurred above, go back to offline.  The
10503 		 * caller may reschedule another attempt.
10504 		 */
10505 		if (ret)
10506 			goto_offline(ppd, 0);
10507 		break;
10508 	case HLS_DN_DISABLE:
10509 		/* link is disabled */
10510 		ppd->link_enabled = 0;
10511 
10512 		/* allow any state to transition to disabled */
10513 
10514 		/* must transition to offline first */
10515 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10516 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10517 			if (ret)
10518 				break;
10519 			ppd->remote_link_down_reason = 0;
10520 		}
10521 
10522 		ret1 = set_physical_link_state(dd, PLS_DISABLED);
10523 		if (ret1 != HCMD_SUCCESS) {
10524 			dd_dev_err(dd,
10525 				   "Failed to transition to Disabled link state, return 0x%x\n",
10526 				   ret1);
10527 			ret = -EINVAL;
10528 			break;
10529 		}
10530 		ppd->host_link_state = HLS_DN_DISABLE;
10531 		dc_shutdown(dd);
10532 		break;
10533 	case HLS_DN_OFFLINE:
10534 		if (ppd->host_link_state == HLS_DN_DISABLE)
10535 			dc_start(dd);
10536 
10537 		/* allow any state to transition to offline */
10538 		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10539 		if (!ret)
10540 			ppd->remote_link_down_reason = 0;
10541 		break;
10542 	case HLS_VERIFY_CAP:
10543 		if (ppd->host_link_state != HLS_DN_POLL)
10544 			goto unexpected;
10545 		ppd->host_link_state = HLS_VERIFY_CAP;
10546 		break;
10547 	case HLS_GOING_UP:
10548 		if (ppd->host_link_state != HLS_VERIFY_CAP)
10549 			goto unexpected;
10550 
10551 		ret1 = set_physical_link_state(dd, PLS_LINKUP);
10552 		if (ret1 != HCMD_SUCCESS) {
10553 			dd_dev_err(dd,
10554 				   "Failed to transition to link up state, return 0x%x\n",
10555 				   ret1);
10556 			ret = -EINVAL;
10557 			break;
10558 		}
10559 		ppd->host_link_state = HLS_GOING_UP;
10560 		break;
10561 
10562 	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
10563 	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
10564 	default:
10565 		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10566 			    __func__, state);
10567 		ret = -EINVAL;
10568 		break;
10569 	}
10570 
10571 	goto done;
10572 
10573 unexpected:
10574 	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10575 		   __func__, link_state_name(ppd->host_link_state),
10576 		   link_state_name(state));
10577 	ret = -EINVAL;
10578 
10579 done:
10580 	mutex_unlock(&ppd->hls_lock);
10581 
10582 	if (event.device)
10583 		ib_dispatch_event(&event);
10584 
10585 	return ret;
10586 }
10587 
10588 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10589 {
10590 	u64 reg;
10591 	int ret = 0;
10592 
10593 	switch (which) {
10594 	case HFI1_IB_CFG_LIDLMC:
10595 		set_lidlmc(ppd);
10596 		break;
10597 	case HFI1_IB_CFG_VL_HIGH_LIMIT:
10598 		/*
10599 		 * The VL Arbitrator high limit is sent in units of 4k
10600 		 * bytes, while HFI stores it in units of 64 bytes.
10601 		 */
10602 		val *= 4096 / 64;
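		/* e.g. an FM value of 2 (8 KB) becomes 128 64-byte units */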
10603 		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10604 			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10605 		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10606 		break;
10607 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10608 		/* HFI only supports POLL as the default link down state */
10609 		if (val != HLS_DN_POLL)
10610 			ret = -EINVAL;
10611 		break;
10612 	case HFI1_IB_CFG_OP_VLS:
10613 		if (ppd->vls_operational != val) {
10614 			ppd->vls_operational = val;
10615 			if (!ppd->port)
10616 				ret = -EINVAL;
10617 		}
10618 		break;
10619 	/*
10620 	 * For link width, link width downgrade, and speed enable, always AND
10621 	 * the setting with what is actually supported.  This has two benefits.
10622 	 * First, enabled can't have unsupported values, no matter what the
10623 	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10624 	 * "fill in with your supported value" have all the bits in the
10625 	 * field set, so simply ANDing with supported has the desired result.
10626 	 */
10627 	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10628 		ppd->link_width_enabled = val & ppd->link_width_supported;
10629 		break;
10630 	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10631 		ppd->link_width_downgrade_enabled =
10632 				val & ppd->link_width_downgrade_supported;
10633 		break;
10634 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10635 		ppd->link_speed_enabled = val & ppd->link_speed_supported;
10636 		break;
10637 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10638 		/*
10639 		 * HFI does not follow IB specs, save this value
10640 		 * so we can report it, if asked.
10641 		 */
10642 		ppd->overrun_threshold = val;
10643 		break;
10644 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10645 		/*
10646 		 * HFI does not follow IB specs, save this value
10647 		 * so we can report it, if asked.
10648 		 */
10649 		ppd->phy_error_threshold = val;
10650 		break;
10651 
10652 	case HFI1_IB_CFG_MTU:
10653 		set_send_length(ppd);
10654 		break;
10655 
10656 	case HFI1_IB_CFG_PKEYS:
10657 		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10658 			set_partition_keys(ppd);
10659 		break;
10660 
10661 	default:
10662 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10663 			dd_dev_info(ppd->dd,
10664 				    "%s: which %s, val 0x%x: not implemented\n",
10665 				    __func__, ib_cfg_name(which), val);
10666 		break;
10667 	}
10668 	return ret;
10669 }
10670 
10671 /* begin functions related to vl arbitration table caching */
10672 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10673 {
10674 	int i;
10675 
10676 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10677 			VL_ARB_LOW_PRIO_TABLE_SIZE);
10678 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10679 			VL_ARB_HIGH_PRIO_TABLE_SIZE);
10680 
10681 	/*
10682 	 * Note that we always return values directly from the
10683 	 * 'vl_arb_cache' (and do no CSR reads) in response to a
10684 	 * 'Get(VLArbTable)'. This is obviously correct after a
10685 	 * 'Set(VLArbTable)', since the cache will then be up to
10686 	 * date. But it's also correct prior to any 'Set(VLArbTable)'
10687 	 * since then both the cache, and the relevant h/w registers
10688 	 * will be zeroed.
10689 	 */
10690 
10691 	for (i = 0; i < MAX_PRIO_TABLE; i++)
10692 		spin_lock_init(&ppd->vl_arb_cache[i].lock);
10693 }
10694 
10695 /*
10696  * vl_arb_lock_cache
10697  *
10698  * All other vl_arb_* functions should be called only after locking
10699  * the cache.
10700  */
10701 static inline struct vl_arb_cache *
10702 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10703 {
10704 	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10705 		return NULL;
10706 	spin_lock(&ppd->vl_arb_cache[idx].lock);
10707 	return &ppd->vl_arb_cache[idx];
10708 }
10709 
10710 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10711 {
10712 	spin_unlock(&ppd->vl_arb_cache[idx].lock);
10713 }
10714 
10715 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10716 			     struct ib_vl_weight_elem *vl)
10717 {
10718 	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10719 }
10720 
10721 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10722 			     struct ib_vl_weight_elem *vl)
10723 {
10724 	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10725 }
10726 
10727 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10728 			      struct ib_vl_weight_elem *vl)
10729 {
10730 	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10731 }
10732 
10733 /* end functions related to vl arbitration table caching */
10734 
10735 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10736 			  u32 size, struct ib_vl_weight_elem *vl)
10737 {
10738 	struct hfi1_devdata *dd = ppd->dd;
10739 	u64 reg;
10740 	unsigned int i, is_up = 0;
10741 	int drain, ret = 0;
10742 
10743 	mutex_lock(&ppd->hls_lock);
10744 
10745 	if (ppd->host_link_state & HLS_UP)
10746 		is_up = 1;
10747 
10748 	drain = !is_ax(dd) && is_up;
10749 
10750 	if (drain)
10751 		/*
10752 		 * Before adjusting VL arbitration weights, empty per-VL
10753 		 * FIFOs, otherwise a packet whose VL weight is being
10754 		 * set to 0 could get stuck in a FIFO with no chance to
10755 		 * egress.
10756 		 */
10757 		ret = stop_drain_data_vls(dd);
10758 
10759 	if (ret) {
10760 		dd_dev_err(
10761 			dd,
10762 			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10763 			__func__);
10764 		goto err;
10765 	}
10766 
10767 	for (i = 0; i < size; i++, vl++) {
10768 		/*
10769 		 * NOTE: The low priority shift and mask are used here, but
10770 		 * they are the same for both the low and high registers.
10771 		 */
10772 		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10773 				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10774 		      | (((u64)vl->weight
10775 				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10776 				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10777 		write_csr(dd, target + (i * 8), reg);
10778 	}
10779 	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10780 
10781 	if (drain)
10782 		open_fill_data_vls(dd); /* reopen all VLs */
10783 
10784 err:
10785 	mutex_unlock(&ppd->hls_lock);
10786 
10787 	return ret;
10788 }
10789 
10790 /*
10791  * Read one credit merge VL register.
10792  */
10793 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10794 			   struct vl_limit *vll)
10795 {
10796 	u64 reg = read_csr(dd, csr);
10797 
10798 	vll->dedicated = cpu_to_be16(
10799 		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10800 		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10801 	vll->shared = cpu_to_be16(
10802 		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10803 		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10804 }
10805 
10806 /*
10807  * Read the current credit merge limits.
10808  */
10809 static int get_buffer_control(struct hfi1_devdata *dd,
10810 			      struct buffer_control *bc, u16 *overall_limit)
10811 {
10812 	u64 reg;
10813 	int i;
10814 
10815 	/* not all entries are filled in */
10816 	memset(bc, 0, sizeof(*bc));
10817 
10818 	/* OPA and HFI have a 1-1 mapping */
10819 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
10820 		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10821 
10822 	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10823 	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10824 
10825 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10826 	bc->overall_shared_limit = cpu_to_be16(
10827 		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10828 		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10829 	if (overall_limit)
10830 		*overall_limit = (reg
10831 			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10832 			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10833 	return sizeof(struct buffer_control);
10834 }
10835 
10836 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10837 {
10838 	u64 reg;
10839 	int i;
10840 
10841 	/* each register contains 16 SC->VLnt mappings, 4 bits each */
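	/* e.g. a register byte of 0x21 unpacks to entries 1 then 2 */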
10842 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10843 	for (i = 0; i < sizeof(u64); i++) {
10844 		u8 byte = *(((u8 *)&reg) + i);
10845 
10846 		dp->vlnt[2 * i] = byte & 0xf;
10847 		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10848 	}
10849 
10850 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10851 	for (i = 0; i < sizeof(u64); i++) {
10852 		u8 byte = *(((u8 *)&reg) + i);
10853 
10854 		dp->vlnt[16 + (2 * i)] = byte & 0xf;
10855 		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10856 	}
10857 	return sizeof(struct sc2vlnt);
10858 }
10859 
10860 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10861 			      struct ib_vl_weight_elem *vl)
10862 {
10863 	unsigned int i;
10864 
10865 	for (i = 0; i < nelems; i++, vl++) {
10866 		vl->vl = 0xf;
10867 		vl->weight = 0;
10868 	}
10869 }
10870 
10871 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10872 {
10873 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10874 		  DC_SC_VL_VAL(15_0,
10875 			       0, dp->vlnt[0] & 0xf,
10876 			       1, dp->vlnt[1] & 0xf,
10877 			       2, dp->vlnt[2] & 0xf,
10878 			       3, dp->vlnt[3] & 0xf,
10879 			       4, dp->vlnt[4] & 0xf,
10880 			       5, dp->vlnt[5] & 0xf,
10881 			       6, dp->vlnt[6] & 0xf,
10882 			       7, dp->vlnt[7] & 0xf,
10883 			       8, dp->vlnt[8] & 0xf,
10884 			       9, dp->vlnt[9] & 0xf,
10885 			       10, dp->vlnt[10] & 0xf,
10886 			       11, dp->vlnt[11] & 0xf,
10887 			       12, dp->vlnt[12] & 0xf,
10888 			       13, dp->vlnt[13] & 0xf,
10889 			       14, dp->vlnt[14] & 0xf,
10890 			       15, dp->vlnt[15] & 0xf));
10891 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10892 		  DC_SC_VL_VAL(31_16,
10893 			       16, dp->vlnt[16] & 0xf,
10894 			       17, dp->vlnt[17] & 0xf,
10895 			       18, dp->vlnt[18] & 0xf,
10896 			       19, dp->vlnt[19] & 0xf,
10897 			       20, dp->vlnt[20] & 0xf,
10898 			       21, dp->vlnt[21] & 0xf,
10899 			       22, dp->vlnt[22] & 0xf,
10900 			       23, dp->vlnt[23] & 0xf,
10901 			       24, dp->vlnt[24] & 0xf,
10902 			       25, dp->vlnt[25] & 0xf,
10903 			       26, dp->vlnt[26] & 0xf,
10904 			       27, dp->vlnt[27] & 0xf,
10905 			       28, dp->vlnt[28] & 0xf,
10906 			       29, dp->vlnt[29] & 0xf,
10907 			       30, dp->vlnt[30] & 0xf,
10908 			       31, dp->vlnt[31] & 0xf));
10909 }
10910 
10911 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10912 			u16 limit)
10913 {
10914 	if (limit != 0)
10915 		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10916 			    what, (int)limit, idx);
10917 }
10918 
10919 /* change only the shared limit portion of SendCmGlobalCredit */
10920 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10921 {
10922 	u64 reg;
10923 
10924 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10925 	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10926 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10927 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10928 }
10929 
10930 /* change only the total credit limit portion of SendCmGlobalCredit */
10931 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10932 {
10933 	u64 reg;
10934 
10935 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10936 	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10937 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10938 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10939 }
10940 
10941 /* set the given per-VL shared limit */
10942 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10943 {
10944 	u64 reg;
10945 	u32 addr;
10946 
10947 	if (vl < TXE_NUM_DATA_VL)
10948 		addr = SEND_CM_CREDIT_VL + (8 * vl);
10949 	else
10950 		addr = SEND_CM_CREDIT_VL15;
10951 
10952 	reg = read_csr(dd, addr);
10953 	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10954 	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10955 	write_csr(dd, addr, reg);
10956 }
10957 
10958 /* set the given per-VL dedicated limit */
10959 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10960 {
10961 	u64 reg;
10962 	u32 addr;
10963 
10964 	if (vl < TXE_NUM_DATA_VL)
10965 		addr = SEND_CM_CREDIT_VL + (8 * vl);
10966 	else
10967 		addr = SEND_CM_CREDIT_VL15;
10968 
10969 	reg = read_csr(dd, addr);
10970 	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10971 	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10972 	write_csr(dd, addr, reg);
10973 }
10974 
10975 /* spin until the given per-VL status mask bits clear */
10976 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10977 				     const char *which)
10978 {
10979 	unsigned long timeout;
10980 	u64 reg;
10981 
10982 	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10983 	while (1) {
10984 		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10985 
10986 		if (reg == 0)
10987 			return;	/* success */
10988 		if (time_after(jiffies, timeout))
10989 			break;		/* timed out */
10990 		udelay(1);
10991 	}
10992 
10993 	dd_dev_err(dd,
10994 		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10995 		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10996 	/*
10997 	 * If this occurs, it is likely there was a credit loss on the link.
10998 	 * The only recovery from that is a link bounce.
10999 	 */
11000 	dd_dev_err(dd,
11001 		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11002 }
11003 
11004 /*
11005  * The number of credits on the VLs may be changed while everything
11006  * is "live", but the following algorithm must be followed due to
11007  * how the hardware is actually implemented.  In particular,
11008  * Return_Credit_Status[] is the only correct status check.
11009  *
11010  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11011  *     set Global_Shared_Credit_Limit = 0
11012  *     use_all_vl = 1
11013  * mask0 = all VLs that are changing either dedicated or shared limits
11014  * set Shared_Limit[mask0] = 0
11015  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11016  * if (changing any dedicated limit)
11017  *     mask1 = all VLs that are lowering dedicated limits
11018  *     lower Dedicated_Limit[mask1]
11019  *     spin until Return_Credit_Status[mask1] == 0
11020  *     raise Dedicated_Limits
11021  * raise Shared_Limits
11022  * raise Global_Shared_Credit_Limit
11023  *
11024  * lower = if the new limit is lower, set the limit to the new value
11025  * raise = if the new limit is higher than the current value (may be changed
11026  *	earlier in the algorithm), set the new limit to the new value
11027  */
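/*
 * Illustrative run of the algorithm above (hypothetical limits):
 * lowering VL0's dedicated limit from 100 to 50 while raising VL1's
 * shared limit from 0 to 20 first zeroes the shared limits of both
 * VLs, spins until their Return_Credit_Status bits clear, lowers the
 * VL0 dedicated limit, spins again, and only then raises the VL1
 * shared limit and the global limits.
 */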
11028 int set_buffer_control(struct hfi1_pportdata *ppd,
11029 		       struct buffer_control *new_bc)
11030 {
11031 	struct hfi1_devdata *dd = ppd->dd;
11032 	u64 changing_mask, ld_mask, stat_mask;
11033 	int change_count;
11034 	int i, use_all_mask;
11035 	int this_shared_changing;
11036 	int vl_count = 0, ret;
11037 	/*
11038 	 * A0 workaround: any_shared_limit_changing below augments the
11039 	 * algorithm above.  It can be removed when A0 support is dropped.
11040 	 */
11041 	int any_shared_limit_changing;
11042 	struct buffer_control cur_bc;
11043 	u8 changing[OPA_MAX_VLS];
11044 	u8 lowering_dedicated[OPA_MAX_VLS];
11045 	u16 cur_total;
11046 	u32 new_total = 0;
11047 	const u64 all_mask =
11048 	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11049 	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11050 	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11051 	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11052 	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11053 	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11054 	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11055 	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11056 	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11057 
11058 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11059 #define NUM_USABLE_VLS 16	/* look at VL15 and less */
11060 
11061 	/* find the new total credits, do sanity check on unused VLs */
11062 	for (i = 0; i < OPA_MAX_VLS; i++) {
11063 		if (valid_vl(i)) {
11064 			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11065 			continue;
11066 		}
11067 		nonzero_msg(dd, i, "dedicated",
11068 			    be16_to_cpu(new_bc->vl[i].dedicated));
11069 		nonzero_msg(dd, i, "shared",
11070 			    be16_to_cpu(new_bc->vl[i].shared));
11071 		new_bc->vl[i].dedicated = 0;
11072 		new_bc->vl[i].shared = 0;
11073 	}
11074 	new_total += be16_to_cpu(new_bc->overall_shared_limit);
11075 
11076 	/* fetch the current values */
11077 	get_buffer_control(dd, &cur_bc, &cur_total);
11078 
11079 	/*
11080 	 * Create the masks we will use.
11081 	 */
11082 	memset(changing, 0, sizeof(changing));
11083 	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11084 	/*
11085 	 * NOTE: Assumes that the individual VL bits are adjacent and in
11086 	 * increasing order
11087 	 */
11088 	stat_mask =
11089 		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11090 	changing_mask = 0;
11091 	ld_mask = 0;
11092 	change_count = 0;
11093 	any_shared_limit_changing = 0;
11094 	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11095 		if (!valid_vl(i))
11096 			continue;
11097 		this_shared_changing = new_bc->vl[i].shared
11098 						!= cur_bc.vl[i].shared;
11099 		if (this_shared_changing)
11100 			any_shared_limit_changing = 1;
11101 		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11102 		    this_shared_changing) {
11103 			changing[i] = 1;
11104 			changing_mask |= stat_mask;
11105 			change_count++;
11106 		}
11107 		if (be16_to_cpu(new_bc->vl[i].dedicated) <
11108 					be16_to_cpu(cur_bc.vl[i].dedicated)) {
11109 			lowering_dedicated[i] = 1;
11110 			ld_mask |= stat_mask;
11111 		}
11112 	}
11113 
11114 	/* bracket the credit change with a total adjustment */
11115 	if (new_total > cur_total)
11116 		set_global_limit(dd, new_total);
11117 
11118 	/*
11119 	 * Start the credit change algorithm.
11120 	 */
11121 	use_all_mask = 0;
11122 	if ((be16_to_cpu(new_bc->overall_shared_limit) <
11123 	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
11124 	    (is_ax(dd) && any_shared_limit_changing)) {
11125 		set_global_shared(dd, 0);
11126 		cur_bc.overall_shared_limit = 0;
11127 		use_all_mask = 1;
11128 	}
11129 
11130 	for (i = 0; i < NUM_USABLE_VLS; i++) {
11131 		if (!valid_vl(i))
11132 			continue;
11133 
11134 		if (changing[i]) {
11135 			set_vl_shared(dd, i, 0);
11136 			cur_bc.vl[i].shared = 0;
11137 		}
11138 	}
11139 
11140 	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11141 				 "shared");
11142 
11143 	if (change_count > 0) {
11144 		for (i = 0; i < NUM_USABLE_VLS; i++) {
11145 			if (!valid_vl(i))
11146 				continue;
11147 
11148 			if (lowering_dedicated[i]) {
11149 				set_vl_dedicated(dd, i,
11150 						 be16_to_cpu(new_bc->
11151 							     vl[i].dedicated));
11152 				cur_bc.vl[i].dedicated =
11153 						new_bc->vl[i].dedicated;
11154 			}
11155 		}
11156 
11157 		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11158 
11159 		/* now raise all dedicated that are going up */
11160 		for (i = 0; i < NUM_USABLE_VLS; i++) {
11161 			if (!valid_vl(i))
11162 				continue;
11163 
11164 			if (be16_to_cpu(new_bc->vl[i].dedicated) >
11165 					be16_to_cpu(cur_bc.vl[i].dedicated))
11166 				set_vl_dedicated(dd, i,
11167 						 be16_to_cpu(new_bc->
11168 							     vl[i].dedicated));
11169 		}
11170 	}
11171 
11172 	/* next raise all shared that are going up */
11173 	for (i = 0; i < NUM_USABLE_VLS; i++) {
11174 		if (!valid_vl(i))
11175 			continue;
11176 
11177 		if (be16_to_cpu(new_bc->vl[i].shared) >
11178 				be16_to_cpu(cur_bc.vl[i].shared))
11179 			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11180 	}
11181 
11182 	/* finally raise the global shared */
11183 	if (be16_to_cpu(new_bc->overall_shared_limit) >
11184 	    be16_to_cpu(cur_bc.overall_shared_limit))
11185 		set_global_shared(dd,
11186 				  be16_to_cpu(new_bc->overall_shared_limit));
11187 
11188 	/* bracket the credit change with a total adjustment */
11189 	if (new_total < cur_total)
11190 		set_global_limit(dd, new_total);
11191 
11192 	/*
11193 	 * Determine the actual number of operational VLS using the number of
11194 	 * dedicated and shared credits for each VL.
11195 	 */
11196 	if (change_count > 0) {
11197 		for (i = 0; i < TXE_NUM_DATA_VL; i++)
11198 			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11199 			    be16_to_cpu(new_bc->vl[i].shared) > 0)
11200 				vl_count++;
11201 		ppd->actual_vls_operational = vl_count;
11202 		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11203 				    ppd->actual_vls_operational :
11204 				    ppd->vls_operational,
11205 				    NULL);
11206 		if (ret == 0)
11207 			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11208 					   ppd->actual_vls_operational :
11209 					   ppd->vls_operational, NULL);
11210 		if (ret)
11211 			return ret;
11212 	}
11213 	return 0;
11214 }
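
/*
 * A summary of the ordering implemented above (a reading aid, not new
 * behavior): credits are always released before they are claimed, so
 * the chip is never over-committed mid-change.
 *
 *   1. If the total is growing, raise the global limit first.
 *   2. Zero the shared limit of every changing VL (and the overall
 *      shared limit, if it is shrinking) and wait for those shared
 *      credits to be returned.
 *   3. Lower any dedicated limits that are shrinking, and wait again.
 *   4. Only then raise: dedicated limits, per-VL shared limits, and
 *      finally the overall shared limit.
 *   5. If the total is shrinking, lower the global limit last.
 */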
11215 
11216 /*
11217  * Read the given fabric manager table. Return the size of the
11218  * table (in bytes) on success, and a negative error code on
11219  * failure.
11220  */
11221 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11222 {
11224 	int size;
11225 	struct vl_arb_cache *vlc;
11226 
11227 	switch (which) {
11228 	case FM_TBL_VL_HIGH_ARB:
11229 		size = 256;
11230 		/*
11231 		 * OPA specifies 128 elements (of 2 bytes each), though
11232 		 * HFI supports only 16 elements in h/w.
11233 		 */
11234 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11235 		vl_arb_get_cache(vlc, t);
11236 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11237 		break;
11238 	case FM_TBL_VL_LOW_ARB:
11239 		size = 256;
11240 		/*
11241 		 * OPA specifies 128 elements (of 2 bytes each), though
11242 		 * HFI supports only 16 elements in h/w.
11243 		 */
11244 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11245 		vl_arb_get_cache(vlc, t);
11246 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11247 		break;
11248 	case FM_TBL_BUFFER_CONTROL:
11249 		size = get_buffer_control(ppd->dd, t, NULL);
11250 		break;
11251 	case FM_TBL_SC2VLNT:
11252 		size = get_sc2vlnt(ppd->dd, t);
11253 		break;
11254 	case FM_TBL_VL_PREEMPT_ELEMS:
11255 		size = 256;
11256 		/* OPA specifies 128 elements, of 2 bytes each */
11257 		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11258 		break;
11259 	case FM_TBL_VL_PREEMPT_MATRIX:
11260 		size = 256;
11261 		/*
11262 		 * OPA specifies that this is the same size as the VL
11263 		 * arbitration tables (i.e., 256 bytes).
11264 		 */
11265 		break;
11266 	default:
11267 		return -EINVAL;
11268 	}
11269 	return size;
11270 }
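
/*
 * Illustrative use of fm_get_table() (hypothetical caller): the
 * buffer must be large enough for the requested table, 256 bytes for
 * the VL arbitration cases above.
 *
 *	u8 buf[256];
 *	int sz = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, buf);
 *
 * sz is then the table size in bytes, or -EINVAL for an unknown
 * table.
 */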
11271 
11272 /*
11273  * Write the given fabric manager table.
11274  */
11275 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11276 {
11277 	int ret = 0;
11278 	struct vl_arb_cache *vlc;
11279 
11280 	switch (which) {
11281 	case FM_TBL_VL_HIGH_ARB:
11282 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11283 		if (vl_arb_match_cache(vlc, t)) {
11284 			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11285 			break;
11286 		}
11287 		vl_arb_set_cache(vlc, t);
11288 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11289 		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11290 				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11291 		break;
11292 	case FM_TBL_VL_LOW_ARB:
11293 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11294 		if (vl_arb_match_cache(vlc, t)) {
11295 			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11296 			break;
11297 		}
11298 		vl_arb_set_cache(vlc, t);
11299 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11300 		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11301 				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11302 		break;
11303 	case FM_TBL_BUFFER_CONTROL:
11304 		ret = set_buffer_control(ppd, t);
11305 		break;
11306 	case FM_TBL_SC2VLNT:
11307 		set_sc2vlnt(ppd->dd, t);
11308 		break;
11309 	default:
11310 		ret = -EINVAL;
11311 	}
11312 	return ret;
11313 }
11314 
11315 /*
11316  * Disable all data VLs.
11317  *
11318  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11319  */
11320 static int disable_data_vls(struct hfi1_devdata *dd)
11321 {
11322 	if (is_ax(dd))
11323 		return 1;
11324 
11325 	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11326 
11327 	return 0;
11328 }
11329 
11330 /*
11331  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11332  * Just re-enables all data VLs (the "fill" part happens
11333  * automatically - the name was chosen for symmetry with
11334  * stop_drain_data_vls()).
11335  *
11336  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11337  */
11338 int open_fill_data_vls(struct hfi1_devdata *dd)
11339 {
11340 	if (is_ax(dd))
11341 		return 1;
11342 
11343 	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11344 
11345 	return 0;
11346 }
11347 
11348 /*
11349  * drain_data_vls() - assumes that disable_data_vls() has been called;
11350  * waits for the occupancy of the per-VL FIFOs, across all contexts and
11351  * SDMA engines, to drop to 0.
11352  */
11353 static void drain_data_vls(struct hfi1_devdata *dd)
11354 {
11355 	sc_wait(dd);
11356 	sdma_wait(dd);
11357 	pause_for_credit_return(dd);
11358 }
11359 
11360 /*
11361  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11362  *
11363  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11364  * meant to be used like this:
11365  *
11366  * stop_drain_data_vls(dd);
11367  * // do things with per-VL resources
11368  * open_fill_data_vls(dd);
11369  */
11370 int stop_drain_data_vls(struct hfi1_devdata *dd)
11371 {
11372 	int ret;
11373 
11374 	ret = disable_data_vls(dd);
11375 	if (ret == 0)
11376 		drain_data_vls(dd);
11377 
11378 	return ret;
11379 }
11380 
11381 /*
11382  * Convert a nanosecond time to a cclock count.  No matter how slow
11383  * the cclock, a non-zero ns will always have a non-zero result.
11384  */
11385 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11386 {
11387 	u32 cclocks;
11388 
11389 	if (dd->icode == ICODE_FPGA_EMULATION)
11390 		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11391 	else  /* simulation pretends to be ASIC */
11392 		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11393 	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
11394 		cclocks = 1;
11395 	return cclocks;
11396 }
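
/*
 * Worked example (cclock period illustrative, not the real value):
 * with a 1000 ps cclock, ns_to_cclock(dd, 824) returns
 * (824 * 1000) / 1000 = 824 cclocks, while ns_to_cclock(dd, 0)
 * returns 0; the "at least 1" clamp only applies when ns is
 * non-zero.
 */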
11397 
11398 /*
11399  * Convert a cclock count to nanoseconds.  No matter how slow
11400  * the cclock, a non-zero cclock count will always have a non-zero result.
11401  */
11402 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11403 {
11404 	u32 ns;
11405 
11406 	if (dd->icode == ICODE_FPGA_EMULATION)
11407 		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11408 	else  /* simulation pretends to be ASIC */
11409 		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11410 	if (cclocks && !ns)
11411 		ns = 1;
11412 	return ns;
11413 }
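
/*
 * Both conversions round down, so a round trip through
 * ns_to_cclock()/cclock_to_ns() can lose up to one unit.  E.g. with a
 * hypothetical 1234 ps cclock: 10 ns -> 8 cclocks -> 9 ns.
 */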
11414 
11415 /*
11416  * Dynamically adjust the receive interrupt timeout for a context based on
11417  * incoming packet rate.
11418  *
11419  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11420  */
11421 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11422 {
11423 	struct hfi1_devdata *dd = rcd->dd;
11424 	u32 timeout = rcd->rcvavail_timeout;
11425 
11426 	/*
11427 	 * This algorithm doubles or halves the timeout depending on whether
11428 	 * the number of packets received in this interrupt was less than,
11429 	 * or greater than or equal to, the interrupt count.
11430 	 *
11431 	 * The calculations below do not allow a steady state to be achieved.
11432 	 * Only at the endpoints is it possible to have an unchanging
11433 	 * timeout.
11434 	 */
11435 	if (npkts < rcv_intr_count) {
11436 		/*
11437 		 * Not enough packets arrived before the timeout, adjust
11438 		 * timeout downward.
11439 		 */
11440 		if (timeout < 2) /* already at minimum? */
11441 			return;
11442 		timeout >>= 1;
11443 	} else {
11444 		/*
11445 		 * More than enough packets arrived before the timeout, adjust
11446 		 * timeout upward.
11447 		 */
11448 		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11449 			return;
11450 		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11451 	}
11452 
11453 	rcd->rcvavail_timeout = timeout;
11454 	/*
11455 	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11456 	 * been verified to be in range
11457 	 */
11458 	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11459 			(u64)timeout <<
11460 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11461 }
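
/*
 * Example of the doubling/halving above (values illustrative): with
 * rcv_intr_count == 16 and a current timeout of 100, an interrupt
 * that handled 10 packets halves the timeout to 50, while one that
 * handled 20 packets doubles it to 200, capped at
 * dd->rcv_intr_timeout_csr.
 */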
11462 
11463 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11464 		    u32 intr_adjust, u32 npkts)
11465 {
11466 	struct hfi1_devdata *dd = rcd->dd;
11467 	u64 reg;
11468 	u32 ctxt = rcd->ctxt;
11469 
11470 	/*
11471 	 * Need to write timeout register before updating RcvHdrHead to ensure
11472 	 * that a new value is used when the HW decides to restart counting.
11473 	 */
11474 	if (intr_adjust)
11475 		adjust_rcv_timeout(rcd, npkts);
11476 	if (updegr) {
11477 		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11478 			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11479 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11480 	}
11481 	mmiowb();
11482 	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11483 		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11484 			<< RCV_HDR_HEAD_HEAD_SHIFT);
11485 	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11486 	mmiowb();
11487 }
11488 
11489 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11490 {
11491 	u32 head, tail;
11492 
11493 	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11494 		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11495 
11496 	if (rcd->rcvhdrtail_kvaddr)
11497 		tail = get_rcvhdrtail(rcd);
11498 	else
11499 		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11500 
11501 	return head == tail;
11502 }
11503 
11504 /*
11505  * Context Control and Receive Array encoding for buffer size:
11506  *	0x0 invalid
11507  *	0x1   4 KB
11508  *	0x2   8 KB
11509  *	0x3  16 KB
11510  *	0x4  32 KB
11511  *	0x5  64 KB
11512  *	0x6 128 KB
11513  *	0x7 256 KB
11514  *	0x8 512 KB (Receive Array only)
11515  *	0x9   1 MB (Receive Array only)
11516  *	0xa   2 MB (Receive Array only)
11517  *
11518  *	0xb-0xf reserved (Receive Array only)
11519  *
11521  * This routine assumes that the value has already been sanity checked.
11522  */
11523 static u32 encoded_size(u32 size)
11524 {
11525 	switch (size) {
11526 	case   4 * 1024: return 0x1;
11527 	case   8 * 1024: return 0x2;
11528 	case  16 * 1024: return 0x3;
11529 	case  32 * 1024: return 0x4;
11530 	case  64 * 1024: return 0x5;
11531 	case 128 * 1024: return 0x6;
11532 	case 256 * 1024: return 0x7;
11533 	case 512 * 1024: return 0x8;
11534 	case   1 * 1024 * 1024: return 0x9;
11535 	case   2 * 1024 * 1024: return 0xa;
11536 	}
11537 	return 0x1;	/* if invalid, go with the minimum size */
11538 }
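
/*
 * The encoding above is ilog2(size) - 11 for every supported size,
 * e.g. encoded_size(64 * 1024) == 0x5 because ilog2(65536) == 16 and
 * 16 - 11 == 5.
 */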
11539 
11540 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11541 {
11542 	struct hfi1_ctxtdata *rcd;
11543 	u64 rcvctrl, reg;
11544 	int did_enable = 0;
11545 
11546 	rcd = dd->rcd[ctxt];
11547 	if (!rcd)
11548 		return;
11549 
11550 	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11551 
11552 	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11553 	/* if the context is already enabled, don't do the extra steps */
11554 	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11555 	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11556 		/* reset the tail and hdr addresses, and sequence count */
11557 		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11558 				rcd->rcvhdrq_dma);
11559 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11560 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11561 					rcd->rcvhdrqtailaddr_dma);
11562 		rcd->seq_cnt = 1;
11563 
11564 		/* reset the cached receive header queue head value */
11565 		rcd->head = 0;
11566 
11567 		/*
11568 		 * Zero the receive header queue so we don't get false
11569 		 * positives when checking the sequence number.  The
11570 		 * sequence numbers could land exactly on the same spot.
11571 		 * E.g. an rcd restart before the receive header queue wrapped.
11572 		 */
11573 		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11574 
11575 		/* starting timeout */
11576 		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11577 
11578 		/* enable the context */
11579 		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11580 
11581 		/* clean the egr buffer size first */
11582 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11583 		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11584 				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11585 					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11586 
11587 		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11588 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11589 		did_enable = 1;
11590 
11591 		/* zero RcvEgrIndexHead */
11592 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11593 
11594 		/* set eager count and base index */
11595 		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11596 			& RCV_EGR_CTRL_EGR_CNT_MASK)
11597 		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11598 			(((rcd->eager_base >> RCV_SHIFT)
11599 			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11600 			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11601 		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11602 
11603 		/*
11604 		 * Set TID (expected) count and base index.
11605 		 * rcd->expected_count is set to individual RcvArray entries,
11606 		 * not pairs, and the CSR takes a pair-count in groups of
11607 		 * four, so divide by 8.
11608 		 */
11609 		reg = (((rcd->expected_count >> RCV_SHIFT)
11610 					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11611 				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11612 		      (((rcd->expected_base >> RCV_SHIFT)
11613 					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11614 				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11615 		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11616 		if (ctxt == HFI1_CTRL_CTXT)
11617 			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11618 	}
11619 	if (op & HFI1_RCVCTRL_CTXT_DIS) {
11620 		write_csr(dd, RCV_VL15, 0);
11621 		/*
11622 		 * When receive context is being disabled turn on tail
11623 		 * update with a dummy tail address and then disable
11624 		 * receive context.
11625 		 */
11626 		if (dd->rcvhdrtail_dummy_dma) {
11627 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11628 					dd->rcvhdrtail_dummy_dma);
11629 			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11630 			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11631 		}
11632 
11633 		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11634 	}
11635 	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11636 		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11637 	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11638 		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11639 	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11640 		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11641 	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11642 		/* See comment on RcvCtxtCtrl.TailUpd above */
11643 		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11644 			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11645 	}
11646 	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11647 		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11648 	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11649 		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11650 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11651 		/*
11652 		 * In one-packet-per-eager mode, the size comes from
11653 		 * the RcvArray entry.
11654 		 */
11655 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11656 		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11657 	}
11658 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11659 		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11660 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11661 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11662 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11663 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11664 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11665 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11666 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11667 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11668 	rcd->rcvctrl = rcvctrl;
11669 	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11670 	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11671 
11672 	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
11673 	if (did_enable &&
11674 	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11675 		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11676 		if (reg != 0) {
11677 			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11678 				    ctxt, reg);
11679 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11680 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11681 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11682 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11683 			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11684 			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11685 				    ctxt, reg, reg == 0 ? "not" : "still");
11686 		}
11687 	}
11688 
11689 	if (did_enable) {
11690 		/*
11691 		 * The interrupt timeout and count must be set after
11692 		 * the context is enabled to take effect.
11693 		 */
11694 		/* set interrupt timeout */
11695 		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11696 				(u64)rcd->rcvavail_timeout <<
11697 				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11698 
11699 		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11700 		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11701 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11702 	}
11703 
11704 	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11705 		/*
11706 		 * If the context has been disabled and the Tail Update has
11707 		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11708 		 * so it doesn't contain an address that is invalid.
11709 		 */
11710 		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11711 				dd->rcvhdrtail_dummy_dma);
11712 }
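
/*
 * Typical usage of hfi1_rcvctrl() (a hypothetical sequence): enable a
 * context together with its interrupt and tail updates, then disable
 * it later.
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB |
 *			 HFI1_RCVCTRL_INTRAVAIL_ENB |
 *			 HFI1_RCVCTRL_TAILUPD_ENB, ctxt);
 *	...
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, ctxt);
 */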
11713 
11714 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11715 {
11716 	int ret;
11717 	u64 val = 0;
11718 
11719 	if (namep) {
11720 		ret = dd->cntrnameslen;
11721 		*namep = dd->cntrnames;
11722 	} else {
11723 		const struct cntr_entry *entry;
11724 		int i, j;
11725 
11726 		ret = (dd->ndevcntrs) * sizeof(u64);
11727 
11728 		/* Get the start of the block of counters */
11729 		*cntrp = dd->cntrs;
11730 
11731 		/*
11732 		 * Now go and fill in each counter in the block.
11733 		 */
11734 		for (i = 0; i < DEV_CNTR_LAST; i++) {
11735 			entry = &dev_cntrs[i];
11736 			hfi1_cdbg(CNTR, "reading %s", entry->name);
11737 			if (entry->flags & CNTR_DISABLED) {
11738 				/* Nothing */
11739 				hfi1_cdbg(CNTR, "\tDisabled\n");
11740 			} else {
11741 				if (entry->flags & CNTR_VL) {
11742 					hfi1_cdbg(CNTR, "\tPer VL\n");
11743 					for (j = 0; j < C_VL_COUNT; j++) {
11744 						val = entry->rw_cntr(entry,
11745 								  dd, j,
11746 								  CNTR_MODE_R,
11747 								  0);
11748 						hfi1_cdbg(
11749 						   CNTR,
11750 						   "\t\tRead 0x%llx for %d\n",
11751 						   val, j);
11752 						dd->cntrs[entry->offset + j] =
11753 									    val;
11754 					}
11755 				} else if (entry->flags & CNTR_SDMA) {
11756 					hfi1_cdbg(CNTR,
11757 						  "\t Per SDMA Engine\n");
11758 					for (j = 0; j < dd->chip_sdma_engines;
11759 					     j++) {
11760 						val =
11761 						entry->rw_cntr(entry, dd, j,
11762 							       CNTR_MODE_R, 0);
11763 						hfi1_cdbg(CNTR,
11764 							  "\t\tRead 0x%llx for %d\n",
11765 							  val, j);
11766 						dd->cntrs[entry->offset + j] =
11767 									val;
11768 					}
11769 				} else {
11770 					val = entry->rw_cntr(entry, dd,
11771 							CNTR_INVALID_VL,
11772 							CNTR_MODE_R, 0);
11773 					dd->cntrs[entry->offset] = val;
11774 					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11775 				}
11776 			}
11777 		}
11778 	}
11779 	return ret;
11780 }
11781 
11782 /*
11783  * Used by sysfs to create files for hfi stats to read
11784  */
11785 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11786 {
11787 	int ret;
11788 	u64 val = 0;
11789 
11790 	if (namep) {
11791 		ret = ppd->dd->portcntrnameslen;
11792 		*namep = ppd->dd->portcntrnames;
11793 	} else {
11794 		const struct cntr_entry *entry;
11795 		int i, j;
11796 
11797 		ret = ppd->dd->nportcntrs * sizeof(u64);
11798 		*cntrp = ppd->cntrs;
11799 
11800 		for (i = 0; i < PORT_CNTR_LAST; i++) {
11801 			entry = &port_cntrs[i];
11802 			hfi1_cdbg(CNTR, "reading %s", entry->name);
11803 			if (entry->flags & CNTR_DISABLED) {
11804 				/* Nothing */
11805 				hfi1_cdbg(CNTR, "\tDisabled\n");
11806 				continue;
11807 			}
11808 
11809 			if (entry->flags & CNTR_VL) {
11810 				hfi1_cdbg(CNTR, "\tPer VL");
11811 				for (j = 0; j < C_VL_COUNT; j++) {
11812 					val = entry->rw_cntr(entry, ppd, j,
11813 							       CNTR_MODE_R,
11814 							       0);
11815 					hfi1_cdbg(
11816 					   CNTR,
11817 					   "\t\tRead 0x%llx for %d",
11818 					   val, j);
11819 					ppd->cntrs[entry->offset + j] = val;
11820 				}
11821 			} else {
11822 				val = entry->rw_cntr(entry, ppd,
11823 						       CNTR_INVALID_VL,
11824 						       CNTR_MODE_R,
11825 						       0);
11826 				ppd->cntrs[entry->offset] = val;
11827 				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11828 			}
11829 		}
11830 	}
11831 	return ret;
11832 }
11833 
11834 static void free_cntrs(struct hfi1_devdata *dd)
11835 {
11836 	struct hfi1_pportdata *ppd;
11837 	int i;
11838 
11839 	if (dd->synth_stats_timer.data)
11840 		del_timer_sync(&dd->synth_stats_timer);
11841 	dd->synth_stats_timer.data = 0;
11842 	ppd = (struct hfi1_pportdata *)(dd + 1);
11843 	for (i = 0; i < dd->num_pports; i++, ppd++) {
11844 		kfree(ppd->cntrs);
11845 		kfree(ppd->scntrs);
11846 		free_percpu(ppd->ibport_data.rvp.rc_acks);
11847 		free_percpu(ppd->ibport_data.rvp.rc_qacks);
11848 		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11849 		ppd->cntrs = NULL;
11850 		ppd->scntrs = NULL;
11851 		ppd->ibport_data.rvp.rc_acks = NULL;
11852 		ppd->ibport_data.rvp.rc_qacks = NULL;
11853 		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11854 	}
11855 	kfree(dd->portcntrnames);
11856 	dd->portcntrnames = NULL;
11857 	kfree(dd->cntrs);
11858 	dd->cntrs = NULL;
11859 	kfree(dd->scntrs);
11860 	dd->scntrs = NULL;
11861 	kfree(dd->cntrnames);
11862 	dd->cntrnames = NULL;
11863 }
11864 
11865 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11866 			      u64 *psval, void *context, int vl)
11867 {
11868 	u64 val;
11869 	u64 sval = *psval;
11870 
11871 	if (entry->flags & CNTR_DISABLED) {
11872 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
11873 		return 0;
11874 	}
11875 
11876 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11877 
11878 	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11879 
11880 	/* If it's a synthetic counter, there is more work we need to do */
11881 	if (entry->flags & CNTR_SYNTH) {
11882 		if (sval == CNTR_MAX) {
11883 			/* No need to read, it is already saturated */
11884 			return CNTR_MAX;
11885 		}
11886 
11887 		if (entry->flags & CNTR_32BIT) {
11888 			/* 32bit counters can wrap multiple times */
11889 			u64 upper = sval >> 32;
11890 			u64 lower = (sval << 32) >> 32;
11891 
11892 			if (lower > val) { /* hw wrapped */
11893 				if (upper == CNTR_32BIT_MAX)
11894 					val = CNTR_MAX;
11895 				else
11896 					upper++;
11897 			}
11898 
11899 			if (val != CNTR_MAX)
11900 				val = (upper << 32) | val;
11901 
11902 		} else {
11903 			/* If we rolled we are saturated */
11904 			if ((val < sval) || (val > CNTR_MAX))
11905 				val = CNTR_MAX;
11906 		}
11907 	}
11908 
11909 	*psval = val;
11910 
11911 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11912 
11913 	return val;
11914 }
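
/*
 * 32-bit wrap example for the synthetic path above: with a saved
 * value of 0x100000010 (upper == 1, lower == 0x10), a hardware read
 * of 0x5 means the counter wrapped, so upper is bumped to 2 and the
 * returned value is 0x200000005.
 */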
11915 
11916 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11917 			       struct cntr_entry *entry,
11918 			       u64 *psval, void *context, int vl, u64 data)
11919 {
11920 	u64 val;
11921 
11922 	if (entry->flags & CNTR_DISABLED) {
11923 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
11924 		return 0;
11925 	}
11926 
11927 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11928 
11929 	if (entry->flags & CNTR_SYNTH) {
11930 		*psval = data;
11931 		if (entry->flags & CNTR_32BIT) {
11932 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11933 					     (data << 32) >> 32);
11934 			val = data; /* return the full 64bit value */
11935 		} else {
11936 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11937 					     data);
11938 		}
11939 	} else {
11940 		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11941 	}
11942 
11943 	*psval = val;
11944 
11945 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11946 
11947 	return val;
11948 }
11949 
11950 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11951 {
11952 	struct cntr_entry *entry;
11953 	u64 *sval;
11954 
11955 	entry = &dev_cntrs[index];
11956 	sval = dd->scntrs + entry->offset;
11957 
11958 	if (vl != CNTR_INVALID_VL)
11959 		sval += vl;
11960 
11961 	return read_dev_port_cntr(dd, entry, sval, dd, vl);
11962 }
11963 
11964 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11965 {
11966 	struct cntr_entry *entry;
11967 	u64 *sval;
11968 
11969 	entry = &dev_cntrs[index];
11970 	sval = dd->scntrs + entry->offset;
11971 
11972 	if (vl != CNTR_INVALID_VL)
11973 		sval += vl;
11974 
11975 	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11976 }
11977 
11978 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11979 {
11980 	struct cntr_entry *entry;
11981 	u64 *sval;
11982 
11983 	entry = &port_cntrs[index];
11984 	sval = ppd->scntrs + entry->offset;
11985 
11986 	if (vl != CNTR_INVALID_VL)
11987 		sval += vl;
11988 
11989 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11990 	    (index <= C_RCV_HDR_OVF_LAST)) {
11991 		/* We do not want to bother for disabled contexts */
11992 		return 0;
11993 	}
11994 
11995 	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11996 }
11997 
11998 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11999 {
12000 	struct cntr_entry *entry;
12001 	u64 *sval;
12002 
12003 	entry = &port_cntrs[index];
12004 	sval = ppd->scntrs + entry->offset;
12005 
12006 	if (vl != CNTR_INVALID_VL)
12007 		sval += vl;
12008 
12009 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12010 	    (index <= C_RCV_HDR_OVF_LAST)) {
12011 		/* We do not want to bother for disabled contexts */
12012 		return 0;
12013 	}
12014 
12015 	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12016 }
12017 
12018 static void update_synth_timer(unsigned long opaque)
12019 {
12020 	u64 cur_tx;
12021 	u64 cur_rx;
12022 	u64 total_flits;
12023 	u8 update = 0;
12024 	int i, j, vl;
12025 	struct hfi1_pportdata *ppd;
12026 	struct cntr_entry *entry;
12027 
12028 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12029 
12030 	/*
12031 	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12032 	 * check to watch for potential rollover. We can do this by looking at
12033 	 * the number of flits sent/received. If the total exceeds 32 bits then
12034 	 * we have to iterate over all the counters and update.
12035 	 */
12036 	entry = &dev_cntrs[C_DC_RCV_FLITS];
12037 	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12038 
12039 	entry = &dev_cntrs[C_DC_XMIT_FLITS];
12040 	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12041 
12042 	hfi1_cdbg(
12043 	    CNTR,
12044 	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12045 	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12046 
12047 	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12048 		/*
12049 		 * May not be strictly necessary to update but it won't hurt and
12050 		 * simplifies the logic here.
12051 		 */
12052 		update = 1;
12053 		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12054 			  dd->unit);
12055 	} else {
12056 		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12057 		hfi1_cdbg(CNTR,
12058 			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12059 			  total_flits, (u64)CNTR_32BIT_MAX);
12060 		if (total_flits >= CNTR_32BIT_MAX) {
12061 			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12062 				  dd->unit);
12063 			update = 1;
12064 		}
12065 	}
12066 
12067 	if (update) {
12068 		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12069 		for (i = 0; i < DEV_CNTR_LAST; i++) {
12070 			entry = &dev_cntrs[i];
12071 			if (entry->flags & CNTR_VL) {
12072 				for (vl = 0; vl < C_VL_COUNT; vl++)
12073 					read_dev_cntr(dd, i, vl);
12074 			} else {
12075 				read_dev_cntr(dd, i, CNTR_INVALID_VL);
12076 			}
12077 		}
12078 		ppd = (struct hfi1_pportdata *)(dd + 1);
12079 		for (i = 0; i < dd->num_pports; i++, ppd++) {
12080 			for (j = 0; j < PORT_CNTR_LAST; j++) {
12081 				entry = &port_cntrs[j];
12082 				if (entry->flags & CNTR_VL) {
12083 					for (vl = 0; vl < C_VL_COUNT; vl++)
12084 						read_port_cntr(ppd, j, vl);
12085 				} else {
12086 					read_port_cntr(ppd, j, CNTR_INVALID_VL);
12087 				}
12088 			}
12089 		}
12090 
12091 		/*
12092 		 * We want the value in the register. The goal is to keep track
12093 		 * of the number of "ticks", not the counter value. In other
12094 		 * words, if the register rolls we want to notice it and go
12095 		 * ahead and force an update.
12096 		 */
12097 		entry = &dev_cntrs[C_DC_XMIT_FLITS];
12098 		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12099 						CNTR_MODE_R, 0);
12100 
12101 		entry = &dev_cntrs[C_DC_RCV_FLITS];
12102 		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12103 						CNTR_MODE_R, 0);
12104 
12105 		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12106 			  dd->unit, dd->last_tx, dd->last_rx);
12107 
12108 	} else {
12109 		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12110 	}
12111 
12112 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12113 }
12114 
12115 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
12116 static int init_cntrs(struct hfi1_devdata *dd)
12117 {
12118 	int i, rcv_ctxts, j;
12119 	size_t sz;
12120 	char *p;
12121 	char name[C_MAX_NAME];
12122 	struct hfi1_pportdata *ppd;
12123 	const char *bit_type_32 = ",32";
12124 	const int bit_type_32_sz = strlen(bit_type_32);
12125 
12126 	/* set up the stats timer; the mod_timer is done at the end */
12127 	setup_timer(&dd->synth_stats_timer, update_synth_timer,
12128 		    (unsigned long)dd);
12129 
12130 	/***********************/
12131 	/* per device counters */
12132 	/***********************/
12133 
12134 	/* size names and determine how many we have */
12135 	dd->ndevcntrs = 0;
12136 	sz = 0;
12137 
12138 	for (i = 0; i < DEV_CNTR_LAST; i++) {
12139 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12140 			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12141 			continue;
12142 		}
12143 
12144 		if (dev_cntrs[i].flags & CNTR_VL) {
12145 			dev_cntrs[i].offset = dd->ndevcntrs;
12146 			for (j = 0; j < C_VL_COUNT; j++) {
12147 				snprintf(name, C_MAX_NAME, "%s%d",
12148 					 dev_cntrs[i].name, vl_from_idx(j));
12149 				sz += strlen(name);
12150 				/* Add ",32" for 32-bit counters */
12151 				if (dev_cntrs[i].flags & CNTR_32BIT)
12152 					sz += bit_type_32_sz;
12153 				sz++;
12154 				dd->ndevcntrs++;
12155 			}
12156 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12157 			dev_cntrs[i].offset = dd->ndevcntrs;
12158 			for (j = 0; j < dd->chip_sdma_engines; j++) {
12159 				snprintf(name, C_MAX_NAME, "%s%d",
12160 					 dev_cntrs[i].name, j);
12161 				sz += strlen(name);
12162 				/* Add ",32" for 32-bit counters */
12163 				if (dev_cntrs[i].flags & CNTR_32BIT)
12164 					sz += bit_type_32_sz;
12165 				sz++;
12166 				dd->ndevcntrs++;
12167 			}
12168 		} else {
12169 			/* +1 for newline. */
12170 			sz += strlen(dev_cntrs[i].name) + 1;
12171 			/* Add ",32" for 32-bit counters */
12172 			if (dev_cntrs[i].flags & CNTR_32BIT)
12173 				sz += bit_type_32_sz;
12174 			dev_cntrs[i].offset = dd->ndevcntrs;
12175 			dd->ndevcntrs++;
12176 		}
12177 	}
12178 
12179 	/* allocate space for the counter values */
12180 	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12181 	if (!dd->cntrs)
12182 		goto bail;
12183 
12184 	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12185 	if (!dd->scntrs)
12186 		goto bail;
12187 
12188 	/* allocate space for the counter names */
12189 	dd->cntrnameslen = sz;
12190 	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12191 	if (!dd->cntrnames)
12192 		goto bail;
12193 
12194 	/* fill in the names */
12195 	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12196 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12197 			/* Nothing */
12198 		} else if (dev_cntrs[i].flags & CNTR_VL) {
12199 			for (j = 0; j < C_VL_COUNT; j++) {
12200 				snprintf(name, C_MAX_NAME, "%s%d",
12201 					 dev_cntrs[i].name,
12202 					 vl_from_idx(j));
12203 				memcpy(p, name, strlen(name));
12204 				p += strlen(name);
12205 
12206 				/* Counter is 32 bits */
12207 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12208 					memcpy(p, bit_type_32, bit_type_32_sz);
12209 					p += bit_type_32_sz;
12210 				}
12211 
12212 				*p++ = '\n';
12213 			}
12214 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12215 			for (j = 0; j < dd->chip_sdma_engines; j++) {
12216 				snprintf(name, C_MAX_NAME, "%s%d",
12217 					 dev_cntrs[i].name, j);
12218 				memcpy(p, name, strlen(name));
12219 				p += strlen(name);
12220 
12221 				/* Counter is 32 bits */
12222 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12223 					memcpy(p, bit_type_32, bit_type_32_sz);
12224 					p += bit_type_32_sz;
12225 				}
12226 
12227 				*p++ = '\n';
12228 			}
12229 		} else {
12230 			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12231 			p += strlen(dev_cntrs[i].name);
12232 
12233 			/* Counter is 32 bits */
12234 			if (dev_cntrs[i].flags & CNTR_32BIT) {
12235 				memcpy(p, bit_type_32, bit_type_32_sz);
12236 				p += bit_type_32_sz;
12237 			}
12238 
12239 			*p++ = '\n';
12240 		}
12241 	}
12242 
12243 	/*********************/
12244 	/* per port counters */
12245 	/*********************/
12246 
12247 	/*
12248 	 * Go through the counters for the overflows and disable the ones we
12249 	 * don't need. This varies based on platform so we need to do it
12250 	 * dynamically here.
12251 	 */
12252 	rcv_ctxts = dd->num_rcv_contexts;
12253 	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12254 	     i <= C_RCV_HDR_OVF_LAST; i++) {
12255 		port_cntrs[i].flags |= CNTR_DISABLED;
12256 	}
12257 
12258 	/* size port counter names and determine how many we have */
12259 	sz = 0;
12260 	dd->nportcntrs = 0;
12261 	for (i = 0; i < PORT_CNTR_LAST; i++) {
12262 		if (port_cntrs[i].flags & CNTR_DISABLED) {
12263 			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12264 			continue;
12265 		}
12266 
12267 		if (port_cntrs[i].flags & CNTR_VL) {
12268 			port_cntrs[i].offset = dd->nportcntrs;
12269 			for (j = 0; j < C_VL_COUNT; j++) {
12270 				snprintf(name, C_MAX_NAME, "%s%d",
12271 					 port_cntrs[i].name, vl_from_idx(j));
12272 				sz += strlen(name);
12273 				/* Add ",32" for 32-bit counters */
12274 				if (port_cntrs[i].flags & CNTR_32BIT)
12275 					sz += bit_type_32_sz;
12276 				sz++;
12277 				dd->nportcntrs++;
12278 			}
12279 		} else {
12280 			/* +1 for newline */
12281 			sz += strlen(port_cntrs[i].name) + 1;
12282 			/* Add ",32" for 32-bit counters */
12283 			if (port_cntrs[i].flags & CNTR_32BIT)
12284 				sz += bit_type_32_sz;
12285 			port_cntrs[i].offset = dd->nportcntrs;
12286 			dd->nportcntrs++;
12287 		}
12288 	}
12289 
12290 	/* allocate space for the counter names */
12291 	dd->portcntrnameslen = sz;
12292 	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12293 	if (!dd->portcntrnames)
12294 		goto bail;
12295 
12296 	/* fill in port cntr names */
12297 	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12298 		if (port_cntrs[i].flags & CNTR_DISABLED)
12299 			continue;
12300 
12301 		if (port_cntrs[i].flags & CNTR_VL) {
12302 			for (j = 0; j < C_VL_COUNT; j++) {
12303 				snprintf(name, C_MAX_NAME, "%s%d",
12304 					 port_cntrs[i].name, vl_from_idx(j));
12305 				memcpy(p, name, strlen(name));
12306 				p += strlen(name);
12307 
12308 				/* Counter is 32 bits */
12309 				if (port_cntrs[i].flags & CNTR_32BIT) {
12310 					memcpy(p, bit_type_32, bit_type_32_sz);
12311 					p += bit_type_32_sz;
12312 				}
12313 
12314 				*p++ = '\n';
12315 			}
12316 		} else {
12317 			memcpy(p, port_cntrs[i].name,
12318 			       strlen(port_cntrs[i].name));
12319 			p += strlen(port_cntrs[i].name);
12320 
12321 			/* Counter is 32 bits */
12322 			if (port_cntrs[i].flags & CNTR_32BIT) {
12323 				memcpy(p, bit_type_32, bit_type_32_sz);
12324 				p += bit_type_32_sz;
12325 			}
12326 
12327 			*p++ = '\n';
12328 		}
12329 	}
12330 
12331 	/* allocate per port storage for counter values */
12332 	ppd = (struct hfi1_pportdata *)(dd + 1);
12333 	for (i = 0; i < dd->num_pports; i++, ppd++) {
12334 		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12335 		if (!ppd->cntrs)
12336 			goto bail;
12337 
12338 		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12339 		if (!ppd->scntrs)
12340 			goto bail;
12341 	}
12342 
12343 	/* CPU counters need to be allocated and zeroed */
12344 	if (init_cpu_counters(dd))
12345 		goto bail;
12346 
12347 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12348 	return 0;
12349 bail:
12350 	free_cntrs(dd);
12351 	return -ENOMEM;
12352 }
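
/*
 * The name buffers built above are newline-separated, one entry per
 * counter value, with ",32" appended to 32-bit counters.  A purely
 * hypothetical fragment:
 *
 *	"RxFlitVL0,32\nRxFlitVL1,32\n...\nTxWords\n"
 */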
12353 
12354 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12355 {
12356 	switch (chip_lstate) {
12357 	default:
12358 		dd_dev_err(dd,
12359 			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12360 			   chip_lstate);
12361 		/* fall through */
12362 	case LSTATE_DOWN:
12363 		return IB_PORT_DOWN;
12364 	case LSTATE_INIT:
12365 		return IB_PORT_INIT;
12366 	case LSTATE_ARMED:
12367 		return IB_PORT_ARMED;
12368 	case LSTATE_ACTIVE:
12369 		return IB_PORT_ACTIVE;
12370 	}
12371 }
12372 
12373 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12374 {
12375 	/* look at the HFI meta-states only */
12376 	switch (chip_pstate & 0xf0) {
12377 	default:
12378 		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12379 			   chip_pstate);
12380 		/* fall through */
12381 	case PLS_DISABLED:
12382 		return IB_PORTPHYSSTATE_DISABLED;
12383 	case PLS_OFFLINE:
12384 		return OPA_PORTPHYSSTATE_OFFLINE;
12385 	case PLS_POLLING:
12386 		return IB_PORTPHYSSTATE_POLLING;
12387 	case PLS_CONFIGPHY:
12388 		return IB_PORTPHYSSTATE_TRAINING;
12389 	case PLS_LINKUP:
12390 		return IB_PORTPHYSSTATE_LINKUP;
12391 	case PLS_PHYTEST:
12392 		return IB_PORTPHYSSTATE_PHY_TEST;
12393 	}
12394 }
12395 
12396 /* return the OPA port logical state name */
12397 const char *opa_lstate_name(u32 lstate)
12398 {
12399 	static const char * const port_logical_names[] = {
12400 		"PORT_NOP",
12401 		"PORT_DOWN",
12402 		"PORT_INIT",
12403 		"PORT_ARMED",
12404 		"PORT_ACTIVE",
12405 		"PORT_ACTIVE_DEFER",
12406 	};
12407 	if (lstate < ARRAY_SIZE(port_logical_names))
12408 		return port_logical_names[lstate];
12409 	return "unknown";
12410 }
12411 
12412 /* return the OPA port physical state name */
12413 const char *opa_pstate_name(u32 pstate)
12414 {
12415 	static const char * const port_physical_names[] = {
12416 		"PHYS_NOP",
12417 		"reserved1",
12418 		"PHYS_POLL",
12419 		"PHYS_DISABLED",
12420 		"PHYS_TRAINING",
12421 		"PHYS_LINKUP",
12422 		"PHYS_LINK_ERR_RECOVER",
12423 		"PHYS_PHY_TEST",
12424 		"reserved8",
12425 		"PHYS_OFFLINE",
12426 		"PHYS_GANGED",
12427 		"PHYS_TEST",
12428 	};
12429 	if (pstate < ARRAY_SIZE(port_physical_names))
12430 		return port_physical_names[pstate];
12431 	return "unknown";
12432 }
12433 
12434 /*
12435  * Read the hardware link state and set the driver's cached value of it.
12436  * Return the (new) current value.
12437  */
12438 u32 get_logical_state(struct hfi1_pportdata *ppd)
12439 {
12440 	u32 new_state;
12441 
12442 	new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12443 	if (new_state != ppd->lstate) {
12444 		dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12445 			    opa_lstate_name(new_state), new_state);
12446 		ppd->lstate = new_state;
12447 	}
12448 	/*
12449 	 * Set port status flags in the page mapped into userspace
12450 	 * memory. Do it here to ensure a reliable state - this is
12451 	 * the only function called by all state handling code.
12452 	 * Always set the flags because the cached value
12453 	 * might have been changed explicitly outside of this
12454 	 * function.
12455 	 */
12456 	if (ppd->statusp) {
12457 		switch (ppd->lstate) {
12458 		case IB_PORT_DOWN:
12459 		case IB_PORT_INIT:
12460 			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12461 					   HFI1_STATUS_IB_READY);
12462 			break;
12463 		case IB_PORT_ARMED:
12464 			*ppd->statusp |= HFI1_STATUS_IB_CONF;
12465 			break;
12466 		case IB_PORT_ACTIVE:
12467 			*ppd->statusp |= HFI1_STATUS_IB_READY;
12468 			break;
12469 		}
12470 	}
12471 	return ppd->lstate;
12472 }
12473 
12474 /**
12475  * wait_logical_linkstate - wait for an IB link state change to occur
12476  * @ppd: port device
12477  * @state: the state to wait for
12478  * @msecs: the number of milliseconds to wait
12479  *
12480  * Wait up to msecs milliseconds for IB link state change to occur.
12481  * For now, take the easy polling route.
12482  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12483  */
12484 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12485 				  int msecs)
12486 {
12487 	unsigned long timeout;
12488 
12489 	timeout = jiffies + msecs_to_jiffies(msecs);
12490 	while (1) {
12491 		if (get_logical_state(ppd) == state)
12492 			return 0;
12493 		if (time_after(jiffies, timeout))
12494 			break;
12495 		msleep(20);
12496 	}
12497 	dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12498 
12499 	return -ETIMEDOUT;
12500 }
12501 
12502 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12503 {
12504 	u32 pstate;
12505 	u32 ib_pstate;
12506 
12507 	pstate = read_physical_state(ppd->dd);
12508 	ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12509 	if (ppd->last_pstate != ib_pstate) {
12510 		dd_dev_info(ppd->dd,
12511 			    "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12512 			    __func__, opa_pstate_name(ib_pstate), ib_pstate,
12513 			    pstate);
12514 		ppd->last_pstate = ib_pstate;
12515 	}
12516 	return ib_pstate;
12517 }
12518 
12519 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12520 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12521 
12522 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12523 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12524 
12525 int hfi1_init_ctxt(struct send_context *sc)
12526 {
12527 	if (sc) {
12528 		struct hfi1_devdata *dd = sc->dd;
12529 		u64 reg;
12530 		u8 set = (sc->type == SC_USER ?
12531 			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12532 			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12533 		reg = read_kctxt_csr(dd, sc->hw_context,
12534 				     SEND_CTXT_CHECK_ENABLE);
12535 		if (set)
12536 			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12537 		else
12538 			SET_STATIC_RATE_CONTROL_SMASK(reg);
12539 		write_kctxt_csr(dd, sc->hw_context,
12540 				SEND_CTXT_CHECK_ENABLE, reg);
12541 	}
12542 	return 0;
12543 }
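
/*
 * Note the inversion above: the CSR bit *disallows* PBC static rate
 * control, so when the STATIC_RATE_CTRL capability is set for the
 * context type the disallow bit is cleared, and set otherwise.
 */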
12544 
12545 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12546 {
12547 	int ret = 0;
12548 	u64 reg;
12549 
12550 	if (dd->icode != ICODE_RTL_SILICON) {
12551 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12552 			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12553 				    __func__);
12554 		return -EINVAL;
12555 	}
12556 	reg = read_csr(dd, ASIC_STS_THERM);
12557 	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12558 		      ASIC_STS_THERM_CURR_TEMP_MASK);
12559 	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12560 			ASIC_STS_THERM_LO_TEMP_MASK);
12561 	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12562 			ASIC_STS_THERM_HI_TEMP_MASK);
12563 	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12564 			  ASIC_STS_THERM_CRIT_TEMP_MASK);
12565 	/* triggers is a 3-bit value - 1 bit per trigger. */
12566 	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12567 
12568 	return ret;
12569 }
12570 
12571 /* ========================================================================= */
12572 
12573 /*
12574  * Enable or disable delivery of interrupts from the chip.
12575  */
12576 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12577 {
12578 	int i;
12579 
12580 	/*
12581 	 * In HFI, the mask needs to be 1 to allow interrupts.
12582 	 */
12583 	if (enable) {
12584 		/* enable all interrupts */
12585 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12586 			write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12587 
12588 		init_qsfp_int(dd);
12589 	} else {
12590 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12591 			write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12592 	}
12593 }
12594 
12595 /*
12596  * Clear all interrupt sources on the chip.
12597  */
12598 static void clear_all_interrupts(struct hfi1_devdata *dd)
12599 {
12600 	int i;
12601 
12602 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12603 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12604 
12605 	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12606 	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12607 	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12608 	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12609 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12610 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12611 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12612 	for (i = 0; i < dd->chip_send_contexts; i++)
12613 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12614 	for (i = 0; i < dd->chip_sdma_engines; i++)
12615 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12616 
12617 	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12618 	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12619 	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12620 }
12621 
12622 /* Move to pcie.c? */
12623 static void disable_intx(struct pci_dev *pdev)
12624 {
12625 	pci_intx(pdev, 0);
12626 }
12627 
12628 static void clean_up_interrupts(struct hfi1_devdata *dd)
12629 {
12630 	int i;
12631 
12632 	/* remove irqs - must happen before disabling/turning off */
12633 	if (dd->num_msix_entries) {
12634 		/* MSI-X */
12635 		struct hfi1_msix_entry *me = dd->msix_entries;
12636 
12637 		for (i = 0; i < dd->num_msix_entries; i++, me++) {
12638 			if (!me->arg) /* => no irq, no affinity */
12639 				continue;
12640 			hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12641 			free_irq(me->msix.vector, me->arg);
12642 		}
12643 	} else {
12644 		/* INTx */
12645 		if (dd->requested_intx_irq) {
12646 			free_irq(dd->pcidev->irq, dd);
12647 			dd->requested_intx_irq = 0;
12648 		}
12649 	}
12650 
12651 	/* turn off interrupts */
12652 	if (dd->num_msix_entries) {
12653 		/* MSI-X */
12654 		pci_disable_msix(dd->pcidev);
12655 	} else {
12656 		/* INTx */
12657 		disable_intx(dd->pcidev);
12658 	}
12659 
12660 	/* clean structures */
12661 	kfree(dd->msix_entries);
12662 	dd->msix_entries = NULL;
12663 	dd->num_msix_entries = 0;
12664 }
12665 
12666 /*
12667  * Remap the interrupt source from the general handler to the given MSI-X
12668  * interrupt.
12669  */
12670 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12671 {
12672 	u64 reg;
12673 	int m, n;
12674 
12675 	/* clear from the handled mask of the general interrupt */
12676 	m = isrc / 64;
12677 	n = isrc % 64;
12678 	dd->gi_mask[m] &= ~((u64)1 << n);
12679 
12680 	/* direct the chip source to the given MSI-X interrupt */
12681 	m = isrc / 8;
12682 	n = isrc % 8;
12683 	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12684 	reg &= ~((u64)0xff << (8 * n));
12685 	reg |= ((u64)msix_intr & 0xff) << (8 * n);
12686 	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12687 }
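
/*
 * Index math example: for chip interrupt source isrc == 70, the
 * general-handler mask bit is gi_mask word 1, bit 6 (70 / 64 and
 * 70 % 64), and its MSI-X mapping lives in CCE_INT_MAP register 8,
 * byte 6 (70 / 8 and 70 % 8).
 */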
12688 
12689 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12690 				  int engine, int msix_intr)
12691 {
12692 	/*
12693 	 * SDMA engine interrupt sources grouped by type, rather than
12694 	 * engine.  Per-engine interrupts are as follows:
12695 	 *	SDMA
12696 	 *	SDMAProgress
12697 	 *	SDMAIdle
12698 	 */
12699 	remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12700 		   msix_intr);
12701 	remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12702 		   msix_intr);
12703 	remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12704 		   msix_intr);
12705 }
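
/*
 * E.g. (engine count illustrative): with TXE_NUM_SDMA_ENGINES == 16,
 * engine 3 owns chip sources IS_SDMA_START + 3, + 19, and + 35 (SDMA,
 * SDMAProgress, and SDMAIdle respectively), all remapped to the same
 * MSI-X vector.
 */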
12706 
12707 static int request_intx_irq(struct hfi1_devdata *dd)
12708 {
12709 	int ret;
12710 
12711 	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12712 		 dd->unit);
12713 	ret = request_irq(dd->pcidev->irq, general_interrupt,
12714 			  IRQF_SHARED, dd->intx_name, dd);
12715 	if (ret)
12716 		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12717 			   ret);
12718 	else
12719 		dd->requested_intx_irq = 1;
12720 	return ret;
12721 }
12722 
12723 static int request_msix_irqs(struct hfi1_devdata *dd)
12724 {
12725 	int first_general, last_general;
12726 	int first_sdma, last_sdma;
12727 	int first_rx, last_rx;
12728 	int i, ret = 0;
12729 
12730 	/* calculate the ranges we are going to use */
12731 	first_general = 0;
12732 	last_general = first_general + 1;
12733 	first_sdma = last_general;
12734 	last_sdma = first_sdma + dd->num_sdma;
12735 	first_rx = last_sdma;
12736 	last_rx = first_rx + dd->n_krcv_queues;
12737 
12738 	/*
12739 	 * Sanity check - the code expects all SDMA chip source
12740 	 * interrupts to be in the same CSR, starting at bit 0.  Verify
12741 	 * that this is true by checking the bit location of the start.
12742 	 */
12743 	BUILD_BUG_ON(IS_SDMA_START % 64);
12744 
12745 	for (i = 0; i < dd->num_msix_entries; i++) {
12746 		struct hfi1_msix_entry *me = &dd->msix_entries[i];
12747 		const char *err_info;
12748 		irq_handler_t handler;
12749 		irq_handler_t thread = NULL;
12750 		void *arg;
12751 		int idx;
12752 		struct hfi1_ctxtdata *rcd = NULL;
12753 		struct sdma_engine *sde = NULL;
12754 
12755 		/* obtain the arguments to request_irq */
12756 		if (first_general <= i && i < last_general) {
12757 			idx = i - first_general;
12758 			handler = general_interrupt;
12759 			arg = dd;
12760 			snprintf(me->name, sizeof(me->name),
12761 				 DRIVER_NAME "_%d", dd->unit);
12762 			err_info = "general";
12763 			me->type = IRQ_GENERAL;
12764 		} else if (first_sdma <= i && i < last_sdma) {
12765 			idx = i - first_sdma;
12766 			sde = &dd->per_sdma[idx];
12767 			handler = sdma_interrupt;
12768 			arg = sde;
12769 			snprintf(me->name, sizeof(me->name),
12770 				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12771 			err_info = "sdma";
12772 			remap_sdma_interrupts(dd, idx, i);
12773 			me->type = IRQ_SDMA;
12774 		} else if (first_rx <= i && i < last_rx) {
12775 			idx = i - first_rx;
12776 			rcd = dd->rcd[idx];
12777 			/* no interrupt if no rcd */
12778 			if (!rcd)
12779 				continue;
12780 			/*
12781 			 * Set the interrupt register and mask for this
12782 			 * context's interrupt.
12783 			 */
12784 			rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12785 			rcd->imask = ((u64)1) <<
12786 					((IS_RCVAVAIL_START + idx) % 64);
12787 			handler = receive_context_interrupt;
12788 			thread = receive_context_thread;
12789 			arg = rcd;
12790 			snprintf(me->name, sizeof(me->name),
12791 				 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12792 			err_info = "receive context";
12793 			remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12794 			me->type = IRQ_RCVCTXT;
12795 		} else {
12796 			/* not in our expected range - complain, then
12797 			 * ignore it
12798 			 */
12799 			dd_dev_err(dd,
12800 				   "Unexpected extra MSI-X interrupt %d\n", i);
12801 			continue;
12802 		}
12803 		/* no argument, no interrupt */
12804 		if (!arg)
12805 			continue;
12806 		/* make sure the name is terminated */
12807 		me->name[sizeof(me->name) - 1] = 0;
12808 
12809 		ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12810 					   me->name, arg);
12811 		if (ret) {
12812 			dd_dev_err(dd,
12813 				   "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12814 				   err_info, me->msix.vector, idx, ret);
12815 			return ret;
12816 		}
12817 		/*
12818 		 * assign arg after request_irq call, so it will be
12819 		 * cleaned up
12820 		 */
12821 		me->arg = arg;
12822 
12823 		ret = hfi1_get_irq_affinity(dd, me);
12824 		if (ret)
12825 			dd_dev_err(dd,
12826 				   "unable to pin IRQ %d\n", ret);
12827 	}
12828 
12829 	return ret;
12830 }
12831 
12832 /*
12833  * Set the general handler to accept all interrupts, remap all
12834  * chip interrupts back to MSI-X 0.
12835  */
12836 static void reset_interrupts(struct hfi1_devdata *dd)
12837 {
12838 	int i;
12839 
12840 	/* all interrupts handled by the general handler */
12841 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12842 		dd->gi_mask[i] = ~(u64)0;
12843 
12844 	/* all chip interrupts map to MSI-X 0 */
12845 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12846 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12847 }
12848 
12849 static int set_up_interrupts(struct hfi1_devdata *dd)
12850 {
12851 	struct hfi1_msix_entry *entries;
12852 	u32 total, request;
12853 	int i, ret;
12854 	int single_interrupt = 0; /* we expect to have all the interrupts */
12855 
12856 	/*
12857 	 * Interrupt count:
12858 	 *	1 general, "slow path" interrupt (includes the SDMA engines
12859 	 *		slow source, SDMACleanupDone)
12860 	 *	N interrupts - one per used SDMA engine
12861 	 *	M interrupts - one per kernel receive context
12862 	 */
12863 	total = 1 + dd->num_sdma + dd->n_krcv_queues;
12864 
12865 	entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12866 	if (!entries) {
12867 		ret = -ENOMEM;
12868 		goto fail;
12869 	}
12870 	/* 1-1 MSI-X entry assignment */
12871 	for (i = 0; i < total; i++)
12872 		entries[i].msix.entry = i;
12873 
12874 	/* ask for MSI-X interrupts */
12875 	request = total;
12876 	request_msix(dd, &request, entries);
12877 
12878 	if (request == 0) {
12879 		/* using INTx */
12880 		/* dd->num_msix_entries already zero */
12881 		kfree(entries);
12882 		single_interrupt = 1;
12883 		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12884 	} else {
12885 		/* using MSI-X */
12886 		dd->num_msix_entries = request;
12887 		dd->msix_entries = entries;
12888 
12889 		if (request != total) {
12890 			/* using MSI-X, with reduced interrupts */
12891 			dd_dev_err(
12892 				dd,
12893 				"cannot handle reduced interrupt case, want %u, got %u\n",
12894 				total, request);
12895 			ret = -EINVAL;
12896 			goto fail;
12897 		}
12898 		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12899 	}
12900 
12901 	/* mask all interrupts */
12902 	set_intr_state(dd, 0);
12903 	/* clear all pending interrupts */
12904 	clear_all_interrupts(dd);
12905 
12906 	/* reset general handler mask, chip MSI-X mappings */
12907 	reset_interrupts(dd);
12908 
12909 	if (single_interrupt)
12910 		ret = request_intx_irq(dd);
12911 	else
12912 		ret = request_msix_irqs(dd);
12913 	if (ret)
12914 		goto fail;
12915 
12916 	return 0;
12917 
12918 fail:
12919 	clean_up_interrupts(dd);
12920 	return ret;
12921 }
12922 
12923 /*
12924  * Set up context values in dd.  Sets:
12925  *
12926  *	num_rcv_contexts - number of contexts being used
12927  *	n_krcv_queues - number of kernel contexts
12928  *	first_user_ctxt - first non-kernel context in array of contexts
12929  *	freectxts  - number of free user contexts
12930  *	num_send_contexts - number of PIO send contexts being used
12931  */
12932 static int set_up_context_variables(struct hfi1_devdata *dd)
12933 {
12934 	unsigned long num_kernel_contexts;
12935 	int total_contexts;
12936 	int ret;
12937 	unsigned ngroups;
12938 	int qos_rmt_count;
12939 	int user_rmt_reduced;
12940 
12941 	/*
12942 	 * Kernel receive contexts:
12943 	 * - Context 0 - control context (VL15/multicast/error)
12944 	 * - Context 1 - first kernel context
12945 	 * - Context 2 - second kernel context
12946 	 * ...
12947 	 */
12948 	if (n_krcvqs)
12949 		/*
12950 		 * n_krcvqs is the sum of module parameter kernel receive
12951 		 * contexts, krcvqs[].  It does not include the control
12952 		 * context, so add that.
12953 		 */
12954 		num_kernel_contexts = n_krcvqs + 1;
12955 	else
12956 		num_kernel_contexts = DEFAULT_KRCVQS + 1;
12957 	/*
12958 	 * Every kernel receive context needs an ACK send context.
12959 	 * one send context is allocated for each VL{0-7} and VL15
12960 	 * One send context is allocated for each VL{0-7} and VL15.
12961 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12962 		dd_dev_err(dd,
12963 			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
12964 			   (int)(dd->chip_send_contexts - num_vls - 1),
12965 			   num_kernel_contexts);
12966 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12967 	}
12968 	/*
12969 	 * User contexts:
12970 	 *	- default to 1 user context per real (non-HT) CPU core if
12971 	 *	  num_user_contexts is negative
12972 	 */
12973 	if (num_user_contexts < 0)
12974 		num_user_contexts =
12975 			cpumask_weight(&node_affinity.real_cpu_mask);
12976 
12977 	total_contexts = num_kernel_contexts + num_user_contexts;
12978 
12979 	/*
12980 	 * Adjust the counts given a global max.
12981 	 */
12982 	if (total_contexts > dd->chip_rcv_contexts) {
12983 		dd_dev_err(dd,
12984 			   "Reducing # user receive contexts to: %d, from %d\n",
12985 			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12986 			   (int)num_user_contexts);
12987 		num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12988 		/* recalculate */
12989 		total_contexts = num_kernel_contexts + num_user_contexts;
12990 	}
12991 
12992 	/* each user context requires an entry in the RMT */
12993 	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12994 	if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12995 		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12996 		dd_dev_err(dd,
12997 			   "RMT size is reducing the number of user receive contexts from %d to %d\n",
12998 			   (int)num_user_contexts,
12999 			   user_rmt_reduced);
13000 		/* recalculate */
13001 		num_user_contexts = user_rmt_reduced;
13002 		total_contexts = num_kernel_contexts + num_user_contexts;
13003 	}
13004 
13005 	/* the first N are kernel contexts, the rest are user contexts */
13006 	dd->num_rcv_contexts = total_contexts;
13007 	dd->n_krcv_queues = num_kernel_contexts;
13008 	dd->first_user_ctxt = num_kernel_contexts;
13009 	dd->num_user_contexts = num_user_contexts;
13010 	dd->freectxts = num_user_contexts;
13011 	dd_dev_info(dd,
13012 		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13013 		    (int)dd->chip_rcv_contexts,
13014 		    (int)dd->num_rcv_contexts,
13015 		    (int)dd->n_krcv_queues,
13016 		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13017 
13018 	/*
13019 	 * Receive array allocation:
13020 	 *   All RcvArray entries are divided into groups of 8. This
13021 	 *   is required by the hardware and will speed up writes to
13022 	 *   consecutive entries by using write-combining of the entire
13023 	 *   cacheline.
13024 	 *
13025 	 *   The groups are divided evenly among all contexts; any
13026 	 *   left-over groups are given to the first N user
13027 	 *   contexts.
13028 	 */
13029 	dd->rcv_entries.group_size = RCV_INCREMENT;
13030 	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13031 	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13032 	dd->rcv_entries.nctxt_extra = ngroups -
13033 		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
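	/*
	 * Worked example with hypothetical sizes: a 32768-entry RcvArray
	 * with group_size 8 gives ngroups = 4096.  With 40 receive
	 * contexts, each context gets 4096 / 40 = 102 groups and
	 * nctxt_extra = 4096 - (40 * 102) = 16 groups remain for the
	 * first user contexts.
	 */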
13034 	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13035 		    dd->rcv_entries.ngroups,
13036 		    dd->rcv_entries.nctxt_extra);
13037 	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13038 	    MAX_EAGER_ENTRIES * 2) {
13039 		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13040 			dd->rcv_entries.group_size;
13041 		dd_dev_info(dd,
13042 			    "RcvArray group count too high, changing to %u\n",
13043 			    dd->rcv_entries.ngroups);
13044 		dd->rcv_entries.nctxt_extra = 0;
13045 	}
13046 	/*
13047 	 * PIO send contexts
13048 	 */
13049 	ret = init_sc_pools_and_sizes(dd);
13050 	if (ret >= 0) {	/* success */
13051 		dd->num_send_contexts = ret;
13052 		dd_dev_info(
13053 			dd,
13054 			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13055 			dd->chip_send_contexts,
13056 			dd->num_send_contexts,
13057 			dd->sc_sizes[SC_KERNEL].count,
13058 			dd->sc_sizes[SC_ACK].count,
13059 			dd->sc_sizes[SC_USER].count,
13060 			dd->sc_sizes[SC_VL15].count);
13061 		ret = 0;	/* success */
13062 	}
13063 
13064 	return ret;
13065 }
13066 
13067 /*
13068  * Set the device/port partition key table. The MAD code
13069  * will ensure that, at least, the partial management
13070  * partition key is present in the table.
13071  */
13072 static void set_partition_keys(struct hfi1_pportdata *ppd)
13073 {
13074 	struct hfi1_devdata *dd = ppd->dd;
13075 	u64 reg = 0;
13076 	int i;
13077 
13078 	dd_dev_info(dd, "Setting partition keys\n");
13079 	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13080 		reg |= (ppd->pkeys[i] &
13081 			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13082 			((i % 4) *
13083 			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13084 		/* Each register holds 4 PKey values. */
13085 		if ((i % 4) == 3) {
13086 			write_csr(dd, RCV_PARTITION_KEY +
13087 				  ((i - 3) * 2), reg);
13088 			reg = 0;
13089 		}
13090 	}
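	/*
	 * Packing sketch: each 64-bit RcvPartitionKey CSR holds four
	 * 16-bit PKey fields, so the (i - 3) * 2 term above advances one
	 * 8-byte CSR per four keys (e.g. i = 7 writes byte offset 8,
	 * assuming the ..._B_SHIFT stride is 16 bits).
	 */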
13091 
13092 	/* Always enable HW pkeys check when pkeys table is set */
13093 	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13094 }
13095 
13096 /*
13097  * These CSRs and memories are uninitialized on reset and must be
13098  * written before reading to set the ECC/parity bits.
13099  *
13100  * NOTE: All user context CSRs that are not mmapped write-only
13101  * (e.g. the TID flows) must be initialized even if the driver never
13102  * reads them.
13103  */
13104 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13105 {
13106 	int i, j;
13107 
13108 	/* CceIntMap */
13109 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13110 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13111 
13112 	/* SendCtxtCreditReturnAddr */
13113 	for (i = 0; i < dd->chip_send_contexts; i++)
13114 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13115 
13116 	/* PIO Send buffers */
13117 	/* SDMA Send buffers */
13118 	/*
13119 	 * These are not normally read, and (presently) have no method
13120 	 * to be read, so are not pre-initialized
13121 	 */
13122 
13123 	/* RcvHdrAddr */
13124 	/* RcvHdrTailAddr */
13125 	/* RcvTidFlowTable */
13126 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
13127 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13128 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13129 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13130 			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13131 	}
13132 
13133 	/* RcvArray */
13134 	for (i = 0; i < dd->chip_rcv_array_count; i++)
13135 		write_csr(dd, RCV_ARRAY + (8 * i),
13136 			  RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13137 
13138 	/* RcvQPMapTable */
13139 	for (i = 0; i < 32; i++)
13140 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13141 }
13142 
13143 /*
13144  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13145  */
13146 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13147 			     u64 ctrl_bits)
13148 {
13149 	unsigned long timeout;
13150 	u64 reg;
13151 
13152 	/* is the condition present? */
13153 	reg = read_csr(dd, CCE_STATUS);
13154 	if ((reg & status_bits) == 0)
13155 		return;
13156 
13157 	/* clear the condition */
13158 	write_csr(dd, CCE_CTRL, ctrl_bits);
13159 
13160 	/* wait for the condition to clear */
13161 	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13162 	while (1) {
13163 		reg = read_csr(dd, CCE_STATUS);
13164 		if ((reg & status_bits) == 0)
13165 			return;
13166 		if (time_after(jiffies, timeout)) {
13167 			dd_dev_err(dd,
13168 				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13169 				   status_bits, reg & status_bits);
13170 			return;
13171 		}
13172 		udelay(1);
13173 	}
13174 }
13175 
13176 /* set CCE CSRs to chip reset defaults */
13177 static void reset_cce_csrs(struct hfi1_devdata *dd)
13178 {
13179 	int i;
13180 
13181 	/* CCE_REVISION read-only */
13182 	/* CCE_REVISION2 read-only */
13183 	/* CCE_CTRL - bits clear automatically */
13184 	/* CCE_STATUS read-only, use CceCtrl to clear */
13185 	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13186 	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13187 	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13188 	for (i = 0; i < CCE_NUM_SCRATCH; i++)
13189 		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13190 	/* CCE_ERR_STATUS read-only */
13191 	write_csr(dd, CCE_ERR_MASK, 0);
13192 	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13193 	/* CCE_ERR_FORCE leave alone */
13194 	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13195 		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13196 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13197 	/* CCE_PCIE_CTRL leave alone */
13198 	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13199 		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13200 		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13201 			  CCE_MSIX_TABLE_UPPER_RESETCSR);
13202 	}
13203 	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13204 		/* CCE_MSIX_PBA read-only */
13205 		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13206 		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13207 	}
13208 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13209 		write_csr(dd, CCE_INT_MAP, 0);
13210 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13211 		/* CCE_INT_STATUS read-only */
13212 		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13213 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13214 		/* CCE_INT_FORCE leave alone */
13215 		/* CCE_INT_BLOCKED read-only */
13216 	}
13217 	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13218 		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13219 }
13220 
13221 /* set MISC CSRs to chip reset defaults */
13222 static void reset_misc_csrs(struct hfi1_devdata *dd)
13223 {
13224 	int i;
13225 
13226 	for (i = 0; i < 32; i++) {
13227 		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13228 		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13229 		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13230 	}
13231 	/*
13232 	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13233 	 * only be written in 128-byte chunks
13234 	 */
13235 	/* init RSA engine to clear lingering errors */
13236 	write_csr(dd, MISC_CFG_RSA_CMD, 1);
13237 	write_csr(dd, MISC_CFG_RSA_MU, 0);
13238 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13239 	/* MISC_STS_8051_DIGEST read-only */
13240 	/* MISC_STS_SBM_DIGEST read-only */
13241 	/* MISC_STS_PCIE_DIGEST read-only */
13242 	/* MISC_STS_FAB_DIGEST read-only */
13243 	/* MISC_ERR_STATUS read-only */
13244 	write_csr(dd, MISC_ERR_MASK, 0);
13245 	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13246 	/* MISC_ERR_FORCE leave alone */
13247 }
13248 
13249 /* set TXE CSRs to chip reset defaults */
13250 static void reset_txe_csrs(struct hfi1_devdata *dd)
13251 {
13252 	int i;
13253 
13254 	/*
13255 	 * TXE Kernel CSRs
13256 	 */
13257 	write_csr(dd, SEND_CTRL, 0);
13258 	__cm_reset(dd, 0);	/* reset CM internal state */
13259 	/* SEND_CONTEXTS read-only */
13260 	/* SEND_DMA_ENGINES read-only */
13261 	/* SEND_PIO_MEM_SIZE read-only */
13262 	/* SEND_DMA_MEM_SIZE read-only */
13263 	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13264 	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
13265 	/* SEND_PIO_ERR_STATUS read-only */
13266 	write_csr(dd, SEND_PIO_ERR_MASK, 0);
13267 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13268 	/* SEND_PIO_ERR_FORCE leave alone */
13269 	/* SEND_DMA_ERR_STATUS read-only */
13270 	write_csr(dd, SEND_DMA_ERR_MASK, 0);
13271 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13272 	/* SEND_DMA_ERR_FORCE leave alone */
13273 	/* SEND_EGRESS_ERR_STATUS read-only */
13274 	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13275 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13276 	/* SEND_EGRESS_ERR_FORCE leave alone */
13277 	write_csr(dd, SEND_BTH_QP, 0);
13278 	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13279 	write_csr(dd, SEND_SC2VLT0, 0);
13280 	write_csr(dd, SEND_SC2VLT1, 0);
13281 	write_csr(dd, SEND_SC2VLT2, 0);
13282 	write_csr(dd, SEND_SC2VLT3, 0);
13283 	write_csr(dd, SEND_LEN_CHECK0, 0);
13284 	write_csr(dd, SEND_LEN_CHECK1, 0);
13285 	/* SEND_ERR_STATUS read-only */
13286 	write_csr(dd, SEND_ERR_MASK, 0);
13287 	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13288 	/* SEND_ERR_FORCE read-only */
13289 	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13290 		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13291 	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13292 		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13293 	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13294 		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13295 	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13296 		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13297 	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13298 		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13299 	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13300 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13301 	/* SEND_CM_CREDIT_USED_STATUS read-only */
13302 	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13303 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13304 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13305 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13306 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13307 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
13308 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13309 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13310 	/* SEND_CM_CREDIT_USED_VL read-only */
13311 	/* SEND_CM_CREDIT_USED_VL15 read-only */
13312 	/* SEND_EGRESS_CTXT_STATUS read-only */
13313 	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
13314 	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13315 	/* SEND_EGRESS_ERR_INFO read-only */
13316 	/* SEND_EGRESS_ERR_SOURCE read-only */
13317 
13318 	/*
13319 	 * TXE Per-Context CSRs
13320 	 */
13321 	for (i = 0; i < dd->chip_send_contexts; i++) {
13322 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13323 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13324 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13325 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13326 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13327 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13328 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13329 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13330 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13331 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13332 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13333 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13334 	}
13335 
13336 	/*
13337 	 * TXE Per-SDMA CSRs
13338 	 */
13339 	for (i = 0; i < dd->chip_sdma_engines; i++) {
13340 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13341 		/* SEND_DMA_STATUS read-only */
13342 		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13343 		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13344 		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13345 		/* SEND_DMA_HEAD read-only */
13346 		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13347 		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13348 		/* SEND_DMA_IDLE_CNT read-only */
13349 		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13350 		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13351 		/* SEND_DMA_DESC_FETCHED_CNT read-only */
13352 		/* SEND_DMA_ENG_ERR_STATUS read-only */
13353 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13354 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13355 		/* SEND_DMA_ENG_ERR_FORCE leave alone */
13356 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13357 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13358 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13359 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13360 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13361 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13362 		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13363 	}
13364 }
13365 
13366 /*
13367  * Expect on entry:
13368  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13369  */
13370 static void init_rbufs(struct hfi1_devdata *dd)
13371 {
13372 	u64 reg;
13373 	int count;
13374 
13375 	/*
13376 	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13377 	 * clear.
13378 	 */
13379 	count = 0;
13380 	while (1) {
13381 		reg = read_csr(dd, RCV_STATUS);
13382 		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13383 			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13384 			break;
13385 		/*
13386 		 * Give up after 1ms - maximum wait time.
13387 		 *
13388 		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13389 		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13390 		 *	136 KB / (66% * 250MB/s) = 844us
13391 		 *	136 KiB / (66% * 250MB/s) = 844us
13392 		if (count++ > 500) {
13393 			dd_dev_err(dd,
13394 				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13395 				   __func__, reg);
13396 			break;
13397 		}
13398 		udelay(2); /* do not busy-wait the CSR */
13399 	}
13400 
13401 	/* start the init - expect RcvCtrl to be 0 */
13402 	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13403 
13404 	/*
13405 	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13406 	 * period after the write before RcvStatus.RxRbufInitDone is valid.
13407 	 * The delay in the first run through the loop below is sufficient and
13408 	 * required before the first read of RcvStatus.RxRbufInitDone.
13409 	 */
13410 	read_csr(dd, RCV_CTRL);
13411 
13412 	/* wait for the init to finish */
13413 	count = 0;
13414 	while (1) {
13415 		/* delay is required first time through - see above */
13416 		udelay(2); /* do not busy-wait the CSR */
13417 		reg = read_csr(dd, RCV_STATUS);
13418 		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13419 			break;
13420 
13421 		/* give up after 100us - slowest possible at 33MHz is 73us */
13422 		if (count++ > 50) {
13423 			dd_dev_err(dd,
13424 				   "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13425 				   __func__);
13426 			break;
13427 		}
13428 	}
13429 }
13430 
13431 /* set RXE CSRs to chip reset defaults */
13432 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13433 {
13434 	int i, j;
13435 
13436 	/*
13437 	 * RXE Kernel CSRs
13438 	 */
13439 	write_csr(dd, RCV_CTRL, 0);
13440 	init_rbufs(dd);
13441 	/* RCV_STATUS read-only */
13442 	/* RCV_CONTEXTS read-only */
13443 	/* RCV_ARRAY_CNT read-only */
13444 	/* RCV_BUF_SIZE read-only */
13445 	write_csr(dd, RCV_BTH_QP, 0);
13446 	write_csr(dd, RCV_MULTICAST, 0);
13447 	write_csr(dd, RCV_BYPASS, 0);
13448 	write_csr(dd, RCV_VL15, 0);
13449 	/* this is a clear-down */
13450 	write_csr(dd, RCV_ERR_INFO,
13451 		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13452 	/* RCV_ERR_STATUS read-only */
13453 	write_csr(dd, RCV_ERR_MASK, 0);
13454 	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13455 	/* RCV_ERR_FORCE leave alone */
13456 	for (i = 0; i < 32; i++)
13457 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13458 	for (i = 0; i < 4; i++)
13459 		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13460 	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13461 		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13462 	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13463 		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13464 	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13465 		write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13466 		write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13467 		write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13468 	}
13469 	for (i = 0; i < 32; i++)
13470 		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13471 
13472 	/*
13473 	 * RXE Kernel and User Per-Context CSRs
13474 	 */
13475 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
13476 		/* kernel */
13477 		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13478 		/* RCV_CTXT_STATUS read-only */
13479 		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13480 		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13481 		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13482 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13483 		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13484 		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13485 		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13486 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13487 		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13488 		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13489 
13490 		/* user */
13491 		/* RCV_HDR_TAIL read-only */
13492 		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13493 		/* RCV_EGR_INDEX_TAIL read-only */
13494 		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13495 		/* RCV_EGR_OFFSET_TAIL read-only */
13496 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13497 			write_uctxt_csr(dd, i,
13498 					RCV_TID_FLOW_TABLE + (8 * j), 0);
13499 		}
13500 	}
13501 }
13502 
13503 /*
13504  * Set sc2vl tables.
13505  *
13506  * They power on to zeros, so to avoid send context errors
13507  * they need to be set:
13508  *
13509  * SC 0-7 -> VL 0-7 (respectively)
13510  * SC 15  -> VL 15
13511  * otherwise
13512  *        -> VL 0
13513  */
13514 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13515 {
13516 	int i;
13517 	/* init per architecture spec, constrained by hardware capability */
13518 
13519 	/* HFI maps sent packets */
13520 	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13521 		0,
13522 		0, 0, 1, 1,
13523 		2, 2, 3, 3,
13524 		4, 4, 5, 5,
13525 		6, 6, 7, 7));
13526 	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13527 		1,
13528 		8, 0, 9, 0,
13529 		10, 0, 11, 0,
13530 		12, 0, 13, 0,
13531 		14, 0, 15, 15));
13532 	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13533 		2,
13534 		16, 0, 17, 0,
13535 		18, 0, 19, 0,
13536 		20, 0, 21, 0,
13537 		22, 0, 23, 0));
13538 	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13539 		3,
13540 		24, 0, 25, 0,
13541 		26, 0, 27, 0,
13542 		28, 0, 29, 0,
13543 		30, 0, 31, 0));
13544 
13545 	/* DC maps received packets */
13546 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13547 		15_0,
13548 		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13549 		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13550 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13551 		31_16,
13552 		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13553 		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13554 
13555 	/* initialize the cached sc2vl values consistently with h/w */
13556 	for (i = 0; i < 32; i++) {
13557 		if (i < 8 || i == 15)
13558 			*((u8 *)(dd->sc2vl) + i) = (u8)i;
13559 		else
13560 			*((u8 *)(dd->sc2vl) + i) = 0;
13561 	}
13562 }
13563 
13564 /*
13565  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13566  * depend on the chip going through a power-on reset - a driver may be loaded
13567  * and unloaded many times.
13568  *
13569  * Do not write any CSR values to the chip in this routine - there may be
13570  * a reset following the (possible) FLR in this routine.
13571  *
13572  */
13573 static void init_chip(struct hfi1_devdata *dd)
13574 {
13575 	int i;
13576 
13577 	/*
13578 	 * Put the HFI CSRs in a known state.
13579 	 * Combine this with a DC reset.
13580 	 *
13581 	 * Stop the device from doing anything while we do a
13582 	 * reset.  We know there are no other active users of
13583 	 * the device since we are now in charge.  Turn off
13584 	 * all outbound and inbound traffic and make sure
13585 	 * the device does not generate any interrupts.
13586 	 */
13587 
13588 	/* disable send contexts and SDMA engines */
13589 	write_csr(dd, SEND_CTRL, 0);
13590 	for (i = 0; i < dd->chip_send_contexts; i++)
13591 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13592 	for (i = 0; i < dd->chip_sdma_engines; i++)
13593 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13594 	/* disable port (turn off RXE inbound traffic) and contexts */
13595 	write_csr(dd, RCV_CTRL, 0);
13596 	for (i = 0; i < dd->chip_rcv_contexts; i++)
13597 		write_csr(dd, RCV_CTXT_CTRL, 0);
13598 	/* mask all interrupt sources */
13599 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13600 		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13601 
13602 	/*
13603 	 * DC Reset: do a full DC reset before the register clear.
13604 	 * A recommended length of time to hold is one CSR read,
13605 	 * so reread the CceDcCtrl.  Then, hold the DC in reset
13606 	 * across the clear.
13607 	 */
13608 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13609 	(void)read_csr(dd, CCE_DC_CTRL);
13610 
13611 	if (use_flr) {
13612 		/*
13613 		 * A FLR will reset the SPC core and part of the PCIe.
13614 		 * The parts that need to be restored have already been
13615 		 * saved.
13616 		 */
13617 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
13618 
13619 		/* do the FLR, the DC reset will remain */
13620 		hfi1_pcie_flr(dd);
13621 
13622 		/* restore command and BARs */
13623 		restore_pci_variables(dd);
13624 
13625 		if (is_ax(dd)) {
13626 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
13627 			hfi1_pcie_flr(dd);
13628 			restore_pci_variables(dd);
13629 		}
13630 	} else {
13631 		dd_dev_info(dd, "Resetting CSRs with writes\n");
13632 		reset_cce_csrs(dd);
13633 		reset_txe_csrs(dd);
13634 		reset_rxe_csrs(dd);
13635 		reset_misc_csrs(dd);
13636 	}
13637 	/* clear the DC reset */
13638 	write_csr(dd, CCE_DC_CTRL, 0);
13639 
13640 	/* Set the LED off */
13641 	setextled(dd, 0);
13642 
13643 	/*
13644 	 * Clear the QSFP reset.
13645 	 * An FLR enforces a 0 on all out pins. The driver does not touch
13646 	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, keeping
13647 	 * anything plugged in constantly in reset if it pays attention
13648 	 * to RESET_N.
13649 	 * Prime examples of this are optical cables. Set all pins high.
13650 	 * I2CCLK and I2CDAT will change per direction, and INT_N and
13651 	 * MODPRS_N are input only and their value is ignored.
13652 	 */
13653 	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13654 	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13655 	init_chip_resources(dd);
13656 }
13657 
13658 static void init_early_variables(struct hfi1_devdata *dd)
13659 {
13660 	int i;
13661 
13662 	/* assign link credit variables */
13663 	dd->vau = CM_VAU;
13664 	dd->link_credits = CM_GLOBAL_CREDITS;
13665 	if (is_ax(dd))
13666 		dd->link_credits--;
13667 	dd->vcu = cu_to_vcu(hfi1_cu);
13668 	/* enough room for 8 MAD packets plus header - 17K */
13669 	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13670 	if (dd->vl15_init > dd->link_credits)
13671 		dd->vl15_init = dd->link_credits;
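	/*
	 * Sanity math for the lines above: 8 * (2048 + 128) = 17408 bytes,
	 * i.e. the "17K" in the comment.  With a hypothetical AU size of
	 * 64 bytes this is 17408 / 64 = 272 AUs, capped at link_credits.
	 */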
13672 
13673 	write_uninitialized_csrs_and_memories(dd);
13674 
13675 	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13676 		for (i = 0; i < dd->num_pports; i++) {
13677 			struct hfi1_pportdata *ppd = &dd->pport[i];
13678 
13679 			set_partition_keys(ppd);
13680 		}
13681 	init_sc2vl_tables(dd);
13682 }
13683 
13684 static void init_kdeth_qp(struct hfi1_devdata *dd)
13685 {
13686 	/* user changed the KDETH_QP */
13687 	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13688 		/* out of range or illegal value */
13689 		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13690 		kdeth_qp = 0;
13691 	}
13692 	if (kdeth_qp == 0)	/* not set, or failed range check */
13693 		kdeth_qp = DEFAULT_KDETH_QP;
13694 
13695 	write_csr(dd, SEND_BTH_QP,
13696 		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13697 		  SEND_BTH_QP_KDETH_QP_SHIFT);
13698 
13699 	write_csr(dd, RCV_BTH_QP,
13700 		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13701 		  RCV_BTH_QP_KDETH_QP_SHIFT);
13702 }
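/*
 * Prefix sketch: kdeth_qp is an 8-bit prefix compared against the upper
 * bits of the 24-bit BTH QPN.  For example, a prefix of 0x80 (a
 * plausible DEFAULT_KDETH_QP) marks QPNs 0x800000-0x80ffff as KDETH
 * queue pairs on both the send and receive sides.
 */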
13703 
13704 /**
13705  * init_qpmap_table
13706  * @dd - device data
13707  * @first_ctxt - first context
13708  * @last_ctxt - last context
13709  *
13710  * This routine sets the qpn mapping table that
13711  * is indexed by qpn[8:1].
13712  *
13713  * The routine will round robin the 256 settings
13714  * from first_ctxt to last_ctxt.
13715  *
13716  * The first/last looks ahead to having specialized
13717  * receive contexts for mgmt and bypass.  Normal
13718  * verbs traffic is assumed to be on a range
13719  * of receive contexts.
13720  */
13721 static void init_qpmap_table(struct hfi1_devdata *dd,
13722 			     u32 first_ctxt,
13723 			     u32 last_ctxt)
13724 {
13725 	u64 reg = 0;
13726 	u64 regno = RCV_QP_MAP_TABLE;
13727 	int i;
13728 	u64 ctxt = first_ctxt;
13729 
13730 	for (i = 0; i < 256; i++) {
13731 		reg |= ctxt << (8 * (i % 8));
13732 		ctxt++;
13733 		if (ctxt > last_ctxt)
13734 			ctxt = first_ctxt;
13735 		if (i % 8 == 7) {
13736 			write_csr(dd, regno, reg);
13737 			reg = 0;
13738 			regno += 8;
13739 		}
13740 	}
13741 
13742 	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13743 			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13744 }
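/*
 * Mapping sketch with hypothetical arguments first_ctxt = 1 and
 * last_ctxt = 3: the 256 entries cycle 1, 2, 3, 1, 2, 3, ... so a
 * packet with qpn[8:1] == 5 is steered to context 3.  Eight 8-bit
 * entries are packed into each 64-bit CSR write.
 */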
13745 
13746 struct rsm_map_table {
13747 	u64 map[NUM_MAP_REGS];
13748 	unsigned int used;
13749 };
13750 
13751 struct rsm_rule_data {
13752 	u8 offset;
13753 	u8 pkt_type;
13754 	u32 field1_off;
13755 	u32 field2_off;
13756 	u32 index1_off;
13757 	u32 index1_width;
13758 	u32 index2_off;
13759 	u32 index2_width;
13760 	u32 mask1;
13761 	u32 value1;
13762 	u32 mask2;
13763 	u32 value2;
13764 };
13765 
13766 /*
13767  * Return an initialized RMT map table for users to fill in.  OK if it
13768  * returns NULL, indicating no table.
13769  */
13770 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13771 {
13772 	struct rsm_map_table *rmt;
13773 	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is the default on A0 h/w */
13774 
13775 	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13776 	if (rmt) {
13777 		memset(rmt->map, rxcontext, sizeof(rmt->map));
13778 		rmt->used = 0;
13779 	}
13780 
13781 	return rmt;
13782 }
13783 
13784 /*
13785  * Write the final RMT map table to the chip and free the table.  OK if
13786  * table is NULL.
13787  */
13788 static void complete_rsm_map_table(struct hfi1_devdata *dd,
13789 				   struct rsm_map_table *rmt)
13790 {
13791 	int i;
13792 
13793 	if (rmt) {
13794 		/* write table to chip */
13795 		for (i = 0; i < NUM_MAP_REGS; i++)
13796 			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13797 
13798 		/* enable RSM */
13799 		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13800 	}
13801 }
13802 
13803 /*
13804  * Add a receive side mapping rule.
13805  */
13806 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13807 			 struct rsm_rule_data *rrd)
13808 {
13809 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13810 		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13811 		  1ull << rule_index | /* enable bit */
13812 		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13813 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13814 		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13815 		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13816 		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13817 		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13818 		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13819 		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13820 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13821 		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13822 		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13823 		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13824 		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13825 }
13826 
13827 /* return the number of RSM map table entries that will be used for QOS */
13828 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13829 			   unsigned int *np)
13830 {
13831 	int i;
13832 	unsigned int m, n;
13833 	u8 max_by_vl = 0;
13834 
13835 	/* is QOS active at all? */
13836 	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13837 	    num_vls == 1 ||
13838 	    krcvqsset <= 1)
13839 		goto no_qos;
13840 
13841 	/* determine bits for qpn */
13842 	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13843 		if (krcvqs[i] > max_by_vl)
13844 			max_by_vl = krcvqs[i];
13845 	if (max_by_vl > 32)
13846 		goto no_qos;
13847 	m = ilog2(__roundup_pow_of_two(max_by_vl));
13848 
13849 	/* determine bits for vl */
13850 	n = ilog2(__roundup_pow_of_two(num_vls));
13851 
13852 	/* reject if too much is used */
13853 	if ((m + n) > 7)
13854 		goto no_qos;
13855 
13856 	if (mp)
13857 		*mp = m;
13858 	if (np)
13859 		*np = n;
13860 
13861 	return 1 << (m + n);
13862 
13863 no_qos:
13864 	if (mp)
13865 		*mp = 0;
13866 	if (np)
13867 		*np = 0;
13868 	return 0;
13869 }
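/*
 * Worked example with hypothetical module parameters: krcvqs = {4, 4}
 * and num_vls = 2 give max_by_vl = 4, so m = ilog2(4) = 2 qpn bits and
 * n = ilog2(2) = 1 vl bit.  Since m + n = 3 <= 7, 1 << 3 = 8 RSM map
 * table entries are used for QOS.
 */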
13870 
13871 /**
13872  * init_qos - init RX qos
13873  * @dd - device data
13874  * @rmt - RSM map table
13875  *
13876  * This routine initializes Rule 0 and the RSM map table to implement
13877  * quality of service (qos).
13878  *
13879  * If all of the limit tests succeed, qos is applied based on the array
13880  * interpretation of krcvqs where entry 0 is VL0.
13881  *
13882  * The number of vl bits (n) and the number of qpn bits (m) are computed to
13883  * feed both the RSM map table and the single rule.
13884  */
13885 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
13886 {
13887 	struct rsm_rule_data rrd;
13888 	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13889 	unsigned int rmt_entries;
13890 	u64 reg;
13891 
13892 	if (!rmt)
13893 		goto bail;
13894 	rmt_entries = qos_rmt_entries(dd, &m, &n);
13895 	if (rmt_entries == 0)
13896 		goto bail;
13897 	qpns_per_vl = 1 << m;
13898 
13899 	/* enough room in the map table? */
13900 	rmt_entries = 1 << (m + n);
13901 	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
13902 		goto bail;
13903 
13904 	/* add qos entries to the RSM map table */
13905 	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13906 		unsigned tctxt;
13907 
13908 		for (qpn = 0, tctxt = ctxt;
13909 		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13910 			unsigned idx, regoff, regidx;
13911 
13912 			/* generate the index the hardware will produce */
13913 			idx = rmt->used + ((qpn << n) ^ i);
13914 			regoff = (idx % 8) * 8;
13915 			regidx = idx / 8;
13916 			/* replace default with context number */
13917 			reg = rmt->map[regidx];
13918 			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13919 				<< regoff);
13920 			reg |= (u64)(tctxt++) << regoff;
13921 			rmt->map[regidx] = reg;
13922 			if (tctxt == ctxt + krcvqs[i])
13923 				tctxt = ctxt;
13924 		}
13925 		ctxt += krcvqs[i];
13926 	}
13927 
13928 	rrd.offset = rmt->used;
13929 	rrd.pkt_type = 2;
13930 	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13931 	rrd.field2_off = LRH_SC_MATCH_OFFSET;
13932 	rrd.index1_off = LRH_SC_SELECT_OFFSET;
13933 	rrd.index1_width = n;
13934 	rrd.index2_off = QPN_SELECT_OFFSET;
13935 	rrd.index2_width = m + n;
13936 	rrd.mask1 = LRH_BTH_MASK;
13937 	rrd.value1 = LRH_BTH_VALUE;
13938 	rrd.mask2 = LRH_SC_MASK;
13939 	rrd.value2 = LRH_SC_VALUE;
13940 
13941 	/* add rule 0 */
13942 	add_rsm_rule(dd, 0, &rrd);
13943 
13944 	/* mark RSM map entries as used */
13945 	rmt->used += rmt_entries;
13946 	/* map everything else to the mcast/err/vl15 context */
13947 	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13948 	dd->qos_shift = n + 1;
13949 	return;
13950 bail:
13951 	dd->qos_shift = 1;
13952 	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13953 }
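/*
 * Index sketch for the map fill in init_qos(): the hardware produces
 * index = offset + ((qpn << n) ^ vl).  Continuing the m = 2, n = 1
 * example above, VL0's entries land at even indices 0, 2, 4, 6 and
 * VL1's at odd indices 1, 3, 5, 7 (relative to rmt->used), each naming
 * a kernel receive context in round-robin order.
 */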
13954 
13955 static void init_user_fecn_handling(struct hfi1_devdata *dd,
13956 				    struct rsm_map_table *rmt)
13957 {
13958 	struct rsm_rule_data rrd;
13959 	u64 reg;
13960 	int i, idx, regoff, regidx;
13961 	u8 offset;
13962 
13963 	/* there needs to be enough room in the map table */
13964 	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13965 		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13966 		return;
13967 	}
13968 
13969 	/*
13970 	 * RSM will extract the destination context as an index into the
13971 	 * map table.  The destination contexts are a sequential block
13972 	 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13973 	 * Map entries are accessed as offset + extracted value.  Adjust
13974 	 * the added offset so this sequence can be placed anywhere in
13975 	 * the table - as long as the entries themselves do not wrap.
13976 	 * There are only enough bits in offset for the table size, so
13977 	 * start with that to allow for a "negative" offset.
13978 	 */
13979 	offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13980 						(int)dd->first_user_ctxt);
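	/*
	 * Offset sketch with hypothetical values: NUM_MAP_ENTRIES = 256,
	 * rmt->used = 24 and first_user_ctxt = 9 give
	 * offset = (u8)(256 + 24 - 9) = 15, so extracted context 9
	 * indexes map entry (15 + 9) mod 256 = 24, the first free slot.
	 */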
13981 
13982 	for (i = dd->first_user_ctxt, idx = rmt->used;
13983 				i < dd->num_rcv_contexts; i++, idx++) {
13984 		/* replace with identity mapping */
13985 		regoff = (idx % 8) * 8;
13986 		regidx = idx / 8;
13987 		reg = rmt->map[regidx];
13988 		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13989 		reg |= (u64)i << regoff;
13990 		rmt->map[regidx] = reg;
13991 	}
13992 
13993 	/*
13994 	 * For RSM intercept of Expected FECN packets:
13995 	 * o packet type 0 - expected
13996 	 * o match on F (bit 95), using select/match 1, and
13997 	 * o match on SH (bit 133), using select/match 2.
13998 	 *
13999 	 * Use index 1 to extract the 8-bit receive context from DestQP
14000 	 * (start at bit 64).  Use that as the RSM map table index.
14001 	 */
14002 	rrd.offset = offset;
14003 	rrd.pkt_type = 0;
14004 	rrd.field1_off = 95;
14005 	rrd.field2_off = 133;
14006 	rrd.index1_off = 64;
14007 	rrd.index1_width = 8;
14008 	rrd.index2_off = 0;
14009 	rrd.index2_width = 0;
14010 	rrd.mask1 = 1;
14011 	rrd.value1 = 1;
14012 	rrd.mask2 = 1;
14013 	rrd.value2 = 1;
14014 
14015 	/* add rule 1 */
14016 	add_rsm_rule(dd, 1, &rrd);
14017 
14018 	rmt->used += dd->num_user_contexts;
14019 }
14020 
14021 static void init_rxe(struct hfi1_devdata *dd)
14022 {
14023 	struct rsm_map_table *rmt;
14024 
14025 	/* enable all receive errors */
14026 	write_csr(dd, RCV_ERR_MASK, ~0ull);
14027 
14028 	rmt = alloc_rsm_map_table(dd);
14029 	/* set up QOS, including the QPN map table */
14030 	init_qos(dd, rmt);
14031 	init_user_fecn_handling(dd, rmt);
14032 	complete_rsm_map_table(dd, rmt);
14033 	kfree(rmt);
14034 
14035 	/*
14036 	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14037 	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14038 	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14039 	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14040 	 * Max_Payload_Size set to its minimum of 128.
14041 	 *
14042 	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14043 	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
14044 	 * tune_pcie_caps() which is called after this routine.
14045 	 */
14046 }
14047 
14048 static void init_other(struct hfi1_devdata *dd)
14049 {
14050 	/* enable all CCE errors */
14051 	write_csr(dd, CCE_ERR_MASK, ~0ull);
14052 	/* enable *some* Misc errors */
14053 	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14054 	/* enable all DC errors, except LCB */
14055 	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14056 	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14057 }
14058 
14059 /*
14060  * Fill out the given AU table using the given CU.  A CU is defined in terms
14061  * of AUs.  The table is an encoding: given the index, how many AUs does that
14062  * represent?
14063  *
14064  * NOTE: Assumes that the register layout is the same for the
14065  * local and remote tables.
14066  */
14067 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14068 			       u32 csr0to3, u32 csr4to7)
14069 {
14070 	write_csr(dd, csr0to3,
14071 		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14072 		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14073 		  2ull * cu <<
14074 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14075 		  4ull * cu <<
14076 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14077 	write_csr(dd, csr4to7,
14078 		  8ull * cu <<
14079 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14080 		  16ull * cu <<
14081 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14082 		  32ull * cu <<
14083 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14084 		  64ull * cu <<
14085 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14086 }
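/*
 * Encoding sketch: the eight entries written above are
 * {0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu, 64*cu} AUs.  With cu = 1 the
 * table is simply the power-of-two ladder {0, 1, 2, 4, 8, 16, 32, 64}
 * that the credit manager indexes into.
 */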
14087 
14088 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14089 {
14090 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14091 			   SEND_CM_LOCAL_AU_TABLE4_TO7);
14092 }
14093 
14094 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14095 {
14096 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14097 			   SEND_CM_REMOTE_AU_TABLE4_TO7);
14098 }
14099 
14100 static void init_txe(struct hfi1_devdata *dd)
14101 {
14102 	int i;
14103 
14104 	/* enable all PIO, SDMA, general, and Egress errors */
14105 	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14106 	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14107 	write_csr(dd, SEND_ERR_MASK, ~0ull);
14108 	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14109 
14110 	/* enable all per-context and per-SDMA engine errors */
14111 	for (i = 0; i < dd->chip_send_contexts; i++)
14112 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14113 	for (i = 0; i < dd->chip_sdma_engines; i++)
14114 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14115 
14116 	/* set the local CU to AU mapping */
14117 	assign_local_cm_au_table(dd, dd->vcu);
14118 
14119 	/*
14120 	 * Set reasonable default for Credit Return Timer
14121 	 * Don't set on Simulator - causes it to choke.
14122 	 */
14123 	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14124 		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14125 }
14126 
14127 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14128 {
14129 	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14130 	unsigned sctxt;
14131 	int ret = 0;
14132 	u64 reg;
14133 
14134 	if (!rcd || !rcd->sc) {
14135 		ret = -EINVAL;
14136 		goto done;
14137 	}
14138 	sctxt = rcd->sc->hw_context;
14139 	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14140 		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14141 		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14142 	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14143 	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14144 		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14145 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14146 	/*
14147 	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14148 	 */
14149 	if (!is_ax(dd)) {
14150 		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14151 		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14152 		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14153 	}
14154 
14155 	/* Enable J_KEY check on receive context. */
14156 	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14157 		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14158 		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14159 	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14160 done:
14161 	return ret;
14162 }
14163 
14164 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14165 {
14166 	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14167 	unsigned sctxt;
14168 	int ret = 0;
14169 	u64 reg;
14170 
14171 	if (!rcd || !rcd->sc) {
14172 		ret = -EINVAL;
14173 		goto done;
14174 	}
14175 	sctxt = rcd->sc->hw_context;
14176 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14177 	/*
14178 	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14179 	 * This check would not have been enabled for A0 h/w, see
14180 	 * set_ctxt_jkey().
14181 	 */
14182 	if (!is_ax(dd)) {
14183 		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14184 		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14185 		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14186 	}
14187 	/* Turn off the J_KEY on the receive side */
14188 	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14189 done:
14190 	return ret;
14191 }
14192 
14193 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14194 {
14195 	struct hfi1_ctxtdata *rcd;
14196 	unsigned sctxt;
14197 	int ret = 0;
14198 	u64 reg;
14199 
14200 	if (ctxt < dd->num_rcv_contexts) {
14201 		rcd = dd->rcd[ctxt];
14202 	} else {
14203 		ret = -EINVAL;
14204 		goto done;
14205 	}
14206 	if (!rcd || !rcd->sc) {
14207 		ret = -EINVAL;
14208 		goto done;
14209 	}
14210 	sctxt = rcd->sc->hw_context;
14211 	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14212 		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14213 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14214 	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14215 	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14216 	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14217 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14218 done:
14219 	return ret;
14220 }
14221 
14222 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14223 {
14224 	struct hfi1_ctxtdata *rcd;
14225 	unsigned sctxt;
14226 	int ret = 0;
14227 	u64 reg;
14228 
14229 	if (ctxt < dd->num_rcv_contexts) {
14230 		rcd = dd->rcd[ctxt];
14231 	} else {
14232 		ret = -EINVAL;
14233 		goto done;
14234 	}
14235 	if (!rcd || !rcd->sc) {
14236 		ret = -EINVAL;
14237 		goto done;
14238 	}
14239 	sctxt = rcd->sc->hw_context;
14240 	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14241 	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14242 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14243 	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14244 done:
14245 	return ret;
14246 }
14247 
14248 /*
14249  * Start doing the clean up of the chip. Our clean up happens in multiple
14250  * stages and this is just the first.
14251  */
14252 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14253 {
14254 	aspm_exit(dd);
14255 	free_cntrs(dd);
14256 	free_rcverr(dd);
14257 	clean_up_interrupts(dd);
14258 	finish_chip_resources(dd);
14259 }
14260 
14261 #define HFI_BASE_GUID(dev) \
14262 	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
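/*
 * The two HFIs on an ASIC differ only in the bit at GUID_HFI_INDEX_SHIFT
 * of their GUIDs; masking that bit off yields a common "base" GUID used
 * below to recognize the peer device.
 */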
14263 
14264 /*
14265  * Information can be shared between the two HFIs on the same ASIC
14266  * in the same OS.  This function finds the peer device and sets
14267  * up a shared structure.
14268  */
14269 static int init_asic_data(struct hfi1_devdata *dd)
14270 {
14271 	unsigned long flags;
14272 	struct hfi1_devdata *tmp, *peer = NULL;
14273 	struct hfi1_asic_data *asic_data;
14274 	int ret = 0;
14275 
14276 	/* pre-allocate the asic structure in case we are the first device */
14277 	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14278 	if (!asic_data)
14279 		return -ENOMEM;
14280 
14281 	spin_lock_irqsave(&hfi1_devs_lock, flags);
14282 	/* Find our peer device */
14283 	list_for_each_entry(tmp, &hfi1_dev_list, list) {
14284 		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14285 		    dd->unit != tmp->unit) {
14286 			peer = tmp;
14287 			break;
14288 		}
14289 	}
14290 
14291 	if (peer) {
14292 		/* use already allocated structure */
14293 		dd->asic_data = peer->asic_data;
14294 		kfree(asic_data);
14295 	} else {
14296 		dd->asic_data = asic_data;
14297 		mutex_init(&dd->asic_data->asic_resource_mutex);
14298 	}
14299 	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14300 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14301 
14302 	/* first one through - set up i2c devices */
14303 	if (!peer)
14304 		ret = set_up_i2c(dd, dd->asic_data);
14305 
14306 	return ret;
14307 }
14308 
14309 /*
14310  * Set dd->boardname.  Use a generic name if a name is not returned from
14311  * EFI variable space.
14312  *
14313  * Return 0 on success, -ENOMEM if space could not be allocated.
14314  */
14315 static int obtain_boardname(struct hfi1_devdata *dd)
14316 {
14317 	/* generic board description */
14318 	const char generic[] =
14319 		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14320 	unsigned long size;
14321 	int ret;
14322 
14323 	ret = read_hfi1_efi_var(dd, "description", &size,
14324 				(void **)&dd->boardname);
14325 	if (ret) {
14326 		dd_dev_info(dd, "Board description not found\n");
14327 		/* use generic description */
14328 		dd->boardname = kstrdup(generic, GFP_KERNEL);
14329 		if (!dd->boardname)
14330 			return -ENOMEM;
14331 	}
14332 	return 0;
14333 }
14334 
14335 /*
14336  * Check the interrupt registers to make sure that they are mapped correctly.
14337  * It is intended to help the user identify any mismapping by the VMM when
14338  * the driver is running in a VM.  This function should only be called before
14339  * interrupts are set up properly.
14340  *
14341  * Return 0 on success, -EINVAL on failure.
14342  */
14343 static int check_int_registers(struct hfi1_devdata *dd)
14344 {
14345 	u64 reg;
14346 	u64 all_bits = ~(u64)0;
14347 	u64 mask;
14348 
14349 	/* Clear CceIntMask[0] to avoid raising any interrupts */
14350 	mask = read_csr(dd, CCE_INT_MASK);
14351 	write_csr(dd, CCE_INT_MASK, 0ull);
14352 	reg = read_csr(dd, CCE_INT_MASK);
14353 	if (reg)
14354 		goto err_exit;
14355 
14356 	/* Clear all interrupt status bits */
14357 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14358 	reg = read_csr(dd, CCE_INT_STATUS);
14359 	if (reg)
14360 		goto err_exit;
14361 
14362 	/* Set all interrupt status bits */
14363 	write_csr(dd, CCE_INT_FORCE, all_bits);
14364 	reg = read_csr(dd, CCE_INT_STATUS);
14365 	if (reg != all_bits)
14366 		goto err_exit;
14367 
14368 	/* Restore the interrupt mask */
14369 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14370 	write_csr(dd, CCE_INT_MASK, mask);
14371 
14372 	return 0;
14373 err_exit:
14374 	write_csr(dd, CCE_INT_MASK, mask);
14375 	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14376 	return -EINVAL;
14377 }
14378 
14379 /**
14380  * Allocate and initialize the device structure for the hfi.
14381  * @pdev: the pci_dev for hfi1_ib device
14382  * @ent: pci_device_id struct for this dev
14383  *
14384  * Also allocates, initializes, and returns the devdata struct for this
14385  * device instance
14386  *
14387  * This is global, and is called directly at init to set up the
14388  * chip-specific function pointers for later use.
14389  */
14390 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14391 				  const struct pci_device_id *ent)
14392 {
14393 	struct hfi1_devdata *dd;
14394 	struct hfi1_pportdata *ppd;
14395 	u64 reg;
14396 	int i, ret;
14397 	static const char * const inames[] = { /* implementation names */
14398 		"RTL silicon",
14399 		"RTL VCS simulation",
14400 		"RTL FPGA emulation",
14401 		"Functional simulator"
14402 	};
14403 	struct pci_dev *parent = pdev->bus->self;
14404 
14405 	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14406 				sizeof(struct hfi1_pportdata));
14407 	if (IS_ERR(dd))
14408 		goto bail;
14409 	ppd = dd->pport;
14410 	for (i = 0; i < dd->num_pports; i++, ppd++) {
14411 		int vl;
14412 		/* init common fields */
14413 		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14414 		/* DC supports 4 link widths */
14415 		ppd->link_width_supported =
14416 			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14417 			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14418 		ppd->link_width_downgrade_supported =
14419 			ppd->link_width_supported;
14420 		/* start out enabling only 4X */
14421 		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14422 		ppd->link_width_downgrade_enabled =
14423 					ppd->link_width_downgrade_supported;
14424 		/* link width active is 0 when link is down */
14425 		/* link width downgrade active is 0 when link is down */
14426 
14427 		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14428 		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
14429 			hfi1_early_err(&pdev->dev,
14430 				       "Invalid num_vls %u, using %u VLs\n",
14431 				    num_vls, HFI1_MAX_VLS_SUPPORTED);
14432 			num_vls = HFI1_MAX_VLS_SUPPORTED;
14433 		}
14434 		ppd->vls_supported = num_vls;
14435 		ppd->vls_operational = ppd->vls_supported;
14436 		ppd->actual_vls_operational = ppd->vls_supported;
14437 		/* Set the default MTU. */
14438 		for (vl = 0; vl < num_vls; vl++)
14439 			dd->vld[vl].mtu = hfi1_max_mtu;
14440 		dd->vld[15].mtu = MAX_MAD_PACKET;
14441 		/*
14442 		 * Set the initial values to reasonable defaults; they will be
14443 		 * set for real when the link is up.
14444 		 */
14445 		ppd->lstate = IB_PORT_DOWN;
14446 		ppd->overrun_threshold = 0x4;
14447 		ppd->phy_error_threshold = 0xf;
14448 		ppd->port_crc_mode_enabled = link_crc_mask;
14449 		/* initialize supported LTP CRC mode */
14450 		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14451 		/* initialize enabled LTP CRC mode */
14452 		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14453 		/* start in offline */
14454 		ppd->host_link_state = HLS_DN_OFFLINE;
14455 		init_vl_arb_caches(ppd);
14456 		ppd->last_pstate = 0xff; /* invalid value */
14457 	}
14458 
14459 	dd->link_default = HLS_DN_POLL;
14460 
14461 	/*
14462 	 * Do remaining PCIe setup and save PCIe values in dd.
14463 	 * Any error printing is already done by the init code.
14464 	 * On return, we have the chip mapped.
14465 	 */
14466 	ret = hfi1_pcie_ddinit(dd, pdev, ent);
14467 	if (ret < 0)
14468 		goto bail_free;
14469 
14470 	/* verify that reads actually work, save revision for reset check */
14471 	dd->revision = read_csr(dd, CCE_REVISION);
14472 	if (dd->revision == ~(u64)0) {
14473 		dd_dev_err(dd, "cannot read chip CSRs\n");
14474 		ret = -EINVAL;
14475 		goto bail_cleanup;
14476 	}
14477 	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14478 			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
14479 	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14480 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
14481 
14482 	/*
14483 	 * Check interrupt registers mapping if the driver has no access to
14484 	 * the upstream component. In this case, it is likely that the driver
14485 	 * is running in a VM.
14486 	 */
14487 	if (!parent) {
14488 		ret = check_int_registers(dd);
14489 		if (ret)
14490 			goto bail_cleanup;
14491 	}
14492 
14493 	/*
14494 	 * obtain the hardware ID - NOT related to unit, which is a
14495 	 * software enumeration
14496 	 */
14497 	reg = read_csr(dd, CCE_REVISION2);
14498 	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14499 					& CCE_REVISION2_HFI_ID_MASK;
14500 	/* the destination field sizes remove the unwanted upper bits */
14501 	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14502 	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14503 	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14504 		    dd->icode < ARRAY_SIZE(inames) ?
14505 		    inames[dd->icode] : "unknown", (int)dd->irev);
14506 
14507 	/* speeds the hardware can support */
14508 	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14509 	/* speeds allowed to run at */
14510 	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14511 	/* give a reasonable active value, will be set on link up */
14512 	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14513 
14514 	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14515 	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14516 	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14517 	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14518 	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14519 	/* fix up link widths for emulation _p */
14520 	ppd = dd->pport;
14521 	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14522 		ppd->link_width_supported =
14523 			ppd->link_width_enabled =
14524 			ppd->link_width_downgrade_supported =
14525 			ppd->link_width_downgrade_enabled =
14526 				OPA_LINK_WIDTH_1X;
14527 	}
14528 	/* ensure num_vls isn't larger than the number of sdma engines */
14529 	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14530 		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14531 			   num_vls, dd->chip_sdma_engines);
14532 		num_vls = dd->chip_sdma_engines;
14533 		ppd->vls_supported = dd->chip_sdma_engines;
14534 		ppd->vls_operational = ppd->vls_supported;
14535 	}
14536 
14537 	/*
14538 	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14539 	 * Limit the max if larger than the field holds.  If timeout is
14540 	 * non-zero, then the calculated field will be at least 1.
14541 	 *
14542 	 * Must be after icode is set up - the cclock rate depends
14543 	 * on knowing the hardware being used.
14544 	 */
14545 	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14546 	if (dd->rcv_intr_timeout_csr >
14547 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14548 		dd->rcv_intr_timeout_csr =
14549 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14550 	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14551 		dd->rcv_intr_timeout_csr = 1;
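	/*
	 * For example, assuming the ASIC cclock of ~805 MHz (about 1242 ps
	 * per cycle), the default 840 ns timeout works out to roughly
	 * 840000 ps / 1242 ps = 676 cclocks, and 676 / 64 = 10, so
	 * rcv_intr_timeout_csr comes out as 10.
	 */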
14552 
14553 	/* needs to be done before we look for the peer device */
14554 	read_guid(dd);
14555 
14556 	/* set up shared ASIC data with peer device */
14557 	ret = init_asic_data(dd);
14558 	if (ret)
14559 		goto bail_cleanup;
14560 
14561 	/* obtain chip sizes, reset chip CSRs */
14562 	init_chip(dd);
14563 
14564 	/* read in the PCIe link speed information */
14565 	ret = pcie_speeds(dd);
14566 	if (ret)
14567 		goto bail_cleanup;
14568 
14569 	/* call before get_platform_config(), after init_chip_resources() */
14570 	ret = eprom_init(dd);
14571 	if (ret)
14572 		goto bail_free_rcverr;
14573 
14574 	/* Needs to be called before hfi1_firmware_init */
14575 	get_platform_config(dd);
14576 
14577 	/* read in firmware */
14578 	ret = hfi1_firmware_init(dd);
14579 	if (ret)
14580 		goto bail_cleanup;
14581 
14582 	/*
14583 	 * In general, the PCIe Gen3 transition must occur after the
14584 	 * chip has been idled (so it won't initiate any PCIe transactions
14585 	 * e.g. an interrupt) and before the driver changes any registers
14586 	 * (the transition will reset the registers).
14587 	 *
14588 	 * In particular, place this call after:
14589 	 * - init_chip()     - the chip will not initiate any PCIe transactions
14590 	 * - pcie_speeds()   - reads the current link speed
14591 	 * - hfi1_firmware_init() - the needed firmware is ready to be
14592 	 *			    downloaded
14593 	 */
14594 	ret = do_pcie_gen3_transition(dd);
14595 	if (ret)
14596 		goto bail_cleanup;
14597 
14598 	/* start setting dd values and adjusting CSRs */
14599 	init_early_variables(dd);
14600 
14601 	parse_platform_config(dd);
14602 
14603 	ret = obtain_boardname(dd);
14604 	if (ret)
14605 		goto bail_cleanup;
14606 
14607 	snprintf(dd->boardversion, BOARD_VERS_MAX,
14608 		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14609 		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14610 		 (u32)dd->majrev,
14611 		 (u32)dd->minrev,
14612 		 (dd->revision >> CCE_REVISION_SW_SHIFT)
14613 		    & CCE_REVISION_SW_MASK);
14614 
14615 	ret = set_up_context_variables(dd);
14616 	if (ret)
14617 		goto bail_cleanup;
14618 
14619 	/* set initial RXE CSRs */
14620 	init_rxe(dd);
14621 	/* set initial TXE CSRs */
14622 	init_txe(dd);
14623 	/* set initial non-RXE, non-TXE CSRs */
14624 	init_other(dd);
14625 	/* set up KDETH QP prefix in both RX and TX CSRs */
14626 	init_kdeth_qp(dd);
14627 
14628 	ret = hfi1_dev_affinity_init(dd);
14629 	if (ret)
14630 		goto bail_cleanup;
14631 
14632 	/* send contexts must be set up before receive contexts */
14633 	ret = init_send_contexts(dd);
14634 	if (ret)
14635 		goto bail_cleanup;
14636 
14637 	ret = hfi1_create_ctxts(dd);
14638 	if (ret)
14639 		goto bail_cleanup;
14640 
14641 	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14642 	/*
14643 	 * rcd[0] is guaranteed to be valid by this point. Also, all
14644 	 * contexts use the same value, as set by the module parameter.
14645 	 */
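	/*
	 * Note: sizeof(u64) / sizeof(u32) binds before the subtraction, so
	 * this is the entry size in dwords minus the 2 dwords of the 8-byte
	 * RHF at the end of each rcvhdrq entry.
	 */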
14646 	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14647 
14648 	ret = init_pervl_scs(dd);
14649 	if (ret)
14650 		goto bail_cleanup;
14651 
14652 	/* sdma init */
14653 	for (i = 0; i < dd->num_pports; ++i) {
14654 		ret = sdma_init(dd, i);
14655 		if (ret)
14656 			goto bail_cleanup;
14657 	}
14658 
14659 	/* use contexts created by hfi1_create_ctxts */
14660 	ret = set_up_interrupts(dd);
14661 	if (ret)
14662 		goto bail_cleanup;
14663 
14664 	/* set up LCB access - must be after set_up_interrupts() */
14665 	init_lcb_access(dd);
14666 
14667 	/*
14668 	 * Serial number is created from the base guid:
14669 	 * [27:24] = base guid [38:35]
14670 	 * [23: 0] = base guid [23: 0]
14671 	 */
14672 	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14673 		 (dd->base_guid & 0xFFFFFF) |
14674 		     ((dd->base_guid >> 11) & 0xF000000));
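	/*
	 * For example, a base guid of 0x0011223344556677 yields
	 * bits [23:0] = 0x556677 and bits [27:24] = guid bits [38:35]
	 * = 0x8, for a serial of 0x08556677.
	 */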
14675 
14676 	dd->oui1 = dd->base_guid >> 56 & 0xFF;
14677 	dd->oui2 = dd->base_guid >> 48 & 0xFF;
14678 	dd->oui3 = dd->base_guid >> 40 & 0xFF;
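	/* per the EUI-64 layout, the top three GUID bytes are the vendor OUI */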
14679 
14680 	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14681 	if (ret)
14682 		goto bail_clear_intr;
14683 
14684 	thermal_init(dd);
14685 
14686 	ret = init_cntrs(dd);
14687 	if (ret)
14688 		goto bail_clear_intr;
14689 
14690 	ret = init_rcverr(dd);
14691 	if (ret)
14692 		goto bail_free_cntrs;
14693 
14694 	goto bail;
14695 
14696 bail_free_rcverr:
14697 	free_rcverr(dd);
14698 bail_free_cntrs:
14699 	free_cntrs(dd);
14700 bail_clear_intr:
14701 	clean_up_interrupts(dd);
14702 bail_cleanup:
14703 	hfi1_pcie_ddcleanup(dd);
14704 bail_free:
14705 	hfi1_free_devdata(dd);
14706 	dd = ERR_PTR(ret);
14707 bail:
14708 	return dd;
14709 }
14710 
14711 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14712 			u32 dw_len)
14713 {
14714 	u32 delta_cycles;
14715 	u32 current_egress_rate = ppd->current_egress_rate;
14716 	/* rates here are in units of 10^6 bits/sec */
14717 
14718 	if (desired_egress_rate == -1)
14719 		return 0; /* shouldn't happen */
14720 
14721 	if (desired_egress_rate >= current_egress_rate)
14722 		return 0; /* we can't make it go faster, only slower */
14723 
14724 	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14725 			egress_cycles(dw_len * 4, current_egress_rate);
14726 
14727 	return (u16)delta_cycles;
14728 }
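/*
 * Illustration: if the link currently egresses at 25 Gb/s but the desired
 * static rate is 12.5 Gb/s, the packet takes roughly twice as many cycles
 * at the slower rate; that difference is the delay returned above and
 * later programmed into the PBC static rate control field.
 */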
14729 
14730 /**
14731  * create_pbc - build a pbc for transmission
14732  * @flags: special case flags or-ed into the built pbc
14733  * @srate_mbs: static rate, in Mb/s
14734  * @vl: virtual lane
14735  * @dw_len: dword length (header words + data words + pbc words)
14736  *
14737  * Create a PBC with the given flags, rate, VL, and length.
14738  *
14739  * NOTE: The PBC created will not insert any HCRC - all callers but one are
14740  * for verbs, which does not use this PSM feature.  The lone other caller
14741  * is for the diagnostic interface, which calls this if the user does not
14742  * supply their own PBC.
14743  */
14744 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14745 	       u32 dw_len)
14746 {
14747 	u64 pbc, delay = 0;
14748 
14749 	if (unlikely(srate_mbs))
14750 		delay = delay_cycles(ppd, srate_mbs, dw_len);
14751 
14752 	pbc = flags
14753 		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14754 		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14755 		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14756 		| (dw_len & PBC_LENGTH_DWS_MASK)
14757 			<< PBC_LENGTH_DWS_SHIFT;
14758 
14759 	return pbc;
14760 }
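/*
 * A minimal usage sketch (illustrative; the actual call sites are in the
 * verbs send path): build a PBC for a packet of plen dwords on virtual
 * lane vl with no static rate limiting:
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, vl, plen);
 *
 * Passing srate_mbs == 0 skips the delay_cycles() computation entirely.
 */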
14761 
14762 #define SBUS_THERMAL    0x4f
14763 #define SBUS_THERM_MONITOR_MODE 0x1
14764 
14765 #define THERM_FAILURE(dev, ret, reason) \
14766 	dd_dev_err((dev),						\
14767 		   "Thermal sensor initialization failed: %s (%d)\n",	\
14768 		   (reason), (ret))
14769 
14770 /*
14771  * Initialize the thermal sensor.
14772  *
14773  * After initialization, enable polling of the thermal sensor
14774  * through the SBus interface. For this to work, the SBus Master
14775  * firmware must be loaded, because the HW polling logic uses SBus
14776  * interrupts, which the default firmware does not support.
14777  * Otherwise, no data will be returned through the
14778  * ASIC_STS_THERM CSR.
14779  */
14780 static int thermal_init(struct hfi1_devdata *dd)
14781 {
14782 	int ret = 0;
14783 
14784 	if (dd->icode != ICODE_RTL_SILICON ||
14785 	    check_chip_resource(dd, CR_THERM_INIT, NULL))
14786 		return ret;
14787 
14788 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14789 	if (ret) {
14790 		THERM_FAILURE(dd, ret, "Acquire SBus");
14791 		return ret;
14792 	}
14793 
14794 	dd_dev_info(dd, "Initializing thermal sensor\n");
14795 	/* Disable polling of thermal readings */
14796 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14797 	msleep(100);
14798 	/* Thermal Sensor Initialization */
14799 	/*    Step 1: Reset the Thermal SBus Receiver */
14800 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14801 				RESET_SBUS_RECEIVER, 0);
14802 	if (ret) {
14803 		THERM_FAILURE(dd, ret, "Bus Reset");
14804 		goto done;
14805 	}
14806 	/*    Step 2: Set Reset bit in Thermal block */
14807 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14808 				WRITE_SBUS_RECEIVER, 0x1);
14809 	if (ret) {
14810 		THERM_FAILURE(dd, ret, "Therm Block Reset");
14811 		goto done;
14812 	}
14813 	/*    Step 3: Write clock divider value (0x32 = 50: 100MHz -> 2MHz) */
14814 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14815 				WRITE_SBUS_RECEIVER, 0x32);
14816 	if (ret) {
14817 		THERM_FAILURE(dd, ret, "Write Clock Div");
14818 		goto done;
14819 	}
14820 	/*    Step 4: Select temperature mode */
14821 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14822 				WRITE_SBUS_RECEIVER,
14823 				SBUS_THERM_MONITOR_MODE);
14824 	if (ret) {
14825 		THERM_FAILURE(dd, ret, "Write Mode Sel");
14826 		goto done;
14827 	}
14828 	/*    Step 5: De-assert block reset and start conversion */
14829 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14830 				WRITE_SBUS_RECEIVER, 0x2);
14831 	if (ret) {
14832 		THERM_FAILURE(dd, ret, "Write Reset Deassert");
14833 		goto done;
14834 	}
14835 	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14836 	msleep(22);
14837 
14838 	/* Enable polling of thermal readings */
14839 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14840 
14841 	/* Set initialized flag */
14842 	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14843 	if (ret)
14844 		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14845 
14846 done:
14847 	release_chip_resource(dd, CR_SBUS);
14848 	return ret;
14849 }
14850 
14851 static void handle_temp_err(struct hfi1_devdata *dd)
14852 {
14853 	struct hfi1_pportdata *ppd = &dd->pport[0];
14854 	/*
14855 	 * Thermal Critical Interrupt
14856 	 * Put the device into forced freeze mode, take link down to
14857 	 * offline, and put DC into reset.
14858 	 */
14859 	dd_dev_emerg(dd,
14860 		     "Critical temperature reached! Forcing device into freeze mode!\n");
14861 	dd->flags |= HFI1_FORCED_FREEZE;
14862 	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14863 	/*
14864 	 * Shut DC down as much and as quickly as possible.
14865 	 *
14866 	 * Step 1: Take the link down to OFFLINE. This will cause the
14867 	 *         8051 to put the Serdes in reset. However, we don't want to
14868 	 *         go through the entire link state machine since we want to
14869 	 *         shutdown ASAP. Furthermore, this is not a graceful shutdown
14870 	 *         but rather an attempt to save the chip.
14871 	 *         Code below is almost the same as quiet_serdes() but avoids
14872 	 *         all the extra work and the sleeps.
14873 	 */
14874 	ppd->driver_link_ready = 0;
14875 	ppd->link_enabled = 0;
14876 	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14877 				PLS_OFFLINE);
14878 	/*
14879 	 * Step 2: Shutdown LCB and 8051
14880 	 *         After shutdown, do not restore DC_CFG_RESET value.
14881 	 */
14882 	dc_shutdown(dd);
14883 }
14884