1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause 2 /* 3 * Copyright(c) 2015 - 2020 Intel Corporation. 4 * Copyright(c) 2021 Cornelis Networks. 5 */ 6 7 /* 8 * This file contains all of the code that is specific to the HFI chip 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/module.h> 15 16 #include "hfi.h" 17 #include "trace.h" 18 #include "mad.h" 19 #include "pio.h" 20 #include "sdma.h" 21 #include "eprom.h" 22 #include "efivar.h" 23 #include "platform.h" 24 #include "aspm.h" 25 #include "affinity.h" 26 #include "debugfs.h" 27 #include "fault.h" 28 #include "netdev.h" 29 30 uint num_vls = HFI1_MAX_VLS_SUPPORTED; 31 module_param(num_vls, uint, S_IRUGO); 32 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); 33 34 /* 35 * Default time to aggregate two 10K packets from the idle state 36 * (timer not running). The timer starts at the end of the first packet, 37 * so only the time for one 10K packet and header plus a bit extra is needed. 38 * 10 * 1024 + 64 header bytes = 10304 bytes 39 * 10304 bytes / 12.5 GB/s = 824.32ns 40 */ 41 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */ 42 module_param(rcv_intr_timeout, uint, S_IRUGO); 43 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns"); 44 45 uint rcv_intr_count = 16; /* same as qib */ 46 module_param(rcv_intr_count, uint, S_IRUGO); 47 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count"); 48 49 ushort link_crc_mask = SUPPORTED_CRCS; 50 module_param(link_crc_mask, ushort, S_IRUGO); 51 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link"); 52 53 uint loopback; 54 module_param_named(loopback, loopback, uint, S_IRUGO); 55 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)"); 56 57 /* Other driver tunables */ 58 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */ 59 static ushort crc_14b_sideband = 1; 60 static uint use_flr = 1; 61 uint quick_linkup; /* skip LNI */ 62 63 struct flag_table { 64 u64 flag; /* the flag */ 65 char *str; /* description string */ 66 u16 extra; /* extra information */ 67 u16 unused0; 68 u32 unused1; 69 }; 70 71 /* str must be a string constant */ 72 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra} 73 #define FLAG_ENTRY0(str, flag) {flag, str, 0} 74 75 /* Send Error Consequences */ 76 #define SEC_WRITE_DROPPED 0x1 77 #define SEC_PACKET_DROPPED 0x2 78 #define SEC_SC_HALTED 0x4 /* per-context only */ 79 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */ 80 81 #define DEFAULT_KRCVQS 2 82 #define MIN_KERNEL_KCTXTS 2 83 #define FIRST_KERNEL_KCTXT 1 84 85 /* 86 * RSM instance allocation 87 * 0 - User Fecn Handling 88 * 1 - Vnic 89 * 2 - AIP 90 * 3 - Verbs 91 */ 92 #define RSM_INS_FECN 0 93 #define RSM_INS_VNIC 1 94 #define RSM_INS_AIP 2 95 #define RSM_INS_VERBS 3 96 97 /* Bit offset into the GUID which carries HFI id information */ 98 #define GUID_HFI_INDEX_SHIFT 39 99 100 /* extract the emulation revision */ 101 #define emulator_rev(dd) ((dd)->irev >> 8) 102 /* parallel and serial emulation versions are 3 and 4 respectively */ 103 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3) 104 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4) 105 106 /* RSM fields for Verbs */ 107 /* packet type */ 108 #define IB_PACKET_TYPE 2ull 109 #define QW_SHIFT 6ull 110 /* QPN[7..1] */ 111 #define QPN_WIDTH 7ull 112 113 /* LRH.BTH: QW 0, OFFSET 48 - for match */ 114 #define LRH_BTH_QW 0ull 115 #define LRH_BTH_BIT_OFFSET 48ull 116
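/*
 * Note on the RSM match/select offsets in this block: each offset packs a
 * quad-word index and a bit offset into one value, (QW << QW_SHIFT) |
 * bit_offset, so the bits above QW_SHIFT (6) pick which 64-bit word of the
 * header is examined and the low bits give the starting bit within that
 * word.  Worked examples from the values defined here (illustrative only):
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */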
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off)) 117 #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET) 118 #define LRH_BTH_SELECT 119 #define LRH_BTH_MASK 3ull 120 #define LRH_BTH_VALUE 2ull 121 122 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */ 123 #define LRH_SC_QW 0ull 124 #define LRH_SC_BIT_OFFSET 56ull 125 #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off)) 126 #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET) 127 #define LRH_SC_MASK 128ull 128 #define LRH_SC_VALUE 0ull 129 130 /* SC[n..0] QW 0, OFFSET 60 - for select */ 131 #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull)) 132 133 /* QPN[m+n:1] QW 1, OFFSET 1 */ 134 #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull)) 135 136 /* RSM fields for AIP */ 137 /* LRH.BTH above is reused for this rule */ 138 139 /* BTH.DESTQP: QW 1, OFFSET 16 for match */ 140 #define BTH_DESTQP_QW 1ull 141 #define BTH_DESTQP_BIT_OFFSET 16ull 142 #define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off)) 143 #define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET) 144 #define BTH_DESTQP_MASK 0xFFull 145 #define BTH_DESTQP_VALUE 0x81ull 146 147 /* DETH.SQPN: QW 1 Offset 56 for select */ 148 /* We use the 8 most significant Source QPN bits as entropy for AIP */ 149 #define DETH_AIP_SQPN_QW 3ull 150 #define DETH_AIP_SQPN_BIT_OFFSET 56ull 151 #define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off)) 152 #define DETH_AIP_SQPN_SELECT_OFFSET \ 153 DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET) 154 155 /* RSM fields for Vnic */ 156 /* L2_TYPE: QW 0, OFFSET 61 - for match */ 157 #define L2_TYPE_QW 0ull 158 #define L2_TYPE_BIT_OFFSET 61ull 159 #define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off)) 160 #define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET) 161 #define L2_TYPE_MASK 3ull 162 #define L2_16B_VALUE 2ull 163 164 /* L4_TYPE QW 1, OFFSET 0 - for match */ 165 #define L4_TYPE_QW 1ull 166 #define L4_TYPE_BIT_OFFSET 0ull 167 #define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off)) 168 #define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET) 169 #define L4_16B_TYPE_MASK 0xFFull 170 #define L4_16B_ETH_VALUE 0x78ull 171 172 /* 16B VESWID - for select */ 173 #define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull)) 174 /* 16B ENTROPY - for select */ 175 #define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull)) 176 177 /* defines to build power on SC2VL table */ 178 #define SC2VL_VAL( \ 179 num, \ 180 sc0, sc0val, \ 181 sc1, sc1val, \ 182 sc2, sc2val, \ 183 sc3, sc3val, \ 184 sc4, sc4val, \ 185 sc5, sc5val, \ 186 sc6, sc6val, \ 187 sc7, sc7val) \ 188 ( \ 189 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \ 190 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \ 191 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \ 192 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \ 193 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \ 194 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \ 195 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \ 196 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \ 197 ) 198 199 #define DC_SC_VL_VAL( \ 200 range, \ 201 e0, e0val, \ 202 e1, e1val, \ 203 e2, e2val, \ 204 e3, e3val, \ 205 e4, e4val, \ 206 e5, e5val, \ 207 e6, e6val, \ 208 e7, e7val, \ 209 e8, e8val, \ 210 e9, e9val, \ 211 e10, e10val, \ 212 e11, e11val, \ 213 e12, e12val, \ 214 e13, e13val, \ 215 e14, e14val, \ 216 e15, e15val) \ 217 ( \ 218 ((u64)(e0val) <<
DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \ 219 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \ 220 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \ 221 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \ 222 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \ 223 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \ 224 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \ 225 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \ 226 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \ 227 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \ 228 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \ 229 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \ 230 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \ 231 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \ 232 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \ 233 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \ 234 ) 235 236 /* all CceStatus sub-block freeze bits */ 237 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \ 238 | CCE_STATUS_RXE_FROZE_SMASK \ 239 | CCE_STATUS_TXE_FROZE_SMASK \ 240 | CCE_STATUS_TXE_PIO_FROZE_SMASK) 241 /* all CceStatus sub-block TXE pause bits */ 242 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \ 243 | CCE_STATUS_TXE_PAUSED_SMASK \ 244 | CCE_STATUS_SDMA_PAUSED_SMASK) 245 /* all CceStatus sub-block RXE pause bits */ 246 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK 247 248 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL 249 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF 250 251 /* 252 * CCE Error flags. 
253 */ 254 static struct flag_table cce_err_status_flags[] = { 255 /* 0*/ FLAG_ENTRY0("CceCsrParityErr", 256 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK), 257 /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr", 258 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK), 259 /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr", 260 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK), 261 /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr", 262 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK), 263 /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr", 264 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK), 265 /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr", 266 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK), 267 /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr", 268 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK), 269 /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr", 270 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK), 271 /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr", 272 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK), 273 /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr", 274 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK), 275 /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr", 276 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK), 277 /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError", 278 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK), 279 /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError", 280 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK), 281 /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr", 282 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK), 283 /*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr", 284 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK), 285 /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr", 286 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK), 287 /*16*/ FLAG_ENTRY0("PcicPostDatQCorErr", 288 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK), 289 /*17*/ FLAG_ENTRY0("PcicCplHdQCorErr", 290 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK), 291 /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr", 292 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK), 293 /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr", 294 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK), 295 /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr", 296 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK), 297 /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr", 298 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK), 299 /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr", 300 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK), 301 /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr", 302 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK), 303 /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr", 304 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK), 305 /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr", 306 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK), 307 /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr", 308 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK), 309 /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr", 310 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK), 311 /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr", 312 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK), 313 /*29*/ FLAG_ENTRY0("PcicReceiveParityErr", 314 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK), 315 /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr", 316 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK), 317 /*31*/ FLAG_ENTRY0("LATriggered", 318 CCE_ERR_STATUS_LA_TRIGGERED_SMASK), 319 /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr", 320 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK), 321 /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr", 322 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK), 323 /*34*/
FLAG_ENTRY0("CceRcplAsyncFifoParityErr", 324 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK), 325 /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr", 326 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK), 327 /*36*/ FLAG_ENTRY0("CceMsixTableCorErr", 328 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK), 329 /*37*/ FLAG_ENTRY0("CceMsixTableUncErr", 330 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK), 331 /*38*/ FLAG_ENTRY0("CceIntMapCorErr", 332 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK), 333 /*39*/ FLAG_ENTRY0("CceIntMapUncErr", 334 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK), 335 /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr", 336 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK), 337 /*41-63 reserved*/ 338 }; 339 340 /* 341 * Misc Error flags 342 */ 343 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK 344 static struct flag_table misc_err_status_flags[] = { 345 /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)), 346 /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)), 347 /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)), 348 /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)), 349 /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)), 350 /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)), 351 /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)), 352 /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)), 353 /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)), 354 /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)), 355 /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)), 356 /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)), 357 /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL)) 358 }; 359 360 /* 361 * TXE PIO Error flags and consequences 362 */ 363 static struct flag_table pio_err_status_flags[] = { 364 /* 0*/ FLAG_ENTRY("PioWriteBadCtxt", 365 SEC_WRITE_DROPPED, 366 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK), 367 /* 1*/ FLAG_ENTRY("PioWriteAddrParity", 368 SEC_SPC_FREEZE, 369 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK), 370 /* 2*/ FLAG_ENTRY("PioCsrParity", 371 SEC_SPC_FREEZE, 372 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK), 373 /* 3*/ FLAG_ENTRY("PioSbMemFifo0", 374 SEC_SPC_FREEZE, 375 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK), 376 /* 4*/ FLAG_ENTRY("PioSbMemFifo1", 377 SEC_SPC_FREEZE, 378 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK), 379 /* 5*/ FLAG_ENTRY("PioPccFifoParity", 380 SEC_SPC_FREEZE, 381 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK), 382 /* 6*/ FLAG_ENTRY("PioPecFifoParity", 383 SEC_SPC_FREEZE, 384 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK), 385 /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity", 386 SEC_SPC_FREEZE, 387 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK), 388 /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity", 389 SEC_SPC_FREEZE, 390 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK), 391 /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr", 392 SEC_SPC_FREEZE, 393 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK), 394 /*10*/ FLAG_ENTRY("PioSmPktResetParity", 395 SEC_SPC_FREEZE, 396 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK), 397 /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc", 398 SEC_SPC_FREEZE, 399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK), 400 /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc", 401 SEC_SPC_FREEZE, 402 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK), 403 /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor", 404 0, 405 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK), 406 /*14*/ 
FLAG_ENTRY("PioVlLenMemBank1Cor", 407 0, 408 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK), 409 /*15*/ FLAG_ENTRY("PioCreditRetFifoParity", 410 SEC_SPC_FREEZE, 411 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK), 412 /*16*/ FLAG_ENTRY("PioPpmcPblFifo", 413 SEC_SPC_FREEZE, 414 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK), 415 /*17*/ FLAG_ENTRY("PioInitSmIn", 416 0, 417 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK), 418 /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm", 419 SEC_SPC_FREEZE, 420 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK), 421 /*19*/ FLAG_ENTRY("PioHostAddrMemUnc", 422 SEC_SPC_FREEZE, 423 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK), 424 /*20*/ FLAG_ENTRY("PioHostAddrMemCor", 425 0, 426 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK), 427 /*21*/ FLAG_ENTRY("PioWriteDataParity", 428 SEC_SPC_FREEZE, 429 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK), 430 /*22*/ FLAG_ENTRY("PioStateMachine", 431 SEC_SPC_FREEZE, 432 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK), 433 /*23*/ FLAG_ENTRY("PioWriteQwValidParity", 434 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 435 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK), 436 /*24*/ FLAG_ENTRY("PioBlockQwCountParity", 437 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 438 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK), 439 /*25*/ FLAG_ENTRY("PioVlfVlLenParity", 440 SEC_SPC_FREEZE, 441 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK), 442 /*26*/ FLAG_ENTRY("PioVlfSopParity", 443 SEC_SPC_FREEZE, 444 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK), 445 /*27*/ FLAG_ENTRY("PioVlFifoParity", 446 SEC_SPC_FREEZE, 447 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK), 448 /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity", 449 SEC_SPC_FREEZE, 450 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK), 451 /*29*/ FLAG_ENTRY("PioPpmcSopLen", 452 SEC_SPC_FREEZE, 453 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK), 454 /*30-31 reserved*/ 455 /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity", 456 SEC_SPC_FREEZE, 457 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK), 458 /*33*/ FLAG_ENTRY("PioLastReturnedCntParity", 459 SEC_SPC_FREEZE, 460 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK), 461 /*34*/ FLAG_ENTRY("PioPccSopHeadParity", 462 SEC_SPC_FREEZE, 463 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK), 464 /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr", 465 SEC_SPC_FREEZE, 466 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK), 467 /*36-63 reserved*/ 468 }; 469 470 /* TXE PIO errors that cause an SPC freeze */ 471 #define ALL_PIO_FREEZE_ERR \ 472 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \ 473 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \ 474 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \ 475 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \ 476 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \ 477 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \ 478 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \ 479 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \ 480 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \ 481 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \ 482 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \ 483 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \ 484 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \ 485 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \ 486 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \ 487 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \ 
488 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \ 489 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \ 490 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \ 491 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \ 492 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \ 493 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \ 494 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \ 495 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \ 496 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \ 497 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \ 498 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \ 499 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \ 500 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK) 501 502 /* 503 * TXE SDMA Error flags 504 */ 505 static struct flag_table sdma_err_status_flags[] = { 506 /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr", 507 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK), 508 /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr", 509 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK), 510 /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr", 511 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK), 512 /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr", 513 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK), 514 /*04-63 reserved*/ 515 }; 516 517 /* TXE SDMA errors that cause an SPC freeze */ 518 #define ALL_SDMA_FREEZE_ERR \ 519 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \ 520 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \ 521 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK) 522 523 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */ 524 #define PORT_DISCARD_EGRESS_ERRS \ 525 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \ 526 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \ 527 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK) 528 529 /* 530 * TXE Egress Error flags 531 */ 532 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK 533 static struct flag_table egress_err_status_flags[] = { 534 /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)), 535 /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)), 536 /* 2 reserved */ 537 /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr", 538 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)), 539 /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)), 540 /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)), 541 /* 6 reserved */ 542 /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr", 543 SEES(TX_PIO_LAUNCH_INTF_PARITY)), 544 /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr", 545 SEES(TX_SDMA_LAUNCH_INTF_PARITY)), 546 /* 9-10 reserved */ 547 /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr", 548 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)), 549 /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)), 550 /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)), 551 /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)), 552 /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)), 553 /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr", 554 SEES(TX_SDMA0_DISALLOWED_PACKET)), 555 /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr", 556 SEES(TX_SDMA1_DISALLOWED_PACKET)), 557 /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr", 558 SEES(TX_SDMA2_DISALLOWED_PACKET)), 559 /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr", 560 SEES(TX_SDMA3_DISALLOWED_PACKET)), 561 /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr", 562 SEES(TX_SDMA4_DISALLOWED_PACKET)), 563 /*21*/ 
FLAG_ENTRY0("TxSdma5DisallowedPacketErr", 564 SEES(TX_SDMA5_DISALLOWED_PACKET)), 565 /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr", 566 SEES(TX_SDMA6_DISALLOWED_PACKET)), 567 /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr", 568 SEES(TX_SDMA7_DISALLOWED_PACKET)), 569 /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr", 570 SEES(TX_SDMA8_DISALLOWED_PACKET)), 571 /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr", 572 SEES(TX_SDMA9_DISALLOWED_PACKET)), 573 /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr", 574 SEES(TX_SDMA10_DISALLOWED_PACKET)), 575 /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr", 576 SEES(TX_SDMA11_DISALLOWED_PACKET)), 577 /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr", 578 SEES(TX_SDMA12_DISALLOWED_PACKET)), 579 /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr", 580 SEES(TX_SDMA13_DISALLOWED_PACKET)), 581 /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr", 582 SEES(TX_SDMA14_DISALLOWED_PACKET)), 583 /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr", 584 SEES(TX_SDMA15_DISALLOWED_PACKET)), 585 /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr", 586 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)), 587 /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr", 588 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)), 589 /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr", 590 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)), 591 /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr", 592 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)), 593 /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr", 594 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)), 595 /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr", 596 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)), 597 /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr", 598 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)), 599 /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr", 600 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)), 601 /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr", 602 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)), 603 /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)), 604 /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)), 605 /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)), 606 /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)), 607 /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)), 608 /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)), 609 /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)), 610 /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)), 611 /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)), 612 /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)), 613 /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)), 614 /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)), 615 /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)), 616 /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)), 617 /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)), 618 /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)), 619 /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)), 620 /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)), 621 /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)), 622 /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)), 623 /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)), 624 /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr", 625 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)), 626 /*63*/ 
FLAG_ENTRY0("TxReadPioMemoryCsrUncErr", 627 SEES(TX_READ_PIO_MEMORY_CSR_UNC)), 628 }; 629 630 /* 631 * TXE Egress Error Info flags 632 */ 633 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK 634 static struct flag_table egress_err_info_flags[] = { 635 /* 0*/ FLAG_ENTRY0("Reserved", 0ull), 636 /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)), 637 /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 638 /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 639 /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)), 640 /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)), 641 /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)), 642 /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)), 643 /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)), 644 /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)), 645 /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)), 646 /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)), 647 /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)), 648 /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)), 649 /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)), 650 /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)), 651 /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)), 652 /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)), 653 /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)), 654 /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)), 655 /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)), 656 /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)), 657 }; 658 659 /* TXE Egress errors that cause an SPC freeze */ 660 #define ALL_TXE_EGRESS_FREEZE_ERR \ 661 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \ 662 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \ 663 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \ 664 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \ 665 | SEES(TX_LAUNCH_CSR_PARITY) \ 666 | SEES(TX_SBRD_CTL_CSR_PARITY) \ 667 | SEES(TX_CONFIG_PARITY) \ 668 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \ 669 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \ 670 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \ 671 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \ 672 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \ 673 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \ 674 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \ 675 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \ 676 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \ 677 | SEES(TX_CREDIT_RETURN_PARITY)) 678 679 /* 680 * TXE Send error flags 681 */ 682 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK 683 static struct flag_table send_err_status_flags[] = { 684 /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)), 685 /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)), 686 /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR)) 687 }; 688 689 /* 690 * TXE Send Context Error flags and consequences 691 */ 692 static struct flag_table sc_err_status_flags[] = { 693 /* 0*/ FLAG_ENTRY("InconsistentSop", 694 SEC_PACKET_DROPPED | SEC_SC_HALTED, 695 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK), 696 /* 1*/ FLAG_ENTRY("DisallowedPacket", 697 SEC_PACKET_DROPPED | SEC_SC_HALTED, 698 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK), 699 /* 2*/ FLAG_ENTRY("WriteCrossesBoundary", 700 SEC_WRITE_DROPPED | SEC_SC_HALTED, 701 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK), 702 /* 3*/ FLAG_ENTRY("WriteOverflow", 703 SEC_WRITE_DROPPED | SEC_SC_HALTED, 704 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK), 705 /* 4*/ FLAG_ENTRY("WriteOutOfBounds", 706 SEC_WRITE_DROPPED | SEC_SC_HALTED, 707 
SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK), 708 /* 5-63 reserved*/ 709 }; 710 711 /* 712 * RXE Receive Error flags 713 */ 714 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK 715 static struct flag_table rxe_err_status_flags[] = { 716 /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)), 717 /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)), 718 /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)), 719 /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)), 720 /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)), 721 /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)), 722 /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)), 723 /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)), 724 /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)), 725 /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)), 726 /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)), 727 /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)), 728 /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)), 729 /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)), 730 /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)), 731 /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)), 732 /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr", 733 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)), 734 /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)), 735 /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)), 736 /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr", 737 RXES(RBUF_BLOCK_LIST_READ_UNC)), 738 /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr", 739 RXES(RBUF_BLOCK_LIST_READ_COR)), 740 /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr", 741 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)), 742 /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr", 743 RXES(RBUF_CSR_QENT_CNT_PARITY)), 744 /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr", 745 RXES(RBUF_CSR_QNEXT_BUF_PARITY)), 746 /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr", 747 RXES(RBUF_CSR_QVLD_BIT_PARITY)), 748 /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)), 749 /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)), 750 /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr", 751 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)), 752 /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)), 753 /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)), 754 /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)), 755 /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)), 756 /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)), 757 /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)), 758 /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)), 759 /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr", 760 RXES(RBUF_FL_INITDONE_PARITY)), 761 /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr", 762 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)), 763 /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)), 764 /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)), 765 /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)), 766 /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr", 767 RXES(LOOKUP_DES_PART1_UNC_COR)), 768 /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr", 769 RXES(LOOKUP_DES_PART2_PARITY)), 770 /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", 
RXES(LOOKUP_RCV_ARRAY_UNC)), 771 /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)), 772 /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)), 773 /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)), 774 /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)), 775 /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)), 776 /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)), 777 /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)), 778 /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)), 779 /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)), 780 /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)), 781 /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)), 782 /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)), 783 /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)), 784 /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)), 785 /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)), 786 /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)), 787 /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)), 788 /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)), 789 /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)), 790 /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)), 791 /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY)) 792 }; 793 794 /* RXE errors that will trigger an SPC freeze */ 795 #define ALL_RXE_FREEZE_ERR \ 796 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \ 797 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \ 798 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \ 799 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \ 800 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \ 801 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \ 802 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \ 803 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \ 804 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \ 805 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \ 806 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \ 807 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \ 808 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \ 809 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \ 810 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \ 811 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \ 812 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \ 813 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \ 814 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \ 815 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \ 816 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \ 817 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \ 818 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \ 819 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \ 820 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \ 821 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \ 822 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \ 823 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \ 824 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \ 825 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \ 826 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \ 827 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \ 828 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \ 829 | 
RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \ 830 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \ 831 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \ 832 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \ 833 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \ 834 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \ 835 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \ 836 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \ 837 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \ 838 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \ 839 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK) 840 841 #define RXE_FREEZE_ABORT_MASK \ 842 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \ 843 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \ 844 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK) 845 846 /* 847 * DCC Error Flags 848 */ 849 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK 850 static struct flag_table dcc_err_flags[] = { 851 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)), 852 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)), 853 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)), 854 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)), 855 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)), 856 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)), 857 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)), 858 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)), 859 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)), 860 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)), 861 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)), 862 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)), 863 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)), 864 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)), 865 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)), 866 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)), 867 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)), 868 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)), 869 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)), 870 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)), 871 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)), 872 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)), 873 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)), 874 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)), 875 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)), 876 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)), 877 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)), 878 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)), 879 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)), 880 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)), 881 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)), 882 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)), 883 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)), 884 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)), 885 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)), 886 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)), 887 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)), 888 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)), 889 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)), 890 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)), 891 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)), 892 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)), 893 FLAG_ENTRY0("tx_byte_shft_parity_err", 
DCCE(TX_BYTE_SHFT_PARITY_ERR)), 894 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)), 895 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)), 896 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)), 897 }; 898 899 /* 900 * LCB error flags 901 */ 902 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK 903 static struct flag_table lcb_err_flags[] = { 904 /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)), 905 /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)), 906 /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)), 907 /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST", 908 LCBE(ALL_LNS_FAILED_REINIT_TEST)), 909 /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)), 910 /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)), 911 /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)), 912 /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)), 913 /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)), 914 /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)), 915 /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)), 916 /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)), 917 /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)), 918 /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER", 919 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)), 920 /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)), 921 /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)), 922 /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)), 923 /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)), 924 /*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)), 925 /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE", 926 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)), 927 /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)), 928 /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)), 929 /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)), 930 /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)), 931 /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)), 932 /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)), 933 /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP", 934 LCBE(RST_FOR_INCOMPLT_RND_TRIP)), 935 /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)), 936 /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE", 937 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)), 938 /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR", 939 LCBE(REDUNDANT_FLIT_PARITY_ERR)) 940 }; 941 942 /* 943 * DC8051 Error Flags 944 */ 945 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK 946 static struct flag_table dc8051_err_flags[] = { 947 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)), 948 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)), 949 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)), 950 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)), 951 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)), 952 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)), 953 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)), 954 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)), 955 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES", 956 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)), 957 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)), 958 }; 959 960 /* 961 * DC8051 Information Error flags 962 * 963 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field. 
964 */ 965 static struct flag_table dc8051_info_err_flags[] = { 966 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED), 967 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME), 968 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET), 969 FLAG_ENTRY0("Serdes internal loopback failure", 970 FAILED_SERDES_INTERNAL_LOOPBACK), 971 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT), 972 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING), 973 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE), 974 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM), 975 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ), 976 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1), 977 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2), 978 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT), 979 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT), 980 FLAG_ENTRY0("External Device Request Timeout", 981 EXTERNAL_DEVICE_REQ_TIMEOUT), 982 }; 983 984 /* 985 * DC8051 Information Host Information flags 986 * 987 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field. 988 */ 989 static struct flag_table dc8051_info_host_msg_flags[] = { 990 FLAG_ENTRY0("Host request done", 0x0001), 991 FLAG_ENTRY0("BC PWR_MGM message", 0x0002), 992 FLAG_ENTRY0("BC SMA message", 0x0004), 993 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008), 994 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010), 995 FLAG_ENTRY0("External device config request", 0x0020), 996 FLAG_ENTRY0("VerifyCap all frames received", 0x0040), 997 FLAG_ENTRY0("LinkUp achieved", 0x0080), 998 FLAG_ENTRY0("Link going down", 0x0100), 999 FLAG_ENTRY0("Link width downgraded", 0x0200), 1000 }; 1001 1002 static u32 encoded_size(u32 size); 1003 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate); 1004 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state); 1005 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, 1006 u8 *continuous); 1007 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, 1008 u8 *vcu, u16 *vl15buf, u8 *crc_sizes); 1009 static void read_vc_remote_link_width(struct hfi1_devdata *dd, 1010 u8 *remote_tx_rate, u16 *link_widths); 1011 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, 1012 u8 *flag_bits, u16 *link_widths); 1013 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, 1014 u8 *device_rev); 1015 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx); 1016 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx, 1017 u8 *tx_polarity_inversion, 1018 u8 *rx_polarity_inversion, u8 *max_rate); 1019 static void handle_sdma_eng_err(struct hfi1_devdata *dd, 1020 unsigned int context, u64 err_status); 1021 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg); 1022 static void handle_dcc_err(struct hfi1_devdata *dd, 1023 unsigned int context, u64 err_status); 1024 static void handle_lcb_err(struct hfi1_devdata *dd, 1025 unsigned int context, u64 err_status); 1026 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg); 1027 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1028 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1029 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1030 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1031 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 
reg); 1032 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1033 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1034 static void set_partition_keys(struct hfi1_pportdata *ppd); 1035 static const char *link_state_name(u32 state); 1036 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, 1037 u32 state); 1038 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, 1039 u64 *out_data); 1040 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); 1041 static int thermal_init(struct hfi1_devdata *dd); 1042 1043 static void update_statusp(struct hfi1_pportdata *ppd, u32 state); 1044 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, 1045 int msecs); 1046 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, 1047 int msecs); 1048 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); 1049 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state); 1050 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, 1051 int msecs); 1052 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, 1053 int msecs); 1054 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc); 1055 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); 1056 static void handle_temp_err(struct hfi1_devdata *dd); 1057 static void dc_shutdown(struct hfi1_devdata *dd); 1058 static void dc_start(struct hfi1_devdata *dd); 1059 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 1060 unsigned int *np); 1061 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); 1062 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); 1063 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index); 1064 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width); 1065 1066 /* 1067 * Error interrupt table entry. This is used as input to the interrupt 1068 * "clear down" routine used for all second tier error interrupt register. 1069 * Second tier interrupt registers have a single bit representing them 1070 * in the top-level CceIntStatus. 1071 */ 1072 struct err_reg_info { 1073 u32 status; /* status CSR offset */ 1074 u32 clear; /* clear CSR offset */ 1075 u32 mask; /* mask CSR offset */ 1076 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg); 1077 const char *desc; 1078 }; 1079 1080 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START) 1081 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START) 1082 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START) 1083 1084 /* 1085 * Helpers for building HFI and DC error interrupt table entries. Different 1086 * helpers are needed because of inconsistent register names. 1087 */ 1088 #define EE(reg, handler, desc) \ 1089 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \ 1090 handler, desc } 1091 #define DC_EE1(reg, handler, desc) \ 1092 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc } 1093 #define DC_EE2(reg, handler, desc) \ 1094 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc } 1095 1096 /* 1097 * Table of the "misc" grouping of error interrupts. Each entry refers to 1098 * another register containing more information. 
1099 */ 1100 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = { 1101 /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"), 1102 /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"), 1103 /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"), 1104 /* 3*/ { 0, 0, 0, NULL }, /* reserved */ 1105 /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"), 1106 /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"), 1107 /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"), 1108 /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr") 1109 /* the rest are reserved */ 1110 }; 1111 1112 /* 1113 * Index into the Various section of the interrupt sources 1114 * corresponding to the Critical Temperature interrupt. 1115 */ 1116 #define TCRIT_INT_SOURCE 4 1117 1118 /* 1119 * SDMA error interrupt entry - refers to another register containing more 1120 * information. 1121 */ 1122 static const struct err_reg_info sdma_eng_err = 1123 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr"); 1124 1125 static const struct err_reg_info various_err[NUM_VARIOUS] = { 1126 /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */ 1127 /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */ 1128 /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"), 1129 /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"), 1130 /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */ 1131 /* rest are reserved */ 1132 }; 1133 1134 /* 1135 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG 1136 * register can not be derived from the MTU value because 10K is not 1137 * a power of 2. Therefore, we need a constant. Everything else can 1138 * be calculated. 1139 */ 1140 #define DCC_CFG_PORT_MTU_CAP_10240 7 1141 1142 /* 1143 * Table of the DC grouping of error interrupts. Each entry refers to 1144 * another register containing more information. 1145 */ 1146 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = { 1147 /* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"), 1148 /* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"), 1149 /* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"), 1150 /* 3*/ /* dc_lbm_int - special, see is_dc_int() */ 1151 /* the rest are reserved */ 1152 }; 1153 1154 struct cntr_entry { 1155 /* 1156 * counter name 1157 */ 1158 char *name; 1159 1160 /* 1161 * csr to read for name (if applicable) 1162 */ 1163 u64 csr; 1164 1165 /* 1166 * offset into dd or ppd to store the counter's value 1167 */ 1168 int offset; 1169 1170 /* 1171 * flags 1172 */ 1173 u8 flags; 1174 1175 /* 1176 * accessor for stat element, context either dd or ppd 1177 */ 1178 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl, 1179 int mode, u64 data); 1180 }; 1181 1182 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0 1183 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159 1184 1185 #define CNTR_ELEM(name, csr, offset, flags, accessor) \ 1186 { \ 1187 name, \ 1188 csr, \ 1189 offset, \ 1190 flags, \ 1191 accessor \ 1192 } 1193 1194 /* 32bit RXE */ 1195 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \ 1196 CNTR_ELEM(#name, \ 1197 (counter * 8 + RCV_COUNTER_ARRAY32), \ 1198 0, flags | CNTR_32BIT, \ 1199 port_access_u32_csr) 1200 1201 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \ 1202 CNTR_ELEM(#name, \ 1203 (counter * 8 + RCV_COUNTER_ARRAY32), \ 1204 0, flags | CNTR_32BIT, \ 1205 dev_access_u32_csr) 1206 1207 /* 64bit RXE */ 1208 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \ 1209 CNTR_ELEM(#name, \ 1210 (counter * 8 + RCV_COUNTER_ARRAY64), \ 1211 0, flags, \ 1212 port_access_u64_csr) 1213 1214 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \ 1215 CNTR_ELEM(#name, \ 1216 
(counter * 8 + RCV_COUNTER_ARRAY64), \ 1217 0, flags, \ 1218 dev_access_u64_csr) 1219 1220 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx 1221 #define OVR_ELM(ctx) \ 1222 CNTR_ELEM("RcvHdrOvr" #ctx, \ 1223 (RCV_HDR_OVFL_CNT + ctx * 0x100), \ 1224 0, CNTR_NORMAL, port_access_u64_csr) 1225 1226 /* 32bit TXE */ 1227 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \ 1228 CNTR_ELEM(#name, \ 1229 (counter * 8 + SEND_COUNTER_ARRAY32), \ 1230 0, flags | CNTR_32BIT, \ 1231 port_access_u32_csr) 1232 1233 /* 64bit TXE */ 1234 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \ 1235 CNTR_ELEM(#name, \ 1236 (counter * 8 + SEND_COUNTER_ARRAY64), \ 1237 0, flags, \ 1238 port_access_u64_csr) 1239 1240 # define TX64_DEV_CNTR_ELEM(name, counter, flags) \ 1241 CNTR_ELEM(#name,\ 1242 counter * 8 + SEND_COUNTER_ARRAY64, \ 1243 0, \ 1244 flags, \ 1245 dev_access_u64_csr) 1246 1247 /* CCE */ 1248 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \ 1249 CNTR_ELEM(#name, \ 1250 (counter * 8 + CCE_COUNTER_ARRAY32), \ 1251 0, flags | CNTR_32BIT, \ 1252 dev_access_u32_csr) 1253 1254 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \ 1255 CNTR_ELEM(#name, \ 1256 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \ 1257 0, flags | CNTR_32BIT, \ 1258 dev_access_u32_csr) 1259 1260 /* DC */ 1261 #define DC_PERF_CNTR(name, counter, flags) \ 1262 CNTR_ELEM(#name, \ 1263 counter, \ 1264 0, \ 1265 flags, \ 1266 dev_access_u64_csr) 1267 1268 #define DC_PERF_CNTR_LCB(name, counter, flags) \ 1269 CNTR_ELEM(#name, \ 1270 counter, \ 1271 0, \ 1272 flags, \ 1273 dc_access_lcb_cntr) 1274 1275 /* ibp counters */ 1276 #define SW_IBP_CNTR(name, cntr) \ 1277 CNTR_ELEM(#name, \ 1278 0, \ 1279 0, \ 1280 CNTR_SYNTH, \ 1281 access_ibp_##cntr) 1282 1283 /** 1284 * hfi1_addr_from_offset - return addr for readq/writeq 1285 * @dd: the dd device 1286 * @offset: the offset of the CSR within bar0 1287 * 1288 * This routine selects the appropriate base address 1289 * based on the indicated offset. 1290 */ 1291 static inline void __iomem *hfi1_addr_from_offset( 1292 const struct hfi1_devdata *dd, 1293 u32 offset) 1294 { 1295 if (offset >= dd->base2_start) 1296 return dd->kregbase2 + (offset - dd->base2_start); 1297 return dd->kregbase1 + offset; 1298 } 1299 1300 /** 1301 * read_csr - read CSR at the indicated offset 1302 * @dd: the dd device 1303 * @offset: the offset of the CSR within bar0 1304 * 1305 * Return: the value read or all FF's if there 1306 * is no mapping 1307 */ 1308 u64 read_csr(const struct hfi1_devdata *dd, u32 offset) 1309 { 1310 if (dd->flags & HFI1_PRESENT) 1311 return readq(hfi1_addr_from_offset(dd, offset)); 1312 return -1; 1313 } 1314 1315 /** 1316 * write_csr - write CSR at the indicated offset 1317 * @dd: the dd device 1318 * @offset: the offset of the CSR within bar0 1319 * @value: value to write 1320 */ 1321 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value) 1322 { 1323 if (dd->flags & HFI1_PRESENT) { 1324 void __iomem *base = hfi1_addr_from_offset(dd, offset); 1325 1326 /* avoid write to RcvArray */ 1327 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) 1328 return; 1329 writeq(value, base); 1330 } 1331 } 1332 1333 /** 1334 * get_csr_addr - return the iomem address for offset 1335 * @dd: the dd device 1336 * @offset: the offset of the CSR within bar0 1337 * 1338 * Return: The iomem address to use in subsequent 1339 * writeq/readq operations.

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
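
/*
 * Sketch of the accessor convention used above (illustrative, with a
 * hypothetical entry): every rw_cntr callback takes a context (dd or ppd),
 * a vl/index, a mode, and a write value.  A plain device-level counter is
 * read with vl == CNTR_INVALID_VL, while a per-engine or per-VL counter
 * selects its CSR by striding from entry->csr:
 *
 *	struct cntr_entry ex = { "ExampleCntr", EXAMPLE_CSR, 0,
 *				 CNTR_SDMA, dev_access_u32_csr };
 *
 *	u64 v0 = ex.rw_cntr(&ex, dd, 0, CNTR_MODE_R, 0);
 *	u64 v3 = ex.rw_cntr(&ex, dd, 3, CNTR_MODE_R, 0);
 *
 * would read EXAMPLE_CSR and EXAMPLE_CSR + 3 * 0x100 respectively
 * (per-SDMA engines are 0x100 apart; per-VL CSRs are 8 bytes apart in
 * dev_access_u64_csr() and port_access_u64_csr()).  "ExampleCntr" and
 * EXAMPLE_CSR are made up for this note.
 */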

/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}

/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}

static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}

u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
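
/*
 * Note on the per-CPU counter scheme (illustrative): get_all_cpu_total()
 * sums a __percpu u64 across all possible CPUs.  Because the per-CPU
 * variables themselves are never cleared, read_write_cpu() below
 * implements "zeroing" by snapshotting the current total into *z_val and
 * reporting subsequent reads relative to that baseline:
 *
 *	reported = get_all_cpu_total(cntr) - *z_val;
 *
 * so a write of 0 resets the reported value without touching the hot-path
 * per-CPU increments.
 */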

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}

/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata
*)context; 1731 1732 return dd->misc_err_status_cnt[9]; 1733 } 1734 1735 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry, 1736 void *context, int vl, int mode, 1737 u64 data) 1738 { 1739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1740 1741 return dd->misc_err_status_cnt[8]; 1742 } 1743 1744 static u64 access_misc_efuse_read_bad_addr_err_cnt( 1745 const struct cntr_entry *entry, 1746 void *context, int vl, int mode, u64 data) 1747 { 1748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1749 1750 return dd->misc_err_status_cnt[7]; 1751 } 1752 1753 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry, 1754 void *context, int vl, 1755 int mode, u64 data) 1756 { 1757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1758 1759 return dd->misc_err_status_cnt[6]; 1760 } 1761 1762 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry, 1763 void *context, int vl, int mode, 1764 u64 data) 1765 { 1766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1767 1768 return dd->misc_err_status_cnt[5]; 1769 } 1770 1771 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry, 1772 void *context, int vl, int mode, 1773 u64 data) 1774 { 1775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1776 1777 return dd->misc_err_status_cnt[4]; 1778 } 1779 1780 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry, 1781 void *context, int vl, 1782 int mode, u64 data) 1783 { 1784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1785 1786 return dd->misc_err_status_cnt[3]; 1787 } 1788 1789 static u64 access_misc_csr_write_bad_addr_err_cnt( 1790 const struct cntr_entry *entry, 1791 void *context, int vl, int mode, u64 data) 1792 { 1793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1794 1795 return dd->misc_err_status_cnt[2]; 1796 } 1797 1798 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1799 void *context, int vl, 1800 int mode, u64 data) 1801 { 1802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1803 1804 return dd->misc_err_status_cnt[1]; 1805 } 1806 1807 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry, 1808 void *context, int vl, int mode, 1809 u64 data) 1810 { 1811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1812 1813 return dd->misc_err_status_cnt[0]; 1814 } 1815 1816 /* 1817 * Software counter for the aggregate of 1818 * individual CceErrStatus counters 1819 */ 1820 static u64 access_sw_cce_err_status_aggregated_cnt( 1821 const struct cntr_entry *entry, 1822 void *context, int vl, int mode, u64 data) 1823 { 1824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1825 1826 return dd->sw_cce_err_status_aggregate; 1827 } 1828 1829 /* 1830 * Software counters corresponding to each of the 1831 * error status bits within CceErrStatus 1832 */ 1833 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry, 1834 void *context, int vl, int mode, 1835 u64 data) 1836 { 1837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1838 1839 return dd->cce_err_status_cnt[40]; 1840 } 1841 1842 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry, 1843 void *context, int vl, int mode, 1844 u64 data) 1845 { 1846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1847 1848 return dd->cce_err_status_cnt[39]; 1849 } 1850 1851 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry, 1852 void 
*context, int vl, int mode, 1853 u64 data) 1854 { 1855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1856 1857 return dd->cce_err_status_cnt[38]; 1858 } 1859 1860 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry, 1861 void *context, int vl, int mode, 1862 u64 data) 1863 { 1864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1865 1866 return dd->cce_err_status_cnt[37]; 1867 } 1868 1869 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry, 1870 void *context, int vl, int mode, 1871 u64 data) 1872 { 1873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1874 1875 return dd->cce_err_status_cnt[36]; 1876 } 1877 1878 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt( 1879 const struct cntr_entry *entry, 1880 void *context, int vl, int mode, u64 data) 1881 { 1882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1883 1884 return dd->cce_err_status_cnt[35]; 1885 } 1886 1887 static u64 access_cce_rcpl_async_fifo_parity_err_cnt( 1888 const struct cntr_entry *entry, 1889 void *context, int vl, int mode, u64 data) 1890 { 1891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1892 1893 return dd->cce_err_status_cnt[34]; 1894 } 1895 1896 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry, 1897 void *context, int vl, 1898 int mode, u64 data) 1899 { 1900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1901 1902 return dd->cce_err_status_cnt[33]; 1903 } 1904 1905 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1906 void *context, int vl, int mode, 1907 u64 data) 1908 { 1909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1910 1911 return dd->cce_err_status_cnt[32]; 1912 } 1913 1914 static u64 access_la_triggered_cnt(const struct cntr_entry *entry, 1915 void *context, int vl, int mode, u64 data) 1916 { 1917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1918 1919 return dd->cce_err_status_cnt[31]; 1920 } 1921 1922 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry, 1923 void *context, int vl, int mode, 1924 u64 data) 1925 { 1926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1927 1928 return dd->cce_err_status_cnt[30]; 1929 } 1930 1931 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry, 1932 void *context, int vl, int mode, 1933 u64 data) 1934 { 1935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1936 1937 return dd->cce_err_status_cnt[29]; 1938 } 1939 1940 static u64 access_pcic_transmit_back_parity_err_cnt( 1941 const struct cntr_entry *entry, 1942 void *context, int vl, int mode, u64 data) 1943 { 1944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1945 1946 return dd->cce_err_status_cnt[28]; 1947 } 1948 1949 static u64 access_pcic_transmit_front_parity_err_cnt( 1950 const struct cntr_entry *entry, 1951 void *context, int vl, int mode, u64 data) 1952 { 1953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1954 1955 return dd->cce_err_status_cnt[27]; 1956 } 1957 1958 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1959 void *context, int vl, int mode, 1960 u64 data) 1961 { 1962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1963 1964 return dd->cce_err_status_cnt[26]; 1965 } 1966 1967 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1968 void *context, int vl, int mode, 1969 u64 data) 1970 { 1971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1972 
1973 return dd->cce_err_status_cnt[25]; 1974 } 1975 1976 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1977 void *context, int vl, int mode, 1978 u64 data) 1979 { 1980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1981 1982 return dd->cce_err_status_cnt[24]; 1983 } 1984 1985 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1986 void *context, int vl, int mode, 1987 u64 data) 1988 { 1989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1990 1991 return dd->cce_err_status_cnt[23]; 1992 } 1993 1994 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry, 1995 void *context, int vl, 1996 int mode, u64 data) 1997 { 1998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1999 2000 return dd->cce_err_status_cnt[22]; 2001 } 2002 2003 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry, 2004 void *context, int vl, int mode, 2005 u64 data) 2006 { 2007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2008 2009 return dd->cce_err_status_cnt[21]; 2010 } 2011 2012 static u64 access_pcic_n_post_dat_q_parity_err_cnt( 2013 const struct cntr_entry *entry, 2014 void *context, int vl, int mode, u64 data) 2015 { 2016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2017 2018 return dd->cce_err_status_cnt[20]; 2019 } 2020 2021 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry, 2022 void *context, int vl, 2023 int mode, u64 data) 2024 { 2025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2026 2027 return dd->cce_err_status_cnt[19]; 2028 } 2029 2030 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry, 2031 void *context, int vl, int mode, 2032 u64 data) 2033 { 2034 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2035 2036 return dd->cce_err_status_cnt[18]; 2037 } 2038 2039 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry, 2040 void *context, int vl, int mode, 2041 u64 data) 2042 { 2043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2044 2045 return dd->cce_err_status_cnt[17]; 2046 } 2047 2048 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry, 2049 void *context, int vl, int mode, 2050 u64 data) 2051 { 2052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2053 2054 return dd->cce_err_status_cnt[16]; 2055 } 2056 2057 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry, 2058 void *context, int vl, int mode, 2059 u64 data) 2060 { 2061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2062 2063 return dd->cce_err_status_cnt[15]; 2064 } 2065 2066 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry, 2067 void *context, int vl, 2068 int mode, u64 data) 2069 { 2070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2071 2072 return dd->cce_err_status_cnt[14]; 2073 } 2074 2075 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry, 2076 void *context, int vl, int mode, 2077 u64 data) 2078 { 2079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2080 2081 return dd->cce_err_status_cnt[13]; 2082 } 2083 2084 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt( 2085 const struct cntr_entry *entry, 2086 void *context, int vl, int mode, u64 data) 2087 { 2088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2089 2090 return dd->cce_err_status_cnt[12]; 2091 } 2092 2093 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt( 
2094 const struct cntr_entry *entry, 2095 void *context, int vl, int mode, u64 data) 2096 { 2097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2098 2099 return dd->cce_err_status_cnt[11]; 2100 } 2101 2102 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt( 2103 const struct cntr_entry *entry, 2104 void *context, int vl, int mode, u64 data) 2105 { 2106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2107 2108 return dd->cce_err_status_cnt[10]; 2109 } 2110 2111 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt( 2112 const struct cntr_entry *entry, 2113 void *context, int vl, int mode, u64 data) 2114 { 2115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2116 2117 return dd->cce_err_status_cnt[9]; 2118 } 2119 2120 static u64 access_cce_cli2_async_fifo_parity_err_cnt( 2121 const struct cntr_entry *entry, 2122 void *context, int vl, int mode, u64 data) 2123 { 2124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2125 2126 return dd->cce_err_status_cnt[8]; 2127 } 2128 2129 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry, 2130 void *context, int vl, 2131 int mode, u64 data) 2132 { 2133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2134 2135 return dd->cce_err_status_cnt[7]; 2136 } 2137 2138 static u64 access_cce_cli0_async_fifo_parity_err_cnt( 2139 const struct cntr_entry *entry, 2140 void *context, int vl, int mode, u64 data) 2141 { 2142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2143 2144 return dd->cce_err_status_cnt[6]; 2145 } 2146 2147 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry, 2148 void *context, int vl, int mode, 2149 u64 data) 2150 { 2151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2152 2153 return dd->cce_err_status_cnt[5]; 2154 } 2155 2156 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry, 2157 void *context, int vl, int mode, 2158 u64 data) 2159 { 2160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2161 2162 return dd->cce_err_status_cnt[4]; 2163 } 2164 2165 static u64 access_cce_trgt_async_fifo_parity_err_cnt( 2166 const struct cntr_entry *entry, 2167 void *context, int vl, int mode, u64 data) 2168 { 2169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2170 2171 return dd->cce_err_status_cnt[3]; 2172 } 2173 2174 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, 2175 void *context, int vl, 2176 int mode, u64 data) 2177 { 2178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2179 2180 return dd->cce_err_status_cnt[2]; 2181 } 2182 2183 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 2184 void *context, int vl, 2185 int mode, u64 data) 2186 { 2187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2188 2189 return dd->cce_err_status_cnt[1]; 2190 } 2191 2192 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry, 2193 void *context, int vl, int mode, 2194 u64 data) 2195 { 2196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2197 2198 return dd->cce_err_status_cnt[0]; 2199 } 2200 2201 /* 2202 * Software counters corresponding to each of the 2203 * error status bits within RcvErrStatus 2204 */ 2205 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry, 2206 void *context, int vl, int mode, 2207 u64 data) 2208 { 2209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2210 2211 return dd->rcv_err_status_cnt[63]; 2212 } 2213 2214 static u64 
access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, 2215 void *context, int vl, 2216 int mode, u64 data) 2217 { 2218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2219 2220 return dd->rcv_err_status_cnt[62]; 2221 } 2222 2223 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 2224 void *context, int vl, int mode, 2225 u64 data) 2226 { 2227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2228 2229 return dd->rcv_err_status_cnt[61]; 2230 } 2231 2232 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry, 2233 void *context, int vl, int mode, 2234 u64 data) 2235 { 2236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2237 2238 return dd->rcv_err_status_cnt[60]; 2239 } 2240 2241 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2242 void *context, int vl, 2243 int mode, u64 data) 2244 { 2245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2246 2247 return dd->rcv_err_status_cnt[59]; 2248 } 2249 2250 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2251 void *context, int vl, 2252 int mode, u64 data) 2253 { 2254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2255 2256 return dd->rcv_err_status_cnt[58]; 2257 } 2258 2259 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry, 2260 void *context, int vl, int mode, 2261 u64 data) 2262 { 2263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2264 2265 return dd->rcv_err_status_cnt[57]; 2266 } 2267 2268 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry, 2269 void *context, int vl, int mode, 2270 u64 data) 2271 { 2272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2273 2274 return dd->rcv_err_status_cnt[56]; 2275 } 2276 2277 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry, 2278 void *context, int vl, int mode, 2279 u64 data) 2280 { 2281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2282 2283 return dd->rcv_err_status_cnt[55]; 2284 } 2285 2286 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt( 2287 const struct cntr_entry *entry, 2288 void *context, int vl, int mode, u64 data) 2289 { 2290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2291 2292 return dd->rcv_err_status_cnt[54]; 2293 } 2294 2295 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt( 2296 const struct cntr_entry *entry, 2297 void *context, int vl, int mode, u64 data) 2298 { 2299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2300 2301 return dd->rcv_err_status_cnt[53]; 2302 } 2303 2304 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry, 2305 void *context, int vl, 2306 int mode, u64 data) 2307 { 2308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2309 2310 return dd->rcv_err_status_cnt[52]; 2311 } 2312 2313 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry, 2314 void *context, int vl, 2315 int mode, u64 data) 2316 { 2317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2318 2319 return dd->rcv_err_status_cnt[51]; 2320 } 2321 2322 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry, 2323 void *context, int vl, 2324 int mode, u64 data) 2325 { 2326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2327 2328 return dd->rcv_err_status_cnt[50]; 2329 } 2330 2331 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry, 2332 void *context, int vl, 2333 int mode, u64 
data) 2334 { 2335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2336 2337 return dd->rcv_err_status_cnt[49]; 2338 } 2339 2340 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry, 2341 void *context, int vl, 2342 int mode, u64 data) 2343 { 2344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2345 2346 return dd->rcv_err_status_cnt[48]; 2347 } 2348 2349 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry, 2350 void *context, int vl, 2351 int mode, u64 data) 2352 { 2353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2354 2355 return dd->rcv_err_status_cnt[47]; 2356 } 2357 2358 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry, 2359 void *context, int vl, int mode, 2360 u64 data) 2361 { 2362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2363 2364 return dd->rcv_err_status_cnt[46]; 2365 } 2366 2367 static u64 access_rx_hq_intr_csr_parity_err_cnt( 2368 const struct cntr_entry *entry, 2369 void *context, int vl, int mode, u64 data) 2370 { 2371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2372 2373 return dd->rcv_err_status_cnt[45]; 2374 } 2375 2376 static u64 access_rx_lookup_csr_parity_err_cnt( 2377 const struct cntr_entry *entry, 2378 void *context, int vl, int mode, u64 data) 2379 { 2380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2381 2382 return dd->rcv_err_status_cnt[44]; 2383 } 2384 2385 static u64 access_rx_lookup_rcv_array_cor_err_cnt( 2386 const struct cntr_entry *entry, 2387 void *context, int vl, int mode, u64 data) 2388 { 2389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2390 2391 return dd->rcv_err_status_cnt[43]; 2392 } 2393 2394 static u64 access_rx_lookup_rcv_array_unc_err_cnt( 2395 const struct cntr_entry *entry, 2396 void *context, int vl, int mode, u64 data) 2397 { 2398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2399 2400 return dd->rcv_err_status_cnt[42]; 2401 } 2402 2403 static u64 access_rx_lookup_des_part2_parity_err_cnt( 2404 const struct cntr_entry *entry, 2405 void *context, int vl, int mode, u64 data) 2406 { 2407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2408 2409 return dd->rcv_err_status_cnt[41]; 2410 } 2411 2412 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt( 2413 const struct cntr_entry *entry, 2414 void *context, int vl, int mode, u64 data) 2415 { 2416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2417 2418 return dd->rcv_err_status_cnt[40]; 2419 } 2420 2421 static u64 access_rx_lookup_des_part1_unc_err_cnt( 2422 const struct cntr_entry *entry, 2423 void *context, int vl, int mode, u64 data) 2424 { 2425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2426 2427 return dd->rcv_err_status_cnt[39]; 2428 } 2429 2430 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt( 2431 const struct cntr_entry *entry, 2432 void *context, int vl, int mode, u64 data) 2433 { 2434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2435 2436 return dd->rcv_err_status_cnt[38]; 2437 } 2438 2439 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt( 2440 const struct cntr_entry *entry, 2441 void *context, int vl, int mode, u64 data) 2442 { 2443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2444 2445 return dd->rcv_err_status_cnt[37]; 2446 } 2447 2448 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt( 2449 const struct cntr_entry *entry, 2450 void *context, int vl, int mode, u64 data) 2451 { 2452 struct hfi1_devdata *dd = (struct hfi1_devdata 
*)context; 2453 2454 return dd->rcv_err_status_cnt[36]; 2455 } 2456 2457 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt( 2458 const struct cntr_entry *entry, 2459 void *context, int vl, int mode, u64 data) 2460 { 2461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2462 2463 return dd->rcv_err_status_cnt[35]; 2464 } 2465 2466 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt( 2467 const struct cntr_entry *entry, 2468 void *context, int vl, int mode, u64 data) 2469 { 2470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2471 2472 return dd->rcv_err_status_cnt[34]; 2473 } 2474 2475 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt( 2476 const struct cntr_entry *entry, 2477 void *context, int vl, int mode, u64 data) 2478 { 2479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2480 2481 return dd->rcv_err_status_cnt[33]; 2482 } 2483 2484 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry, 2485 void *context, int vl, int mode, 2486 u64 data) 2487 { 2488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2489 2490 return dd->rcv_err_status_cnt[32]; 2491 } 2492 2493 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry, 2494 void *context, int vl, int mode, 2495 u64 data) 2496 { 2497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2498 2499 return dd->rcv_err_status_cnt[31]; 2500 } 2501 2502 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry, 2503 void *context, int vl, int mode, 2504 u64 data) 2505 { 2506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2507 2508 return dd->rcv_err_status_cnt[30]; 2509 } 2510 2511 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry, 2512 void *context, int vl, int mode, 2513 u64 data) 2514 { 2515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2516 2517 return dd->rcv_err_status_cnt[29]; 2518 } 2519 2520 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry, 2521 void *context, int vl, 2522 int mode, u64 data) 2523 { 2524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2525 2526 return dd->rcv_err_status_cnt[28]; 2527 } 2528 2529 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt( 2530 const struct cntr_entry *entry, 2531 void *context, int vl, int mode, u64 data) 2532 { 2533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2534 2535 return dd->rcv_err_status_cnt[27]; 2536 } 2537 2538 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt( 2539 const struct cntr_entry *entry, 2540 void *context, int vl, int mode, u64 data) 2541 { 2542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2543 2544 return dd->rcv_err_status_cnt[26]; 2545 } 2546 2547 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt( 2548 const struct cntr_entry *entry, 2549 void *context, int vl, int mode, u64 data) 2550 { 2551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2552 2553 return dd->rcv_err_status_cnt[25]; 2554 } 2555 2556 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt( 2557 const struct cntr_entry *entry, 2558 void *context, int vl, int mode, u64 data) 2559 { 2560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2561 2562 return dd->rcv_err_status_cnt[24]; 2563 } 2564 2565 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt( 2566 const struct cntr_entry *entry, 2567 void *context, int vl, int mode, u64 data) 2568 { 2569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2570 2571 return dd->rcv_err_status_cnt[23]; 2572 } 2573 2574 
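/*
 * Illustrative note (an assumption about the update path, which is not part
 * of this accessor block): the array index used by each accessor matches the
 * bit position of the corresponding error in RcvErrStatus, so the counts are
 * naturally maintained by scanning the status value and bumping the matching
 * slot, along the lines of:
 *
 *	u64 reg = read_csr(dd, RCV_ERR_STATUS);
 *	int i;
 *
 *	for (i = 0; i < 64; i++)
 *		if (reg & BIT_ULL(i))
 *			dd->rcv_err_status_cnt[i]++;
 *
 * The accessors themselves only report the accumulated values.
 */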
static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt( 2575 const struct cntr_entry *entry, 2576 void *context, int vl, int mode, u64 data) 2577 { 2578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2579 2580 return dd->rcv_err_status_cnt[22]; 2581 } 2582 2583 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt( 2584 const struct cntr_entry *entry, 2585 void *context, int vl, int mode, u64 data) 2586 { 2587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2588 2589 return dd->rcv_err_status_cnt[21]; 2590 } 2591 2592 static u64 access_rx_rbuf_block_list_read_cor_err_cnt( 2593 const struct cntr_entry *entry, 2594 void *context, int vl, int mode, u64 data) 2595 { 2596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2597 2598 return dd->rcv_err_status_cnt[20]; 2599 } 2600 2601 static u64 access_rx_rbuf_block_list_read_unc_err_cnt( 2602 const struct cntr_entry *entry, 2603 void *context, int vl, int mode, u64 data) 2604 { 2605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2606 2607 return dd->rcv_err_status_cnt[19]; 2608 } 2609 2610 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry, 2611 void *context, int vl, 2612 int mode, u64 data) 2613 { 2614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2615 2616 return dd->rcv_err_status_cnt[18]; 2617 } 2618 2619 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry, 2620 void *context, int vl, 2621 int mode, u64 data) 2622 { 2623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2624 2625 return dd->rcv_err_status_cnt[17]; 2626 } 2627 2628 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt( 2629 const struct cntr_entry *entry, 2630 void *context, int vl, int mode, u64 data) 2631 { 2632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2633 2634 return dd->rcv_err_status_cnt[16]; 2635 } 2636 2637 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt( 2638 const struct cntr_entry *entry, 2639 void *context, int vl, int mode, u64 data) 2640 { 2641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2642 2643 return dd->rcv_err_status_cnt[15]; 2644 } 2645 2646 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry, 2647 void *context, int vl, 2648 int mode, u64 data) 2649 { 2650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2651 2652 return dd->rcv_err_status_cnt[14]; 2653 } 2654 2655 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry, 2656 void *context, int vl, 2657 int mode, u64 data) 2658 { 2659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2660 2661 return dd->rcv_err_status_cnt[13]; 2662 } 2663 2664 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2665 void *context, int vl, int mode, 2666 u64 data) 2667 { 2668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2669 2670 return dd->rcv_err_status_cnt[12]; 2671 } 2672 2673 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry, 2674 void *context, int vl, int mode, 2675 u64 data) 2676 { 2677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2678 2679 return dd->rcv_err_status_cnt[11]; 2680 } 2681 2682 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry, 2683 void *context, int vl, int mode, 2684 u64 data) 2685 { 2686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2687 2688 return dd->rcv_err_status_cnt[10]; 2689 } 2690 2691 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry 
*entry, 2692 void *context, int vl, int mode, 2693 u64 data) 2694 { 2695 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2696 2697 return dd->rcv_err_status_cnt[9]; 2698 } 2699 2700 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry, 2701 void *context, int vl, int mode, 2702 u64 data) 2703 { 2704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2705 2706 return dd->rcv_err_status_cnt[8]; 2707 } 2708 2709 static u64 access_rx_rcv_qp_map_table_cor_err_cnt( 2710 const struct cntr_entry *entry, 2711 void *context, int vl, int mode, u64 data) 2712 { 2713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2714 2715 return dd->rcv_err_status_cnt[7]; 2716 } 2717 2718 static u64 access_rx_rcv_qp_map_table_unc_err_cnt( 2719 const struct cntr_entry *entry, 2720 void *context, int vl, int mode, u64 data) 2721 { 2722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2723 2724 return dd->rcv_err_status_cnt[6]; 2725 } 2726 2727 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry, 2728 void *context, int vl, int mode, 2729 u64 data) 2730 { 2731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2732 2733 return dd->rcv_err_status_cnt[5]; 2734 } 2735 2736 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry, 2737 void *context, int vl, int mode, 2738 u64 data) 2739 { 2740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2741 2742 return dd->rcv_err_status_cnt[4]; 2743 } 2744 2745 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry, 2746 void *context, int vl, int mode, 2747 u64 data) 2748 { 2749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2750 2751 return dd->rcv_err_status_cnt[3]; 2752 } 2753 2754 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry, 2755 void *context, int vl, int mode, 2756 u64 data) 2757 { 2758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2759 2760 return dd->rcv_err_status_cnt[2]; 2761 } 2762 2763 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry, 2764 void *context, int vl, int mode, 2765 u64 data) 2766 { 2767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2768 2769 return dd->rcv_err_status_cnt[1]; 2770 } 2771 2772 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry, 2773 void *context, int vl, int mode, 2774 u64 data) 2775 { 2776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2777 2778 return dd->rcv_err_status_cnt[0]; 2779 } 2780 2781 /* 2782 * Software counters corresponding to each of the 2783 * error status bits within SendPioErrStatus 2784 */ 2785 static u64 access_pio_pec_sop_head_parity_err_cnt( 2786 const struct cntr_entry *entry, 2787 void *context, int vl, int mode, u64 data) 2788 { 2789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2790 2791 return dd->send_pio_err_status_cnt[35]; 2792 } 2793 2794 static u64 access_pio_pcc_sop_head_parity_err_cnt( 2795 const struct cntr_entry *entry, 2796 void *context, int vl, int mode, u64 data) 2797 { 2798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2799 2800 return dd->send_pio_err_status_cnt[34]; 2801 } 2802 2803 static u64 access_pio_last_returned_cnt_parity_err_cnt( 2804 const struct cntr_entry *entry, 2805 void *context, int vl, int mode, u64 data) 2806 { 2807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2808 2809 return dd->send_pio_err_status_cnt[33]; 2810 } 2811 2812 static u64 access_pio_current_free_cnt_parity_err_cnt( 2813 const struct 
cntr_entry *entry, 2814 void *context, int vl, int mode, u64 data) 2815 { 2816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2817 2818 return dd->send_pio_err_status_cnt[32]; 2819 } 2820 2821 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry, 2822 void *context, int vl, int mode, 2823 u64 data) 2824 { 2825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2826 2827 return dd->send_pio_err_status_cnt[31]; 2828 } 2829 2830 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry, 2831 void *context, int vl, int mode, 2832 u64 data) 2833 { 2834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2835 2836 return dd->send_pio_err_status_cnt[30]; 2837 } 2838 2839 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry, 2840 void *context, int vl, int mode, 2841 u64 data) 2842 { 2843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2844 2845 return dd->send_pio_err_status_cnt[29]; 2846 } 2847 2848 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt( 2849 const struct cntr_entry *entry, 2850 void *context, int vl, int mode, u64 data) 2851 { 2852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2853 2854 return dd->send_pio_err_status_cnt[28]; 2855 } 2856 2857 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry, 2858 void *context, int vl, int mode, 2859 u64 data) 2860 { 2861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2862 2863 return dd->send_pio_err_status_cnt[27]; 2864 } 2865 2866 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry, 2867 void *context, int vl, int mode, 2868 u64 data) 2869 { 2870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2871 2872 return dd->send_pio_err_status_cnt[26]; 2873 } 2874 2875 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry, 2876 void *context, int vl, 2877 int mode, u64 data) 2878 { 2879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2880 2881 return dd->send_pio_err_status_cnt[25]; 2882 } 2883 2884 static u64 access_pio_block_qw_count_parity_err_cnt( 2885 const struct cntr_entry *entry, 2886 void *context, int vl, int mode, u64 data) 2887 { 2888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2889 2890 return dd->send_pio_err_status_cnt[24]; 2891 } 2892 2893 static u64 access_pio_write_qw_valid_parity_err_cnt( 2894 const struct cntr_entry *entry, 2895 void *context, int vl, int mode, u64 data) 2896 { 2897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2898 2899 return dd->send_pio_err_status_cnt[23]; 2900 } 2901 2902 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry, 2903 void *context, int vl, int mode, 2904 u64 data) 2905 { 2906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2907 2908 return dd->send_pio_err_status_cnt[22]; 2909 } 2910 2911 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry, 2912 void *context, int vl, 2913 int mode, u64 data) 2914 { 2915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2916 2917 return dd->send_pio_err_status_cnt[21]; 2918 } 2919 2920 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry, 2921 void *context, int vl, 2922 int mode, u64 data) 2923 { 2924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2925 2926 return dd->send_pio_err_status_cnt[20]; 2927 } 2928 2929 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry, 2930 void *context, int vl, 2931 int mode, 
u64 data) 2932 { 2933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2934 2935 return dd->send_pio_err_status_cnt[19]; 2936 } 2937 2938 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt( 2939 const struct cntr_entry *entry, 2940 void *context, int vl, int mode, u64 data) 2941 { 2942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2943 2944 return dd->send_pio_err_status_cnt[18]; 2945 } 2946 2947 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry, 2948 void *context, int vl, int mode, 2949 u64 data) 2950 { 2951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2952 2953 return dd->send_pio_err_status_cnt[17]; 2954 } 2955 2956 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry, 2957 void *context, int vl, int mode, 2958 u64 data) 2959 { 2960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2961 2962 return dd->send_pio_err_status_cnt[16]; 2963 } 2964 2965 static u64 access_pio_credit_ret_fifo_parity_err_cnt( 2966 const struct cntr_entry *entry, 2967 void *context, int vl, int mode, u64 data) 2968 { 2969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2970 2971 return dd->send_pio_err_status_cnt[15]; 2972 } 2973 2974 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt( 2975 const struct cntr_entry *entry, 2976 void *context, int vl, int mode, u64 data) 2977 { 2978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2979 2980 return dd->send_pio_err_status_cnt[14]; 2981 } 2982 2983 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt( 2984 const struct cntr_entry *entry, 2985 void *context, int vl, int mode, u64 data) 2986 { 2987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2988 2989 return dd->send_pio_err_status_cnt[13]; 2990 } 2991 2992 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt( 2993 const struct cntr_entry *entry, 2994 void *context, int vl, int mode, u64 data) 2995 { 2996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2997 2998 return dd->send_pio_err_status_cnt[12]; 2999 } 3000 3001 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt( 3002 const struct cntr_entry *entry, 3003 void *context, int vl, int mode, u64 data) 3004 { 3005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3006 3007 return dd->send_pio_err_status_cnt[11]; 3008 } 3009 3010 static u64 access_pio_sm_pkt_reset_parity_err_cnt( 3011 const struct cntr_entry *entry, 3012 void *context, int vl, int mode, u64 data) 3013 { 3014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3015 3016 return dd->send_pio_err_status_cnt[10]; 3017 } 3018 3019 static u64 access_pio_pkt_evict_fifo_parity_err_cnt( 3020 const struct cntr_entry *entry, 3021 void *context, int vl, int mode, u64 data) 3022 { 3023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3024 3025 return dd->send_pio_err_status_cnt[9]; 3026 } 3027 3028 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt( 3029 const struct cntr_entry *entry, 3030 void *context, int vl, int mode, u64 data) 3031 { 3032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3033 3034 return dd->send_pio_err_status_cnt[8]; 3035 } 3036 3037 static u64 access_pio_sbrdctl_crrel_parity_err_cnt( 3038 const struct cntr_entry *entry, 3039 void *context, int vl, int mode, u64 data) 3040 { 3041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3042 3043 return dd->send_pio_err_status_cnt[7]; 3044 } 3045 3046 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry, 3047 void *context, int vl, int mode, 3048 u64 data) 
3049 { 3050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3051 3052 return dd->send_pio_err_status_cnt[6]; 3053 } 3054 3055 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry, 3056 void *context, int vl, int mode, 3057 u64 data) 3058 { 3059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3060 3061 return dd->send_pio_err_status_cnt[5]; 3062 } 3063 3064 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry, 3065 void *context, int vl, int mode, 3066 u64 data) 3067 { 3068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3069 3070 return dd->send_pio_err_status_cnt[4]; 3071 } 3072 3073 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry, 3074 void *context, int vl, int mode, 3075 u64 data) 3076 { 3077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3078 3079 return dd->send_pio_err_status_cnt[3]; 3080 } 3081 3082 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry, 3083 void *context, int vl, int mode, 3084 u64 data) 3085 { 3086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3087 3088 return dd->send_pio_err_status_cnt[2]; 3089 } 3090 3091 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry, 3092 void *context, int vl, 3093 int mode, u64 data) 3094 { 3095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3096 3097 return dd->send_pio_err_status_cnt[1]; 3098 } 3099 3100 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry, 3101 void *context, int vl, int mode, 3102 u64 data) 3103 { 3104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3105 3106 return dd->send_pio_err_status_cnt[0]; 3107 } 3108 3109 /* 3110 * Software counters corresponding to each of the 3111 * error status bits within SendDmaErrStatus 3112 */ 3113 static u64 access_sdma_pcie_req_tracking_cor_err_cnt( 3114 const struct cntr_entry *entry, 3115 void *context, int vl, int mode, u64 data) 3116 { 3117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3118 3119 return dd->send_dma_err_status_cnt[3]; 3120 } 3121 3122 static u64 access_sdma_pcie_req_tracking_unc_err_cnt( 3123 const struct cntr_entry *entry, 3124 void *context, int vl, int mode, u64 data) 3125 { 3126 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3127 3128 return dd->send_dma_err_status_cnt[2]; 3129 } 3130 3131 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry, 3132 void *context, int vl, int mode, 3133 u64 data) 3134 { 3135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3136 3137 return dd->send_dma_err_status_cnt[1]; 3138 } 3139 3140 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry, 3141 void *context, int vl, int mode, 3142 u64 data) 3143 { 3144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3145 3146 return dd->send_dma_err_status_cnt[0]; 3147 } 3148 3149 /* 3150 * Software counters corresponding to each of the 3151 * error status bits within SendEgressErrStatus 3152 */ 3153 static u64 access_tx_read_pio_memory_csr_unc_err_cnt( 3154 const struct cntr_entry *entry, 3155 void *context, int vl, int mode, u64 data) 3156 { 3157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3158 3159 return dd->send_egress_err_status_cnt[63]; 3160 } 3161 3162 static u64 access_tx_read_sdma_memory_csr_err_cnt( 3163 const struct cntr_entry *entry, 3164 void *context, int vl, int mode, u64 data) 3165 { 3166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3167 3168 return 
dd->send_egress_err_status_cnt[62]; 3169 } 3170 3171 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry, 3172 void *context, int vl, int mode, 3173 u64 data) 3174 { 3175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3176 3177 return dd->send_egress_err_status_cnt[61]; 3178 } 3179 3180 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry, 3181 void *context, int vl, 3182 int mode, u64 data) 3183 { 3184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3185 3186 return dd->send_egress_err_status_cnt[60]; 3187 } 3188 3189 static u64 access_tx_read_sdma_memory_cor_err_cnt( 3190 const struct cntr_entry *entry, 3191 void *context, int vl, int mode, u64 data) 3192 { 3193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3194 3195 return dd->send_egress_err_status_cnt[59]; 3196 } 3197 3198 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry, 3199 void *context, int vl, int mode, 3200 u64 data) 3201 { 3202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3203 3204 return dd->send_egress_err_status_cnt[58]; 3205 } 3206 3207 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry, 3208 void *context, int vl, int mode, 3209 u64 data) 3210 { 3211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3212 3213 return dd->send_egress_err_status_cnt[57]; 3214 } 3215 3216 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry, 3217 void *context, int vl, int mode, 3218 u64 data) 3219 { 3220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3221 3222 return dd->send_egress_err_status_cnt[56]; 3223 } 3224 3225 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry, 3226 void *context, int vl, int mode, 3227 u64 data) 3228 { 3229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3230 3231 return dd->send_egress_err_status_cnt[55]; 3232 } 3233 3234 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry, 3235 void *context, int vl, int mode, 3236 u64 data) 3237 { 3238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3239 3240 return dd->send_egress_err_status_cnt[54]; 3241 } 3242 3243 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry, 3244 void *context, int vl, int mode, 3245 u64 data) 3246 { 3247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3248 3249 return dd->send_egress_err_status_cnt[53]; 3250 } 3251 3252 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry, 3253 void *context, int vl, int mode, 3254 u64 data) 3255 { 3256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3257 3258 return dd->send_egress_err_status_cnt[52]; 3259 } 3260 3261 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry, 3262 void *context, int vl, int mode, 3263 u64 data) 3264 { 3265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3266 3267 return dd->send_egress_err_status_cnt[51]; 3268 } 3269 3270 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry, 3271 void *context, int vl, int mode, 3272 u64 data) 3273 { 3274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3275 3276 return dd->send_egress_err_status_cnt[50]; 3277 } 3278 3279 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry, 3280 void *context, int vl, int mode, 3281 u64 data) 3282 { 3283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3284 3285 return 
dd->send_egress_err_status_cnt[49]; 3286 } 3287 3288 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry, 3289 void *context, int vl, int mode, 3290 u64 data) 3291 { 3292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3293 3294 return dd->send_egress_err_status_cnt[48]; 3295 } 3296 3297 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry, 3298 void *context, int vl, int mode, 3299 u64 data) 3300 { 3301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3302 3303 return dd->send_egress_err_status_cnt[47]; 3304 } 3305 3306 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry, 3307 void *context, int vl, int mode, 3308 u64 data) 3309 { 3310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3311 3312 return dd->send_egress_err_status_cnt[46]; 3313 } 3314 3315 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry, 3316 void *context, int vl, int mode, 3317 u64 data) 3318 { 3319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3320 3321 return dd->send_egress_err_status_cnt[45]; 3322 } 3323 3324 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry, 3325 void *context, int vl, 3326 int mode, u64 data) 3327 { 3328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3329 3330 return dd->send_egress_err_status_cnt[44]; 3331 } 3332 3333 static u64 access_tx_read_sdma_memory_unc_err_cnt( 3334 const struct cntr_entry *entry, 3335 void *context, int vl, int mode, u64 data) 3336 { 3337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3338 3339 return dd->send_egress_err_status_cnt[43]; 3340 } 3341 3342 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry, 3343 void *context, int vl, int mode, 3344 u64 data) 3345 { 3346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3347 3348 return dd->send_egress_err_status_cnt[42]; 3349 } 3350 3351 static u64 access_tx_credit_return_partiy_err_cnt( 3352 const struct cntr_entry *entry, 3353 void *context, int vl, int mode, u64 data) 3354 { 3355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3356 3357 return dd->send_egress_err_status_cnt[41]; 3358 } 3359 3360 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt( 3361 const struct cntr_entry *entry, 3362 void *context, int vl, int mode, u64 data) 3363 { 3364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3365 3366 return dd->send_egress_err_status_cnt[40]; 3367 } 3368 3369 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt( 3370 const struct cntr_entry *entry, 3371 void *context, int vl, int mode, u64 data) 3372 { 3373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3374 3375 return dd->send_egress_err_status_cnt[39]; 3376 } 3377 3378 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt( 3379 const struct cntr_entry *entry, 3380 void *context, int vl, int mode, u64 data) 3381 { 3382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3383 3384 return dd->send_egress_err_status_cnt[38]; 3385 } 3386 3387 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt( 3388 const struct cntr_entry *entry, 3389 void *context, int vl, int mode, u64 data) 3390 { 3391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3392 3393 return dd->send_egress_err_status_cnt[37]; 3394 } 3395 3396 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt( 3397 const struct cntr_entry *entry, 3398 void *context, int vl, int mode, u64 data) 3399 { 3400 struct hfi1_devdata *dd = (struct hfi1_devdata 
*)context; 3401 3402 return dd->send_egress_err_status_cnt[36]; 3403 } 3404 3405 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt( 3406 const struct cntr_entry *entry, 3407 void *context, int vl, int mode, u64 data) 3408 { 3409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3410 3411 return dd->send_egress_err_status_cnt[35]; 3412 } 3413 3414 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt( 3415 const struct cntr_entry *entry, 3416 void *context, int vl, int mode, u64 data) 3417 { 3418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3419 3420 return dd->send_egress_err_status_cnt[34]; 3421 } 3422 3423 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt( 3424 const struct cntr_entry *entry, 3425 void *context, int vl, int mode, u64 data) 3426 { 3427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3428 3429 return dd->send_egress_err_status_cnt[33]; 3430 } 3431 3432 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt( 3433 const struct cntr_entry *entry, 3434 void *context, int vl, int mode, u64 data) 3435 { 3436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3437 3438 return dd->send_egress_err_status_cnt[32]; 3439 } 3440 3441 static u64 access_tx_sdma15_disallowed_packet_err_cnt( 3442 const struct cntr_entry *entry, 3443 void *context, int vl, int mode, u64 data) 3444 { 3445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3446 3447 return dd->send_egress_err_status_cnt[31]; 3448 } 3449 3450 static u64 access_tx_sdma14_disallowed_packet_err_cnt( 3451 const struct cntr_entry *entry, 3452 void *context, int vl, int mode, u64 data) 3453 { 3454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3455 3456 return dd->send_egress_err_status_cnt[30]; 3457 } 3458 3459 static u64 access_tx_sdma13_disallowed_packet_err_cnt( 3460 const struct cntr_entry *entry, 3461 void *context, int vl, int mode, u64 data) 3462 { 3463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3464 3465 return dd->send_egress_err_status_cnt[29]; 3466 } 3467 3468 static u64 access_tx_sdma12_disallowed_packet_err_cnt( 3469 const struct cntr_entry *entry, 3470 void *context, int vl, int mode, u64 data) 3471 { 3472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3473 3474 return dd->send_egress_err_status_cnt[28]; 3475 } 3476 3477 static u64 access_tx_sdma11_disallowed_packet_err_cnt( 3478 const struct cntr_entry *entry, 3479 void *context, int vl, int mode, u64 data) 3480 { 3481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3482 3483 return dd->send_egress_err_status_cnt[27]; 3484 } 3485 3486 static u64 access_tx_sdma10_disallowed_packet_err_cnt( 3487 const struct cntr_entry *entry, 3488 void *context, int vl, int mode, u64 data) 3489 { 3490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3491 3492 return dd->send_egress_err_status_cnt[26]; 3493 } 3494 3495 static u64 access_tx_sdma9_disallowed_packet_err_cnt( 3496 const struct cntr_entry *entry, 3497 void *context, int vl, int mode, u64 data) 3498 { 3499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3500 3501 return dd->send_egress_err_status_cnt[25]; 3502 } 3503 3504 static u64 access_tx_sdma8_disallowed_packet_err_cnt( 3505 const struct cntr_entry *entry, 3506 void *context, int vl, int mode, u64 data) 3507 { 3508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3509 3510 return dd->send_egress_err_status_cnt[24]; 3511 } 3512 3513 static u64 access_tx_sdma7_disallowed_packet_err_cnt( 3514 const struct cntr_entry *entry, 3515 void 
*context, int vl, int mode, u64 data) 3516 { 3517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3518 3519 return dd->send_egress_err_status_cnt[23]; 3520 } 3521 3522 static u64 access_tx_sdma6_disallowed_packet_err_cnt( 3523 const struct cntr_entry *entry, 3524 void *context, int vl, int mode, u64 data) 3525 { 3526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3527 3528 return dd->send_egress_err_status_cnt[22]; 3529 } 3530 3531 static u64 access_tx_sdma5_disallowed_packet_err_cnt( 3532 const struct cntr_entry *entry, 3533 void *context, int vl, int mode, u64 data) 3534 { 3535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3536 3537 return dd->send_egress_err_status_cnt[21]; 3538 } 3539 3540 static u64 access_tx_sdma4_disallowed_packet_err_cnt( 3541 const struct cntr_entry *entry, 3542 void *context, int vl, int mode, u64 data) 3543 { 3544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3545 3546 return dd->send_egress_err_status_cnt[20]; 3547 } 3548 3549 static u64 access_tx_sdma3_disallowed_packet_err_cnt( 3550 const struct cntr_entry *entry, 3551 void *context, int vl, int mode, u64 data) 3552 { 3553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3554 3555 return dd->send_egress_err_status_cnt[19]; 3556 } 3557 3558 static u64 access_tx_sdma2_disallowed_packet_err_cnt( 3559 const struct cntr_entry *entry, 3560 void *context, int vl, int mode, u64 data) 3561 { 3562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3563 3564 return dd->send_egress_err_status_cnt[18]; 3565 } 3566 3567 static u64 access_tx_sdma1_disallowed_packet_err_cnt( 3568 const struct cntr_entry *entry, 3569 void *context, int vl, int mode, u64 data) 3570 { 3571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3572 3573 return dd->send_egress_err_status_cnt[17]; 3574 } 3575 3576 static u64 access_tx_sdma0_disallowed_packet_err_cnt( 3577 const struct cntr_entry *entry, 3578 void *context, int vl, int mode, u64 data) 3579 { 3580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3581 3582 return dd->send_egress_err_status_cnt[16]; 3583 } 3584 3585 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry, 3586 void *context, int vl, int mode, 3587 u64 data) 3588 { 3589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3590 3591 return dd->send_egress_err_status_cnt[15]; 3592 } 3593 3594 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry, 3595 void *context, int vl, 3596 int mode, u64 data) 3597 { 3598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3599 3600 return dd->send_egress_err_status_cnt[14]; 3601 } 3602 3603 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry, 3604 void *context, int vl, int mode, 3605 u64 data) 3606 { 3607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3608 3609 return dd->send_egress_err_status_cnt[13]; 3610 } 3611 3612 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry, 3613 void *context, int vl, int mode, 3614 u64 data) 3615 { 3616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3617 3618 return dd->send_egress_err_status_cnt[12]; 3619 } 3620 3621 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt( 3622 const struct cntr_entry *entry, 3623 void *context, int vl, int mode, u64 data) 3624 { 3625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3626 3627 return dd->send_egress_err_status_cnt[11]; 3628 } 3629 3630 static u64 access_egress_reserved_10_err_cnt(const struct 
				cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}
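
/*
 * Note on the access_*_err_cnt() helpers above and below: the counter
 * framework passes the hfi1_devdata back as 'context', and each helper
 * simply returns one element of a software shadow array (for example
 * sw_send_dma_eng_err_status_cnt[]), indexed by the bit position of the
 * corresponding error in the hardware error-status CSR.  The helpers are
 * bound to their counters through CNTR_ELEM(..., CNTR_NORMAL, accessor)
 * entries in dev_cntrs[] further below.
 *
 * Illustrative sketch only: an instantiation of the def_access_ibp_counter()
 * macro defined below, e.g. def_access_ibp_counter(rc_resends), expands to
 * roughly:
 *
 *	static u64 access_ibp_rc_resends(const struct cntr_entry *entry,
 *				void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_rc_resends,
 *				     mode, data);
 *	}
 */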

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}

static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
			      void *context, int vl, int mode, u64 data)      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);

static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow,
				  RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT,
CNTR_SYNTH), 4162 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT, 4163 CNTR_SYNTH | CNTR_VL), 4164 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH), 4165 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT, 4166 CNTR_SYNTH | CNTR_VL), 4167 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT, 4168 CNTR_SYNTH), 4169 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT, 4170 CNTR_SYNTH | CNTR_VL), 4171 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT, 4172 CNTR_SYNTH), 4173 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT, 4174 CNTR_SYNTH | CNTR_VL), 4175 [C_DC_TOTAL_CRC] = 4176 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR, 4177 CNTR_SYNTH), 4178 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0, 4179 CNTR_SYNTH), 4180 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1, 4181 CNTR_SYNTH), 4182 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2, 4183 CNTR_SYNTH), 4184 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3, 4185 CNTR_SYNTH), 4186 [C_DC_CRC_MULT_LN] = 4187 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN, 4188 CNTR_SYNTH), 4189 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT, 4190 CNTR_SYNTH), 4191 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT, 4192 CNTR_SYNTH), 4193 [C_DC_SEQ_CRC_CNT] = 4194 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT, 4195 CNTR_SYNTH), 4196 [C_DC_ESC0_ONLY_CNT] = 4197 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT, 4198 CNTR_SYNTH), 4199 [C_DC_ESC0_PLUS1_CNT] = 4200 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT, 4201 CNTR_SYNTH), 4202 [C_DC_ESC0_PLUS2_CNT] = 4203 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT, 4204 CNTR_SYNTH), 4205 [C_DC_REINIT_FROM_PEER_CNT] = 4206 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 4207 CNTR_SYNTH), 4208 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT, 4209 CNTR_SYNTH), 4210 [C_DC_MISC_FLG_CNT] = 4211 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT, 4212 CNTR_SYNTH), 4213 [C_DC_PRF_GOOD_LTP_CNT] = 4214 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH), 4215 [C_DC_PRF_ACCEPTED_LTP_CNT] = 4216 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT, 4217 CNTR_SYNTH), 4218 [C_DC_PRF_RX_FLIT_CNT] = 4219 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH), 4220 [C_DC_PRF_TX_FLIT_CNT] = 4221 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH), 4222 [C_DC_PRF_CLK_CNTR] = 4223 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH), 4224 [C_DC_PG_DBG_FLIT_CRDTS_CNT] = 4225 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH), 4226 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] = 4227 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT, 4228 CNTR_SYNTH), 4229 [C_DC_PG_STS_TX_SBE_CNT] = 4230 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH), 4231 [C_DC_PG_STS_TX_MBE_CNT] = 4232 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT, 4233 CNTR_SYNTH), 4234 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL, 4235 access_sw_cpu_intr), 4236 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL, 4237 access_sw_cpu_rcv_limit), 4238 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL, 4239 access_sw_ctx0_seq_drop), 
4240 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL, 4241 access_sw_vtx_wait), 4242 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL, 4243 access_sw_pio_wait), 4244 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL, 4245 access_sw_pio_drain), 4246 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL, 4247 access_sw_kmem_wait), 4248 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL, 4249 hfi1_access_sw_tid_wait), 4250 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, 4251 access_sw_send_schedule), 4252 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn", 4253 SEND_DMA_DESC_FETCHED_CNT, 0, 4254 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4255 dev_access_u32_csr), 4256 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0, 4257 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4258 access_sde_int_cnt), 4259 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0, 4260 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4261 access_sde_err_cnt), 4262 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0, 4263 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4264 access_sde_idle_int_cnt), 4265 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0, 4266 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4267 access_sde_progress_int_cnt), 4268 /* MISC_ERR_STATUS */ 4269 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0, 4270 CNTR_NORMAL, 4271 access_misc_pll_lock_fail_err_cnt), 4272 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0, 4273 CNTR_NORMAL, 4274 access_misc_mbist_fail_err_cnt), 4275 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0, 4276 CNTR_NORMAL, 4277 access_misc_invalid_eep_cmd_err_cnt), 4278 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0, 4279 CNTR_NORMAL, 4280 access_misc_efuse_done_parity_err_cnt), 4281 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0, 4282 CNTR_NORMAL, 4283 access_misc_efuse_write_err_cnt), 4284 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0, 4285 0, CNTR_NORMAL, 4286 access_misc_efuse_read_bad_addr_err_cnt), 4287 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0, 4288 CNTR_NORMAL, 4289 access_misc_efuse_csr_parity_err_cnt), 4290 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0, 4291 CNTR_NORMAL, 4292 access_misc_fw_auth_failed_err_cnt), 4293 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0, 4294 CNTR_NORMAL, 4295 access_misc_key_mismatch_err_cnt), 4296 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0, 4297 CNTR_NORMAL, 4298 access_misc_sbus_write_failed_err_cnt), 4299 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0, 4300 CNTR_NORMAL, 4301 access_misc_csr_write_bad_addr_err_cnt), 4302 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0, 4303 CNTR_NORMAL, 4304 access_misc_csr_read_bad_addr_err_cnt), 4305 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0, 4306 CNTR_NORMAL, 4307 access_misc_csr_parity_err_cnt), 4308 /* CceErrStatus */ 4309 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0, 4310 CNTR_NORMAL, 4311 access_sw_cce_err_status_aggregated_cnt), 4312 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0, 4313 CNTR_NORMAL, 4314 access_cce_msix_csr_parity_err_cnt), 4315 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0, 4316 CNTR_NORMAL, 4317 access_cce_int_map_unc_err_cnt), 4318 [C_CCE_INT_MAP_COR_ERR] = 
CNTR_ELEM("CceIntMapCorErr", 0, 0, 4319 CNTR_NORMAL, 4320 access_cce_int_map_cor_err_cnt), 4321 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0, 4322 CNTR_NORMAL, 4323 access_cce_msix_table_unc_err_cnt), 4324 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0, 4325 CNTR_NORMAL, 4326 access_cce_msix_table_cor_err_cnt), 4327 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0, 4328 0, CNTR_NORMAL, 4329 access_cce_rxdma_conv_fifo_parity_err_cnt), 4330 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0, 4331 0, CNTR_NORMAL, 4332 access_cce_rcpl_async_fifo_parity_err_cnt), 4333 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0, 4334 CNTR_NORMAL, 4335 access_cce_seg_write_bad_addr_err_cnt), 4336 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0, 4337 CNTR_NORMAL, 4338 access_cce_seg_read_bad_addr_err_cnt), 4339 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0, 4340 CNTR_NORMAL, 4341 access_la_triggered_cnt), 4342 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0, 4343 CNTR_NORMAL, 4344 access_cce_trgt_cpl_timeout_err_cnt), 4345 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0, 4346 CNTR_NORMAL, 4347 access_pcic_receive_parity_err_cnt), 4348 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0, 4349 CNTR_NORMAL, 4350 access_pcic_transmit_back_parity_err_cnt), 4351 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0, 4352 0, CNTR_NORMAL, 4353 access_pcic_transmit_front_parity_err_cnt), 4354 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0, 4355 CNTR_NORMAL, 4356 access_pcic_cpl_dat_q_unc_err_cnt), 4357 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0, 4358 CNTR_NORMAL, 4359 access_pcic_cpl_hd_q_unc_err_cnt), 4360 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0, 4361 CNTR_NORMAL, 4362 access_pcic_post_dat_q_unc_err_cnt), 4363 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0, 4364 CNTR_NORMAL, 4365 access_pcic_post_hd_q_unc_err_cnt), 4366 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0, 4367 CNTR_NORMAL, 4368 access_pcic_retry_sot_mem_unc_err_cnt), 4369 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0, 4370 CNTR_NORMAL, 4371 access_pcic_retry_mem_unc_err), 4372 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0, 4373 CNTR_NORMAL, 4374 access_pcic_n_post_dat_q_parity_err_cnt), 4375 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0, 4376 CNTR_NORMAL, 4377 access_pcic_n_post_h_q_parity_err_cnt), 4378 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0, 4379 CNTR_NORMAL, 4380 access_pcic_cpl_dat_q_cor_err_cnt), 4381 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0, 4382 CNTR_NORMAL, 4383 access_pcic_cpl_hd_q_cor_err_cnt), 4384 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0, 4385 CNTR_NORMAL, 4386 access_pcic_post_dat_q_cor_err_cnt), 4387 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0, 4388 CNTR_NORMAL, 4389 access_pcic_post_hd_q_cor_err_cnt), 4390 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0, 4391 CNTR_NORMAL, 4392 access_pcic_retry_sot_mem_cor_err_cnt), 4393 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0, 4394 CNTR_NORMAL, 4395 access_pcic_retry_mem_cor_err_cnt), 4396 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = 
CNTR_ELEM( 4397 "CceCli1AsyncFifoDbgParityError", 0, 0, 4398 CNTR_NORMAL, 4399 access_cce_cli1_async_fifo_dbg_parity_err_cnt), 4400 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM( 4401 "CceCli1AsyncFifoRxdmaParityError", 0, 0, 4402 CNTR_NORMAL, 4403 access_cce_cli1_async_fifo_rxdma_parity_err_cnt 4404 ), 4405 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM( 4406 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0, 4407 CNTR_NORMAL, 4408 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt), 4409 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM( 4410 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0, 4411 CNTR_NORMAL, 4412 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt), 4413 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0, 4414 0, CNTR_NORMAL, 4415 access_cce_cli2_async_fifo_parity_err_cnt), 4416 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0, 4417 CNTR_NORMAL, 4418 access_cce_csr_cfg_bus_parity_err_cnt), 4419 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0, 4420 0, CNTR_NORMAL, 4421 access_cce_cli0_async_fifo_parity_err_cnt), 4422 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0, 4423 CNTR_NORMAL, 4424 access_cce_rspd_data_parity_err_cnt), 4425 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0, 4426 CNTR_NORMAL, 4427 access_cce_trgt_access_err_cnt), 4428 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0, 4429 0, CNTR_NORMAL, 4430 access_cce_trgt_async_fifo_parity_err_cnt), 4431 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0, 4432 CNTR_NORMAL, 4433 access_cce_csr_write_bad_addr_err_cnt), 4434 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0, 4435 CNTR_NORMAL, 4436 access_cce_csr_read_bad_addr_err_cnt), 4437 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0, 4438 CNTR_NORMAL, 4439 access_ccs_csr_parity_err_cnt), 4440 4441 /* RcvErrStatus */ 4442 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0, 4443 CNTR_NORMAL, 4444 access_rx_csr_parity_err_cnt), 4445 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0, 4446 CNTR_NORMAL, 4447 access_rx_csr_write_bad_addr_err_cnt), 4448 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0, 4449 CNTR_NORMAL, 4450 access_rx_csr_read_bad_addr_err_cnt), 4451 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0, 4452 CNTR_NORMAL, 4453 access_rx_dma_csr_unc_err_cnt), 4454 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0, 4455 CNTR_NORMAL, 4456 access_rx_dma_dq_fsm_encoding_err_cnt), 4457 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0, 4458 CNTR_NORMAL, 4459 access_rx_dma_eq_fsm_encoding_err_cnt), 4460 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0, 4461 CNTR_NORMAL, 4462 access_rx_dma_csr_parity_err_cnt), 4463 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0, 4464 CNTR_NORMAL, 4465 access_rx_rbuf_data_cor_err_cnt), 4466 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0, 4467 CNTR_NORMAL, 4468 access_rx_rbuf_data_unc_err_cnt), 4469 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0, 4470 CNTR_NORMAL, 4471 access_rx_dma_data_fifo_rd_cor_err_cnt), 4472 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0, 4473 CNTR_NORMAL, 4474 access_rx_dma_data_fifo_rd_unc_err_cnt), 4475 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0, 4476 CNTR_NORMAL, 4477 
access_rx_dma_hdr_fifo_rd_cor_err_cnt), 4478 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0, 4479 CNTR_NORMAL, 4480 access_rx_dma_hdr_fifo_rd_unc_err_cnt), 4481 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0, 4482 CNTR_NORMAL, 4483 access_rx_rbuf_desc_part2_cor_err_cnt), 4484 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0, 4485 CNTR_NORMAL, 4486 access_rx_rbuf_desc_part2_unc_err_cnt), 4487 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0, 4488 CNTR_NORMAL, 4489 access_rx_rbuf_desc_part1_cor_err_cnt), 4490 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0, 4491 CNTR_NORMAL, 4492 access_rx_rbuf_desc_part1_unc_err_cnt), 4493 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0, 4494 CNTR_NORMAL, 4495 access_rx_hq_intr_fsm_err_cnt), 4496 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0, 4497 CNTR_NORMAL, 4498 access_rx_hq_intr_csr_parity_err_cnt), 4499 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0, 4500 CNTR_NORMAL, 4501 access_rx_lookup_csr_parity_err_cnt), 4502 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0, 4503 CNTR_NORMAL, 4504 access_rx_lookup_rcv_array_cor_err_cnt), 4505 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0, 4506 CNTR_NORMAL, 4507 access_rx_lookup_rcv_array_unc_err_cnt), 4508 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0, 4509 0, CNTR_NORMAL, 4510 access_rx_lookup_des_part2_parity_err_cnt), 4511 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0, 4512 0, CNTR_NORMAL, 4513 access_rx_lookup_des_part1_unc_cor_err_cnt), 4514 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0, 4515 CNTR_NORMAL, 4516 access_rx_lookup_des_part1_unc_err_cnt), 4517 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0, 4518 CNTR_NORMAL, 4519 access_rx_rbuf_next_free_buf_cor_err_cnt), 4520 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0, 4521 CNTR_NORMAL, 4522 access_rx_rbuf_next_free_buf_unc_err_cnt), 4523 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM( 4524 "RxRbufFlInitWrAddrParityErr", 0, 0, 4525 CNTR_NORMAL, 4526 access_rbuf_fl_init_wr_addr_parity_err_cnt), 4527 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0, 4528 0, CNTR_NORMAL, 4529 access_rx_rbuf_fl_initdone_parity_err_cnt), 4530 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0, 4531 0, CNTR_NORMAL, 4532 access_rx_rbuf_fl_write_addr_parity_err_cnt), 4533 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0, 4534 CNTR_NORMAL, 4535 access_rx_rbuf_fl_rd_addr_parity_err_cnt), 4536 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0, 4537 CNTR_NORMAL, 4538 access_rx_rbuf_empty_err_cnt), 4539 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0, 4540 CNTR_NORMAL, 4541 access_rx_rbuf_full_err_cnt), 4542 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0, 4543 CNTR_NORMAL, 4544 access_rbuf_bad_lookup_err_cnt), 4545 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0, 4546 CNTR_NORMAL, 4547 access_rbuf_ctx_id_parity_err_cnt), 4548 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0, 4549 CNTR_NORMAL, 4550 access_rbuf_csr_qeopdw_parity_err_cnt), 4551 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM( 4552 "RxRbufCsrQNumOfPktParityErr", 
0, 0, 4553 CNTR_NORMAL, 4554 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt), 4555 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM( 4556 "RxRbufCsrQTlPtrParityErr", 0, 0, 4557 CNTR_NORMAL, 4558 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt), 4559 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0, 4560 0, CNTR_NORMAL, 4561 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt), 4562 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0, 4563 0, CNTR_NORMAL, 4564 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt), 4565 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr", 4566 0, 0, CNTR_NORMAL, 4567 access_rx_rbuf_csr_q_next_buf_parity_err_cnt), 4568 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0, 4569 0, CNTR_NORMAL, 4570 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt), 4571 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM( 4572 "RxRbufCsrQHeadBufNumParityErr", 0, 0, 4573 CNTR_NORMAL, 4574 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt), 4575 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0, 4576 0, CNTR_NORMAL, 4577 access_rx_rbuf_block_list_read_cor_err_cnt), 4578 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0, 4579 0, CNTR_NORMAL, 4580 access_rx_rbuf_block_list_read_unc_err_cnt), 4581 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0, 4582 CNTR_NORMAL, 4583 access_rx_rbuf_lookup_des_cor_err_cnt), 4584 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0, 4585 CNTR_NORMAL, 4586 access_rx_rbuf_lookup_des_unc_err_cnt), 4587 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM( 4588 "RxRbufLookupDesRegUncCorErr", 0, 0, 4589 CNTR_NORMAL, 4590 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt), 4591 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0, 4592 CNTR_NORMAL, 4593 access_rx_rbuf_lookup_des_reg_unc_err_cnt), 4594 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0, 4595 CNTR_NORMAL, 4596 access_rx_rbuf_free_list_cor_err_cnt), 4597 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0, 4598 CNTR_NORMAL, 4599 access_rx_rbuf_free_list_unc_err_cnt), 4600 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0, 4601 CNTR_NORMAL, 4602 access_rx_rcv_fsm_encoding_err_cnt), 4603 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0, 4604 CNTR_NORMAL, 4605 access_rx_dma_flag_cor_err_cnt), 4606 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0, 4607 CNTR_NORMAL, 4608 access_rx_dma_flag_unc_err_cnt), 4609 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0, 4610 CNTR_NORMAL, 4611 access_rx_dc_sop_eop_parity_err_cnt), 4612 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0, 4613 CNTR_NORMAL, 4614 access_rx_rcv_csr_parity_err_cnt), 4615 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0, 4616 CNTR_NORMAL, 4617 access_rx_rcv_qp_map_table_cor_err_cnt), 4618 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0, 4619 CNTR_NORMAL, 4620 access_rx_rcv_qp_map_table_unc_err_cnt), 4621 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0, 4622 CNTR_NORMAL, 4623 access_rx_rcv_data_cor_err_cnt), 4624 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0, 4625 CNTR_NORMAL, 4626 access_rx_rcv_data_unc_err_cnt), 4627 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0, 4628 CNTR_NORMAL, 4629 access_rx_rcv_hdr_cor_err_cnt), 4630 
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0, 4631 CNTR_NORMAL, 4632 access_rx_rcv_hdr_unc_err_cnt), 4633 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0, 4634 CNTR_NORMAL, 4635 access_rx_dc_intf_parity_err_cnt), 4636 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0, 4637 CNTR_NORMAL, 4638 access_rx_dma_csr_cor_err_cnt), 4639 /* SendPioErrStatus */ 4640 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0, 4641 CNTR_NORMAL, 4642 access_pio_pec_sop_head_parity_err_cnt), 4643 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0, 4644 CNTR_NORMAL, 4645 access_pio_pcc_sop_head_parity_err_cnt), 4646 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr", 4647 0, 0, CNTR_NORMAL, 4648 access_pio_last_returned_cnt_parity_err_cnt), 4649 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0, 4650 0, CNTR_NORMAL, 4651 access_pio_current_free_cnt_parity_err_cnt), 4652 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0, 4653 CNTR_NORMAL, 4654 access_pio_reserved_31_err_cnt), 4655 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0, 4656 CNTR_NORMAL, 4657 access_pio_reserved_30_err_cnt), 4658 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0, 4659 CNTR_NORMAL, 4660 access_pio_ppmc_sop_len_err_cnt), 4661 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0, 4662 CNTR_NORMAL, 4663 access_pio_ppmc_bqc_mem_parity_err_cnt), 4664 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0, 4665 CNTR_NORMAL, 4666 access_pio_vl_fifo_parity_err_cnt), 4667 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0, 4668 CNTR_NORMAL, 4669 access_pio_vlf_sop_parity_err_cnt), 4670 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0, 4671 CNTR_NORMAL, 4672 access_pio_vlf_v1_len_parity_err_cnt), 4673 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0, 4674 CNTR_NORMAL, 4675 access_pio_block_qw_count_parity_err_cnt), 4676 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0, 4677 CNTR_NORMAL, 4678 access_pio_write_qw_valid_parity_err_cnt), 4679 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0, 4680 CNTR_NORMAL, 4681 access_pio_state_machine_err_cnt), 4682 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0, 4683 CNTR_NORMAL, 4684 access_pio_write_data_parity_err_cnt), 4685 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0, 4686 CNTR_NORMAL, 4687 access_pio_host_addr_mem_cor_err_cnt), 4688 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0, 4689 CNTR_NORMAL, 4690 access_pio_host_addr_mem_unc_err_cnt), 4691 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0, 4692 CNTR_NORMAL, 4693 access_pio_pkt_evict_sm_or_arb_sm_err_cnt), 4694 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0, 4695 CNTR_NORMAL, 4696 access_pio_init_sm_in_err_cnt), 4697 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0, 4698 CNTR_NORMAL, 4699 access_pio_ppmc_pbl_fifo_err_cnt), 4700 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0, 4701 0, CNTR_NORMAL, 4702 access_pio_credit_ret_fifo_parity_err_cnt), 4703 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0, 4704 CNTR_NORMAL, 4705 access_pio_v1_len_mem_bank1_cor_err_cnt), 4706 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0, 4707 CNTR_NORMAL, 
4708 access_pio_v1_len_mem_bank0_cor_err_cnt), 4709 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0, 4710 CNTR_NORMAL, 4711 access_pio_v1_len_mem_bank1_unc_err_cnt), 4712 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0, 4713 CNTR_NORMAL, 4714 access_pio_v1_len_mem_bank0_unc_err_cnt), 4715 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0, 4716 CNTR_NORMAL, 4717 access_pio_sm_pkt_reset_parity_err_cnt), 4718 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0, 4719 CNTR_NORMAL, 4720 access_pio_pkt_evict_fifo_parity_err_cnt), 4721 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM( 4722 "PioSbrdctrlCrrelFifoParityErr", 0, 0, 4723 CNTR_NORMAL, 4724 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt), 4725 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0, 4726 CNTR_NORMAL, 4727 access_pio_sbrdctl_crrel_parity_err_cnt), 4728 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0, 4729 CNTR_NORMAL, 4730 access_pio_pec_fifo_parity_err_cnt), 4731 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0, 4732 CNTR_NORMAL, 4733 access_pio_pcc_fifo_parity_err_cnt), 4734 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0, 4735 CNTR_NORMAL, 4736 access_pio_sb_mem_fifo1_err_cnt), 4737 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0, 4738 CNTR_NORMAL, 4739 access_pio_sb_mem_fifo0_err_cnt), 4740 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0, 4741 CNTR_NORMAL, 4742 access_pio_csr_parity_err_cnt), 4743 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0, 4744 CNTR_NORMAL, 4745 access_pio_write_addr_parity_err_cnt), 4746 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0, 4747 CNTR_NORMAL, 4748 access_pio_write_bad_ctxt_err_cnt), 4749 /* SendDmaErrStatus */ 4750 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0, 4751 0, CNTR_NORMAL, 4752 access_sdma_pcie_req_tracking_cor_err_cnt), 4753 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0, 4754 0, CNTR_NORMAL, 4755 access_sdma_pcie_req_tracking_unc_err_cnt), 4756 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0, 4757 CNTR_NORMAL, 4758 access_sdma_csr_parity_err_cnt), 4759 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0, 4760 CNTR_NORMAL, 4761 access_sdma_rpy_tag_err_cnt), 4762 /* SendEgressErrStatus */ 4763 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0, 4764 CNTR_NORMAL, 4765 access_tx_read_pio_memory_csr_unc_err_cnt), 4766 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0, 4767 0, CNTR_NORMAL, 4768 access_tx_read_sdma_memory_csr_err_cnt), 4769 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0, 4770 CNTR_NORMAL, 4771 access_tx_egress_fifo_cor_err_cnt), 4772 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0, 4773 CNTR_NORMAL, 4774 access_tx_read_pio_memory_cor_err_cnt), 4775 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0, 4776 CNTR_NORMAL, 4777 access_tx_read_sdma_memory_cor_err_cnt), 4778 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0, 4779 CNTR_NORMAL, 4780 access_tx_sb_hdr_cor_err_cnt), 4781 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0, 4782 CNTR_NORMAL, 4783 access_tx_credit_overrun_err_cnt), 4784 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0, 4785 CNTR_NORMAL, 4786 
access_tx_launch_fifo8_cor_err_cnt), 4787 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0, 4788 CNTR_NORMAL, 4789 access_tx_launch_fifo7_cor_err_cnt), 4790 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0, 4791 CNTR_NORMAL, 4792 access_tx_launch_fifo6_cor_err_cnt), 4793 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0, 4794 CNTR_NORMAL, 4795 access_tx_launch_fifo5_cor_err_cnt), 4796 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0, 4797 CNTR_NORMAL, 4798 access_tx_launch_fifo4_cor_err_cnt), 4799 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0, 4800 CNTR_NORMAL, 4801 access_tx_launch_fifo3_cor_err_cnt), 4802 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0, 4803 CNTR_NORMAL, 4804 access_tx_launch_fifo2_cor_err_cnt), 4805 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0, 4806 CNTR_NORMAL, 4807 access_tx_launch_fifo1_cor_err_cnt), 4808 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0, 4809 CNTR_NORMAL, 4810 access_tx_launch_fifo0_cor_err_cnt), 4811 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0, 4812 CNTR_NORMAL, 4813 access_tx_credit_return_vl_err_cnt), 4814 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0, 4815 CNTR_NORMAL, 4816 access_tx_hcrc_insertion_err_cnt), 4817 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0, 4818 CNTR_NORMAL, 4819 access_tx_egress_fifo_unc_err_cnt), 4820 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0, 4821 CNTR_NORMAL, 4822 access_tx_read_pio_memory_unc_err_cnt), 4823 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0, 4824 CNTR_NORMAL, 4825 access_tx_read_sdma_memory_unc_err_cnt), 4826 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0, 4827 CNTR_NORMAL, 4828 access_tx_sb_hdr_unc_err_cnt), 4829 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0, 4830 CNTR_NORMAL, 4831 access_tx_credit_return_partiy_err_cnt), 4832 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr", 4833 0, 0, CNTR_NORMAL, 4834 access_tx_launch_fifo8_unc_or_parity_err_cnt), 4835 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr", 4836 0, 0, CNTR_NORMAL, 4837 access_tx_launch_fifo7_unc_or_parity_err_cnt), 4838 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr", 4839 0, 0, CNTR_NORMAL, 4840 access_tx_launch_fifo6_unc_or_parity_err_cnt), 4841 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr", 4842 0, 0, CNTR_NORMAL, 4843 access_tx_launch_fifo5_unc_or_parity_err_cnt), 4844 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr", 4845 0, 0, CNTR_NORMAL, 4846 access_tx_launch_fifo4_unc_or_parity_err_cnt), 4847 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr", 4848 0, 0, CNTR_NORMAL, 4849 access_tx_launch_fifo3_unc_or_parity_err_cnt), 4850 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr", 4851 0, 0, CNTR_NORMAL, 4852 access_tx_launch_fifo2_unc_or_parity_err_cnt), 4853 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr", 4854 0, 0, CNTR_NORMAL, 4855 access_tx_launch_fifo1_unc_or_parity_err_cnt), 4856 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr", 4857 0, 0, CNTR_NORMAL, 4858 access_tx_launch_fifo0_unc_or_parity_err_cnt), 4859 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = 
CNTR_ELEM("TxSdma15DisallowedPacketErr", 4860 0, 0, CNTR_NORMAL, 4861 access_tx_sdma15_disallowed_packet_err_cnt), 4862 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr", 4863 0, 0, CNTR_NORMAL, 4864 access_tx_sdma14_disallowed_packet_err_cnt), 4865 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr", 4866 0, 0, CNTR_NORMAL, 4867 access_tx_sdma13_disallowed_packet_err_cnt), 4868 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr", 4869 0, 0, CNTR_NORMAL, 4870 access_tx_sdma12_disallowed_packet_err_cnt), 4871 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr", 4872 0, 0, CNTR_NORMAL, 4873 access_tx_sdma11_disallowed_packet_err_cnt), 4874 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr", 4875 0, 0, CNTR_NORMAL, 4876 access_tx_sdma10_disallowed_packet_err_cnt), 4877 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr", 4878 0, 0, CNTR_NORMAL, 4879 access_tx_sdma9_disallowed_packet_err_cnt), 4880 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr", 4881 0, 0, CNTR_NORMAL, 4882 access_tx_sdma8_disallowed_packet_err_cnt), 4883 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr", 4884 0, 0, CNTR_NORMAL, 4885 access_tx_sdma7_disallowed_packet_err_cnt), 4886 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr", 4887 0, 0, CNTR_NORMAL, 4888 access_tx_sdma6_disallowed_packet_err_cnt), 4889 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr", 4890 0, 0, CNTR_NORMAL, 4891 access_tx_sdma5_disallowed_packet_err_cnt), 4892 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr", 4893 0, 0, CNTR_NORMAL, 4894 access_tx_sdma4_disallowed_packet_err_cnt), 4895 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr", 4896 0, 0, CNTR_NORMAL, 4897 access_tx_sdma3_disallowed_packet_err_cnt), 4898 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr", 4899 0, 0, CNTR_NORMAL, 4900 access_tx_sdma2_disallowed_packet_err_cnt), 4901 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr", 4902 0, 0, CNTR_NORMAL, 4903 access_tx_sdma1_disallowed_packet_err_cnt), 4904 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr", 4905 0, 0, CNTR_NORMAL, 4906 access_tx_sdma0_disallowed_packet_err_cnt), 4907 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0, 4908 CNTR_NORMAL, 4909 access_tx_config_parity_err_cnt), 4910 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0, 4911 CNTR_NORMAL, 4912 access_tx_sbrd_ctl_csr_parity_err_cnt), 4913 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0, 4914 CNTR_NORMAL, 4915 access_tx_launch_csr_parity_err_cnt), 4916 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0, 4917 CNTR_NORMAL, 4918 access_tx_illegal_vl_err_cnt), 4919 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM( 4920 "TxSbrdCtlStateMachineParityErr", 0, 0, 4921 CNTR_NORMAL, 4922 access_tx_sbrd_ctl_state_machine_parity_err_cnt), 4923 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0, 4924 CNTR_NORMAL, 4925 access_egress_reserved_10_err_cnt), 4926 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0, 4927 CNTR_NORMAL, 4928 access_egress_reserved_9_err_cnt), 4929 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr", 4930 0, 0, CNTR_NORMAL, 4931 
access_tx_sdma_launch_intf_parity_err_cnt), 4932 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0, 4933 CNTR_NORMAL, 4934 access_tx_pio_launch_intf_parity_err_cnt), 4935 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0, 4936 CNTR_NORMAL, 4937 access_egress_reserved_6_err_cnt), 4938 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0, 4939 CNTR_NORMAL, 4940 access_tx_incorrect_link_state_err_cnt), 4941 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0, 4942 CNTR_NORMAL, 4943 access_tx_linkdown_err_cnt), 4944 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM( 4945 "EgressFifoUnderrunOrParityErr", 0, 0, 4946 CNTR_NORMAL, 4947 access_tx_egress_fifi_underrun_or_parity_err_cnt), 4948 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0, 4949 CNTR_NORMAL, 4950 access_egress_reserved_2_err_cnt), 4951 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0, 4952 CNTR_NORMAL, 4953 access_tx_pkt_integrity_mem_unc_err_cnt), 4954 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0, 4955 CNTR_NORMAL, 4956 access_tx_pkt_integrity_mem_cor_err_cnt), 4957 /* SendErrStatus */ 4958 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0, 4959 CNTR_NORMAL, 4960 access_send_csr_write_bad_addr_err_cnt), 4961 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0, 4962 CNTR_NORMAL, 4963 access_send_csr_read_bad_addr_err_cnt), 4964 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0, 4965 CNTR_NORMAL, 4966 access_send_csr_parity_cnt), 4967 /* SendCtxtErrStatus */ 4968 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0, 4969 CNTR_NORMAL, 4970 access_pio_write_out_of_bounds_err_cnt), 4971 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0, 4972 CNTR_NORMAL, 4973 access_pio_write_overflow_err_cnt), 4974 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr", 4975 0, 0, CNTR_NORMAL, 4976 access_pio_write_crosses_boundary_err_cnt), 4977 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0, 4978 CNTR_NORMAL, 4979 access_pio_disallowed_packet_err_cnt), 4980 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0, 4981 CNTR_NORMAL, 4982 access_pio_inconsistent_sop_err_cnt), 4983 /* SendDmaEngErrStatus */ 4984 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr", 4985 0, 0, CNTR_NORMAL, 4986 access_sdma_header_request_fifo_cor_err_cnt), 4987 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0, 4988 CNTR_NORMAL, 4989 access_sdma_header_storage_cor_err_cnt), 4990 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0, 4991 CNTR_NORMAL, 4992 access_sdma_packet_tracking_cor_err_cnt), 4993 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0, 4994 CNTR_NORMAL, 4995 access_sdma_assembly_cor_err_cnt), 4996 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0, 4997 CNTR_NORMAL, 4998 access_sdma_desc_table_cor_err_cnt), 4999 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr", 5000 0, 0, CNTR_NORMAL, 5001 access_sdma_header_request_fifo_unc_err_cnt), 5002 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0, 5003 CNTR_NORMAL, 5004 access_sdma_header_storage_unc_err_cnt), 5005 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0, 5006 CNTR_NORMAL, 5007 access_sdma_packet_tracking_unc_err_cnt), 5008 
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0, 5009 CNTR_NORMAL, 5010 access_sdma_assembly_unc_err_cnt), 5011 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0, 5012 CNTR_NORMAL, 5013 access_sdma_desc_table_unc_err_cnt), 5014 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0, 5015 CNTR_NORMAL, 5016 access_sdma_timeout_err_cnt), 5017 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0, 5018 CNTR_NORMAL, 5019 access_sdma_header_length_err_cnt), 5020 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0, 5021 CNTR_NORMAL, 5022 access_sdma_header_address_err_cnt), 5023 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0, 5024 CNTR_NORMAL, 5025 access_sdma_header_select_err_cnt), 5026 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0, 5027 CNTR_NORMAL, 5028 access_sdma_reserved_9_err_cnt), 5029 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0, 5030 CNTR_NORMAL, 5031 access_sdma_packet_desc_overflow_err_cnt), 5032 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0, 5033 CNTR_NORMAL, 5034 access_sdma_length_mismatch_err_cnt), 5035 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0, 5036 CNTR_NORMAL, 5037 access_sdma_halt_err_cnt), 5038 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0, 5039 CNTR_NORMAL, 5040 access_sdma_mem_read_err_cnt), 5041 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0, 5042 CNTR_NORMAL, 5043 access_sdma_first_desc_err_cnt), 5044 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0, 5045 CNTR_NORMAL, 5046 access_sdma_tail_out_of_bounds_err_cnt), 5047 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0, 5048 CNTR_NORMAL, 5049 access_sdma_too_long_err_cnt), 5050 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0, 5051 CNTR_NORMAL, 5052 access_sdma_gen_mismatch_err_cnt), 5053 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0, 5054 CNTR_NORMAL, 5055 access_sdma_wrong_dw_err_cnt), 5056 }; 5057 5058 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { 5059 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT, 5060 CNTR_NORMAL), 5061 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT, 5062 CNTR_NORMAL), 5063 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT, 5064 CNTR_NORMAL), 5065 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT, 5066 CNTR_NORMAL), 5067 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT, 5068 CNTR_NORMAL), 5069 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT, 5070 CNTR_NORMAL), 5071 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT, 5072 CNTR_NORMAL), 5073 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL), 5074 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL), 5075 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH), 5076 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT, 5077 CNTR_SYNTH | CNTR_VL), 5078 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT, 5079 CNTR_SYNTH | CNTR_VL), 5080 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT, 5081 CNTR_SYNTH | CNTR_VL), 5082 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL), 5083 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL), 5084 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | 
CNTR_32BIT, 5085 access_sw_link_dn_cnt), 5086 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT, 5087 access_sw_link_up_cnt), 5088 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL, 5089 access_sw_unknown_frame_cnt), 5090 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT, 5091 access_sw_xmit_discards), 5092 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0, 5093 CNTR_SYNTH | CNTR_32BIT | CNTR_VL, 5094 access_sw_xmit_discards), 5095 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH, 5096 access_xmit_constraint_errs), 5097 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH, 5098 access_rcv_constraint_errs), 5099 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts), 5100 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends), 5101 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks), 5102 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks), 5103 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts), 5104 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops), 5105 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait), 5106 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak), 5107 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq), 5108 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), 5109 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), 5110 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), 5111 [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), 5112 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, 5113 access_sw_cpu_rc_acks), 5114 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, 5115 access_sw_cpu_rc_qacks), 5116 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL, 5117 access_sw_cpu_rc_delayed_comp), 5118 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1), 5119 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3), 5120 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5), 5121 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7), 5122 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9), 5123 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11), 5124 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13), 5125 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15), 5126 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17), 5127 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19), 5128 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21), 5129 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23), 5130 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25), 5131 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27), 5132 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29), 5133 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31), 5134 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33), 5135 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35), 5136 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37), 5137 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39), 5138 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41), 5139 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43), 5140 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45), 5141 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47), 5142 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49), 5143 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51), 5144 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53), 5145 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55), 5146 
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57), 5147 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59), 5148 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61), 5149 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63), 5150 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65), 5151 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67), 5152 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69), 5153 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71), 5154 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73), 5155 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75), 5156 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77), 5157 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79), 5158 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81), 5159 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83), 5160 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85), 5161 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87), 5162 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89), 5163 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91), 5164 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93), 5165 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95), 5166 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97), 5167 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99), 5168 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101), 5169 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103), 5170 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105), 5171 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107), 5172 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109), 5173 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111), 5174 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113), 5175 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115), 5176 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117), 5177 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119), 5178 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121), 5179 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123), 5180 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125), 5181 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127), 5182 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129), 5183 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131), 5184 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133), 5185 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135), 5186 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137), 5187 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139), 5188 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141), 5189 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143), 5190 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145), 5191 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147), 5192 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149), 5193 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151), 5194 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153), 5195 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155), 5196 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157), 5197 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159), 5198 }; 5199 5200 /* ======================================================================== */ 5201 5202 /* return true if this is chip revision revision a */ 
5203 int is_ax(struct hfi1_devdata *dd)
5204 {
5205 u8 chip_rev_minor =
5206 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5207 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5208 return (chip_rev_minor & 0xf0) == 0;
5209 }
5210
5211 /* return true if this is chip revision b */
5212 int is_bx(struct hfi1_devdata *dd)
5213 {
5214 u8 chip_rev_minor =
5215 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5216 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5217 return (chip_rev_minor & 0xF0) == 0x10;
5218 }
5219
5220 /* return true if the kernel urgent interrupt is masked for rcd */
5221 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5222 {
5223 u64 mask;
5224 u32 is = IS_RCVURGENT_START + rcd->ctxt;
5225 u8 bit = is % 64;
5226
5227 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5228 return !(mask & BIT_ULL(bit));
5229 }
5230
5231 /*
5232 * Append string s to buffer buf. Arguments curp and len are the current
5233 * position and remaining length, respectively.
5234 *
5235 * return 0 on success, 1 on out of room
5236 */
5237 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5238 {
5239 char *p = *curp;
5240 int len = *lenp;
5241 int result = 0; /* success */
5242 char c;
5243
5244 /* add a comma if this is not the first string in the buffer */
5245 if (p != buf) {
5246 if (len == 0) {
5247 result = 1; /* out of room */
5248 goto done;
5249 }
5250 *p++ = ',';
5251 len--;
5252 }
5253
5254 /* copy the string */
5255 while ((c = *s++) != 0) {
5256 if (len == 0) {
5257 result = 1; /* out of room */
5258 goto done;
5259 }
5260 *p++ = c;
5261 len--;
5262 }
5263
5264 done:
5265 /* write return values */
5266 *curp = p;
5267 *lenp = len;
5268
5269 return result;
5270 }
5271
5272 /*
5273 * Using the given flag table, print a comma separated string into
5274 * the buffer. End in '*' if the buffer is too short.
5275 */
5276 static char *flag_string(char *buf, int buf_len, u64 flags,
5277 struct flag_table *table, int table_size)
5278 {
5279 char extra[32];
5280 char *p = buf;
5281 int len = buf_len;
5282 int no_room = 0;
5283 int i;
5284
5285 /* make sure there are at least 2 bytes so we can form "*" */
5286 if (len < 2)
5287 return "";
5288
5289 len--; /* leave room for a nul */
5290 for (i = 0; i < table_size; i++) {
5291 if (flags & table[i].flag) {
5292 no_room = append_str(buf, &p, &len, table[i].str);
5293 if (no_room)
5294 break;
5295 flags &= ~table[i].flag;
5296 }
5297 }
5298
5299 /* any undocumented bits left? */
5300 if (!no_room && flags) {
5301 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5302 no_room = append_str(buf, &p, &len, extra);
5303 }
5304
5305 /* add '*' if we ran out of room */
5306 if (no_room) {
5307 /* may need to back up to add space for a '*' */
5308 if (len == 0)
5309 --p;
5310 *p++ = '*';
5311 }
5312
5313 /* add final nul - space already allocated above */
5314 *p = 0;
5315 return buf;
5316 }
5317
5318 /* first 8 CCE error interrupt source names */
5319 static const char * const cce_misc_names[] = {
5320 "CceErrInt", /* 0 */
5321 "RxeErrInt", /* 1 */
5322 "MiscErrInt", /* 2 */
5323 "Reserved3", /* 3 */
5324 "PioErrInt", /* 4 */
5325 "SDmaErrInt", /* 5 */
5326 "EgressErrInt", /* 6 */
5327 "TxeErrInt" /* 7 */
5328 };
5329
5330 /*
5331 * Return the miscellaneous error interrupt name.
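 *
 * A minimal usage sketch; the caller and the "source" variable below are
 * hypothetical, not taken from this driver:
 *
 *	char name[64];
 *
 *	dd_dev_info(dd, "%s asserted\n",
 *		    is_misc_err_name(name, sizeof(name), source));
 *
 * Sources 0-7 come from cce_misc_names[] above; larger values fall back
 * to a "Reserved<N>" name.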
5332 */ 5333 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) 5334 { 5335 if (source < ARRAY_SIZE(cce_misc_names)) 5336 strncpy(buf, cce_misc_names[source], bsize); 5337 else 5338 snprintf(buf, bsize, "Reserved%u", 5339 source + IS_GENERAL_ERR_START); 5340 5341 return buf; 5342 } 5343 5344 /* 5345 * Return the SDMA engine error interrupt name. 5346 */ 5347 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source) 5348 { 5349 snprintf(buf, bsize, "SDmaEngErrInt%u", source); 5350 return buf; 5351 } 5352 5353 /* 5354 * Return the send context error interrupt name. 5355 */ 5356 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source) 5357 { 5358 snprintf(buf, bsize, "SendCtxtErrInt%u", source); 5359 return buf; 5360 } 5361 5362 static const char * const various_names[] = { 5363 "PbcInt", 5364 "GpioAssertInt", 5365 "Qsfp1Int", 5366 "Qsfp2Int", 5367 "TCritInt" 5368 }; 5369 5370 /* 5371 * Return the various interrupt name. 5372 */ 5373 static char *is_various_name(char *buf, size_t bsize, unsigned int source) 5374 { 5375 if (source < ARRAY_SIZE(various_names)) 5376 strncpy(buf, various_names[source], bsize); 5377 else 5378 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START); 5379 return buf; 5380 } 5381 5382 /* 5383 * Return the DC interrupt name. 5384 */ 5385 static char *is_dc_name(char *buf, size_t bsize, unsigned int source) 5386 { 5387 static const char * const dc_int_names[] = { 5388 "common", 5389 "lcb", 5390 "8051", 5391 "lbm" /* local block merge */ 5392 }; 5393 5394 if (source < ARRAY_SIZE(dc_int_names)) 5395 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]); 5396 else 5397 snprintf(buf, bsize, "DCInt%u", source); 5398 return buf; 5399 } 5400 5401 static const char * const sdma_int_names[] = { 5402 "SDmaInt", 5403 "SdmaIdleInt", 5404 "SdmaProgressInt", 5405 }; 5406 5407 /* 5408 * Return the SDMA engine interrupt name. 5409 */ 5410 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source) 5411 { 5412 /* what interrupt */ 5413 unsigned int what = source / TXE_NUM_SDMA_ENGINES; 5414 /* which engine */ 5415 unsigned int which = source % TXE_NUM_SDMA_ENGINES; 5416 5417 if (likely(what < 3)) 5418 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which); 5419 else 5420 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source); 5421 return buf; 5422 } 5423 5424 /* 5425 * Return the receive available interrupt name. 5426 */ 5427 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source) 5428 { 5429 snprintf(buf, bsize, "RcvAvailInt%u", source); 5430 return buf; 5431 } 5432 5433 /* 5434 * Return the receive urgent interrupt name. 5435 */ 5436 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source) 5437 { 5438 snprintf(buf, bsize, "RcvUrgentInt%u", source); 5439 return buf; 5440 } 5441 5442 /* 5443 * Return the send credit interrupt name. 5444 */ 5445 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source) 5446 { 5447 snprintf(buf, bsize, "SendCreditInt%u", source); 5448 return buf; 5449 } 5450 5451 /* 5452 * Return the reserved interrupt name. 
5453 */ 5454 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source) 5455 { 5456 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START); 5457 return buf; 5458 } 5459 5460 static char *cce_err_status_string(char *buf, int buf_len, u64 flags) 5461 { 5462 return flag_string(buf, buf_len, flags, 5463 cce_err_status_flags, 5464 ARRAY_SIZE(cce_err_status_flags)); 5465 } 5466 5467 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags) 5468 { 5469 return flag_string(buf, buf_len, flags, 5470 rxe_err_status_flags, 5471 ARRAY_SIZE(rxe_err_status_flags)); 5472 } 5473 5474 static char *misc_err_status_string(char *buf, int buf_len, u64 flags) 5475 { 5476 return flag_string(buf, buf_len, flags, misc_err_status_flags, 5477 ARRAY_SIZE(misc_err_status_flags)); 5478 } 5479 5480 static char *pio_err_status_string(char *buf, int buf_len, u64 flags) 5481 { 5482 return flag_string(buf, buf_len, flags, 5483 pio_err_status_flags, 5484 ARRAY_SIZE(pio_err_status_flags)); 5485 } 5486 5487 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags) 5488 { 5489 return flag_string(buf, buf_len, flags, 5490 sdma_err_status_flags, 5491 ARRAY_SIZE(sdma_err_status_flags)); 5492 } 5493 5494 static char *egress_err_status_string(char *buf, int buf_len, u64 flags) 5495 { 5496 return flag_string(buf, buf_len, flags, 5497 egress_err_status_flags, 5498 ARRAY_SIZE(egress_err_status_flags)); 5499 } 5500 5501 static char *egress_err_info_string(char *buf, int buf_len, u64 flags) 5502 { 5503 return flag_string(buf, buf_len, flags, 5504 egress_err_info_flags, 5505 ARRAY_SIZE(egress_err_info_flags)); 5506 } 5507 5508 static char *send_err_status_string(char *buf, int buf_len, u64 flags) 5509 { 5510 return flag_string(buf, buf_len, flags, 5511 send_err_status_flags, 5512 ARRAY_SIZE(send_err_status_flags)); 5513 } 5514 5515 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5516 { 5517 char buf[96]; 5518 int i = 0; 5519 5520 /* 5521 * For most these errors, there is nothing that can be done except 5522 * report or record it. 5523 */ 5524 dd_dev_info(dd, "CCE Error: %s\n", 5525 cce_err_status_string(buf, sizeof(buf), reg)); 5526 5527 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) && 5528 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { 5529 /* this error requires a manual drop into SPC freeze mode */ 5530 /* then a fix up */ 5531 start_freeze_handling(dd->pport, FREEZE_SELF); 5532 } 5533 5534 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) { 5535 if (reg & (1ull << i)) { 5536 incr_cntr64(&dd->cce_err_status_cnt[i]); 5537 /* maintain a counter over all cce_err_status errors */ 5538 incr_cntr64(&dd->sw_cce_err_status_aggregate); 5539 } 5540 } 5541 } 5542 5543 /* 5544 * Check counters for receive errors that do not have an interrupt 5545 * associated with them. 
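 *
 * update_rcverr_timer() below re-arms itself every RCVERR_CHECK_TIME
 * seconds, snapshots the C_RCV_OVF device counter, and queues a link
 * bounce if the count advanced while OPA_PI_MASK_EX_BUFFER_OVERRUN is
 * set in the port_error_action mask.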
5546 */ 5547 #define RCVERR_CHECK_TIME 10 5548 static void update_rcverr_timer(struct timer_list *t) 5549 { 5550 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); 5551 struct hfi1_pportdata *ppd = dd->pport; 5552 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); 5553 5554 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && 5555 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { 5556 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); 5557 set_link_down_reason( 5558 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0, 5559 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); 5560 queue_work(ppd->link_wq, &ppd->link_bounce_work); 5561 } 5562 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; 5563 5564 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); 5565 } 5566 5567 static int init_rcverr(struct hfi1_devdata *dd) 5568 { 5569 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); 5570 /* Assume the hardware counter has been reset */ 5571 dd->rcv_ovfl_cnt = 0; 5572 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); 5573 } 5574 5575 static void free_rcverr(struct hfi1_devdata *dd) 5576 { 5577 if (dd->rcverr_timer.function) 5578 del_timer_sync(&dd->rcverr_timer); 5579 } 5580 5581 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5582 { 5583 char buf[96]; 5584 int i = 0; 5585 5586 dd_dev_info(dd, "Receive Error: %s\n", 5587 rxe_err_status_string(buf, sizeof(buf), reg)); 5588 5589 if (reg & ALL_RXE_FREEZE_ERR) { 5590 int flags = 0; 5591 5592 /* 5593 * Freeze mode recovery is disabled for the errors 5594 * in RXE_FREEZE_ABORT_MASK 5595 */ 5596 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) 5597 flags = FREEZE_ABORT; 5598 5599 start_freeze_handling(dd->pport, flags); 5600 } 5601 5602 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) { 5603 if (reg & (1ull << i)) 5604 incr_cntr64(&dd->rcv_err_status_cnt[i]); 5605 } 5606 } 5607 5608 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5609 { 5610 char buf[96]; 5611 int i = 0; 5612 5613 dd_dev_info(dd, "Misc Error: %s", 5614 misc_err_status_string(buf, sizeof(buf), reg)); 5615 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) { 5616 if (reg & (1ull << i)) 5617 incr_cntr64(&dd->misc_err_status_cnt[i]); 5618 } 5619 } 5620 5621 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5622 { 5623 char buf[96]; 5624 int i = 0; 5625 5626 dd_dev_info(dd, "PIO Error: %s\n", 5627 pio_err_status_string(buf, sizeof(buf), reg)); 5628 5629 if (reg & ALL_PIO_FREEZE_ERR) 5630 start_freeze_handling(dd->pport, 0); 5631 5632 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) { 5633 if (reg & (1ull << i)) 5634 incr_cntr64(&dd->send_pio_err_status_cnt[i]); 5635 } 5636 } 5637 5638 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5639 { 5640 char buf[96]; 5641 int i = 0; 5642 5643 dd_dev_info(dd, "SDMA Error: %s\n", 5644 sdma_err_status_string(buf, sizeof(buf), reg)); 5645 5646 if (reg & ALL_SDMA_FREEZE_ERR) 5647 start_freeze_handling(dd->pport, 0); 5648 5649 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) { 5650 if (reg & (1ull << i)) 5651 incr_cntr64(&dd->send_dma_err_status_cnt[i]); 5652 } 5653 } 5654 5655 static inline void __count_port_discards(struct hfi1_pportdata *ppd) 5656 { 5657 incr_cntr64(&ppd->port_xmit_discards); 5658 } 5659 5660 static void count_port_inactive(struct hfi1_devdata *dd) 5661 { 5662 __count_port_discards(dd->pport); 5663 } 5664 5665 /* 5666 * We have had a "disallowed packet" error during egress. 
Determine the 5667 * integrity check which failed, and update relevant error counter, etc. 5668 * 5669 * Note that the SEND_EGRESS_ERR_INFO register has only a single 5670 * bit of state per integrity check, and so we can miss the reason for an 5671 * egress error if more than one packet fails the same integrity check 5672 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO. 5673 */ 5674 static void handle_send_egress_err_info(struct hfi1_devdata *dd, 5675 int vl) 5676 { 5677 struct hfi1_pportdata *ppd = dd->pport; 5678 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ 5679 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO); 5680 char buf[96]; 5681 5682 /* clear down all observed info as quickly as possible after read */ 5683 write_csr(dd, SEND_EGRESS_ERR_INFO, info); 5684 5685 dd_dev_info(dd, 5686 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n", 5687 info, egress_err_info_string(buf, sizeof(buf), info), src); 5688 5689 /* Eventually add other counters for each bit */ 5690 if (info & PORT_DISCARD_EGRESS_ERRS) { 5691 int weight, i; 5692 5693 /* 5694 * Count all applicable bits as individual errors and 5695 * attribute them to the packet that triggered this handler. 5696 * This may not be completely accurate due to limitations 5697 * on the available hardware error information. There is 5698 * a single information register and any number of error 5699 * packets may have occurred and contributed to it before 5700 * this routine is called. This means that: 5701 * a) If multiple packets with the same error occur before 5702 * this routine is called, earlier packets are missed. 5703 * There is only a single bit for each error type. 5704 * b) Errors may not be attributed to the correct VL. 5705 * The driver is attributing all bits in the info register 5706 * to the packet that triggered this call, but bits 5707 * could be an accumulation of different packets with 5708 * different VLs. 5709 * c) A single error packet may have multiple counts attached 5710 * to it. There is no way for the driver to know if 5711 * multiple bits set in the info register are due to a 5712 * single packet or multiple packets. The driver assumes 5713 * multiple packets. 5714 */ 5715 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS); 5716 for (i = 0; i < weight; i++) { 5717 __count_port_discards(ppd); 5718 if (vl >= 0 && vl < TXE_NUM_DATA_VL) 5719 incr_cntr64(&ppd->port_xmit_discards_vl[vl]); 5720 else if (vl == 15) 5721 incr_cntr64(&ppd->port_xmit_discards_vl 5722 [C_VL_15]); 5723 } 5724 } 5725 } 5726 5727 /* 5728 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS 5729 * register. Does it represent a 'port inactive' error? 5730 */ 5731 static inline int port_inactive_err(u64 posn) 5732 { 5733 return (posn >= SEES(TX_LINKDOWN) && 5734 posn <= SEES(TX_INCORRECT_LINK_STATE)); 5735 } 5736 5737 /* 5738 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS 5739 * register. Does it represent a 'disallowed packet' error? 5740 */ 5741 static inline int disallowed_pkt_err(int posn) 5742 { 5743 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) && 5744 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET)); 5745 } 5746 5747 /* 5748 * Input value is a bit position of one of the SDMA engine disallowed 5749 * packet errors. Return which engine. Use of this must be guarded by 5750 * disallowed_pkt_err(). 5751 */ 5752 static inline int disallowed_pkt_engine(int posn) 5753 { 5754 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); 5755 } 5756 5757 /* 5758 * Translate an SDMA engine to a VL. 
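 * (typically the engine reported by disallowed_pkt_engine() above)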
Return -1 if the tranlation cannot 5759 * be done. 5760 */ 5761 static int engine_to_vl(struct hfi1_devdata *dd, int engine) 5762 { 5763 struct sdma_vl_map *m; 5764 int vl; 5765 5766 /* range check */ 5767 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES) 5768 return -1; 5769 5770 rcu_read_lock(); 5771 m = rcu_dereference(dd->sdma_map); 5772 vl = m->engine_to_vl[engine]; 5773 rcu_read_unlock(); 5774 5775 return vl; 5776 } 5777 5778 /* 5779 * Translate the send context (sofware index) into a VL. Return -1 if the 5780 * translation cannot be done. 5781 */ 5782 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) 5783 { 5784 struct send_context_info *sci; 5785 struct send_context *sc; 5786 int i; 5787 5788 sci = &dd->send_contexts[sw_index]; 5789 5790 /* there is no information for user (PSM) and ack contexts */ 5791 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) 5792 return -1; 5793 5794 sc = sci->sc; 5795 if (!sc) 5796 return -1; 5797 if (dd->vld[15].sc == sc) 5798 return 15; 5799 for (i = 0; i < num_vls; i++) 5800 if (dd->vld[i].sc == sc) 5801 return i; 5802 5803 return -1; 5804 } 5805 5806 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5807 { 5808 u64 reg_copy = reg, handled = 0; 5809 char buf[96]; 5810 int i = 0; 5811 5812 if (reg & ALL_TXE_EGRESS_FREEZE_ERR) 5813 start_freeze_handling(dd->pport, 0); 5814 else if (is_ax(dd) && 5815 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) && 5816 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) 5817 start_freeze_handling(dd->pport, 0); 5818 5819 while (reg_copy) { 5820 int posn = fls64(reg_copy); 5821 /* fls64() returns a 1-based offset, we want it zero based */ 5822 int shift = posn - 1; 5823 u64 mask = 1ULL << shift; 5824 5825 if (port_inactive_err(shift)) { 5826 count_port_inactive(dd); 5827 handled |= mask; 5828 } else if (disallowed_pkt_err(shift)) { 5829 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); 5830 5831 handle_send_egress_err_info(dd, vl); 5832 handled |= mask; 5833 } 5834 reg_copy &= ~mask; 5835 } 5836 5837 reg &= ~handled; 5838 5839 if (reg) 5840 dd_dev_info(dd, "Egress Error: %s\n", 5841 egress_err_status_string(buf, sizeof(buf), reg)); 5842 5843 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) { 5844 if (reg & (1ull << i)) 5845 incr_cntr64(&dd->send_egress_err_status_cnt[i]); 5846 } 5847 } 5848 5849 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5850 { 5851 char buf[96]; 5852 int i = 0; 5853 5854 dd_dev_info(dd, "Send Error: %s\n", 5855 send_err_status_string(buf, sizeof(buf), reg)); 5856 5857 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) { 5858 if (reg & (1ull << i)) 5859 incr_cntr64(&dd->send_err_status_cnt[i]); 5860 } 5861 } 5862 5863 /* 5864 * The maximum number of times the error clear down will loop before 5865 * blocking a repeating error. This value is arbitrary. 5866 */ 5867 #define MAX_CLEAR_COUNT 20 5868 5869 /* 5870 * Clear and handle an error register. All error interrupts are funneled 5871 * through here to have a central location to correctly handle single- 5872 * or multi-shot errors. 5873 * 5874 * For non per-context registers, call this routine with a context value 5875 * of 0 so the per-context offset is zero. 5876 * 5877 * If the handler loops too many times, assume that something is wrong 5878 * and can't be fixed, so mask the error bits. 
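 *
 * For example, is_misc_err_int() below passes a context of 0 for the
 * device-global misc error registers, while is_sdma_eng_err_int() passes
 * the SDMA engine number so the per-engine register offset is applied.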
5879 */ 5880 static void interrupt_clear_down(struct hfi1_devdata *dd, 5881 u32 context, 5882 const struct err_reg_info *eri) 5883 { 5884 u64 reg; 5885 u32 count; 5886 5887 /* read in a loop until no more errors are seen */ 5888 count = 0; 5889 while (1) { 5890 reg = read_kctxt_csr(dd, context, eri->status); 5891 if (reg == 0) 5892 break; 5893 write_kctxt_csr(dd, context, eri->clear, reg); 5894 if (likely(eri->handler)) 5895 eri->handler(dd, context, reg); 5896 count++; 5897 if (count > MAX_CLEAR_COUNT) { 5898 u64 mask; 5899 5900 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", 5901 eri->desc, reg); 5902 /* 5903 * Read-modify-write so any other masked bits 5904 * remain masked. 5905 */ 5906 mask = read_kctxt_csr(dd, context, eri->mask); 5907 mask &= ~reg; 5908 write_kctxt_csr(dd, context, eri->mask, mask); 5909 break; 5910 } 5911 } 5912 } 5913 5914 /* 5915 * CCE block "misc" interrupt. Source is < 16. 5916 */ 5917 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) 5918 { 5919 const struct err_reg_info *eri = &misc_errs[source]; 5920 5921 if (eri->handler) { 5922 interrupt_clear_down(dd, 0, eri); 5923 } else { 5924 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", 5925 source); 5926 } 5927 } 5928 5929 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags) 5930 { 5931 return flag_string(buf, buf_len, flags, 5932 sc_err_status_flags, 5933 ARRAY_SIZE(sc_err_status_flags)); 5934 } 5935 5936 /* 5937 * Send context error interrupt. Source (hw_context) is < 160. 5938 * 5939 * All send context errors cause the send context to halt. The normal 5940 * clear-down mechanism cannot be used because we cannot clear the 5941 * error bits until several other long-running items are done first. 5942 * This is OK because with the context halted, nothing else is going 5943 * to happen on it anyway. 5944 */ 5945 static void is_sendctxt_err_int(struct hfi1_devdata *dd, 5946 unsigned int hw_context) 5947 { 5948 struct send_context_info *sci; 5949 struct send_context *sc; 5950 char flags[96]; 5951 u64 status; 5952 u32 sw_index; 5953 int i = 0; 5954 unsigned long irq_flags; 5955 5956 sw_index = dd->hw_to_sw[hw_context]; 5957 if (sw_index >= dd->num_send_contexts) { 5958 dd_dev_err(dd, 5959 "out of range sw index %u for send context %u\n", 5960 sw_index, hw_context); 5961 return; 5962 } 5963 sci = &dd->send_contexts[sw_index]; 5964 spin_lock_irqsave(&dd->sc_lock, irq_flags); 5965 sc = sci->sc; 5966 if (!sc) { 5967 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, 5968 sw_index, hw_context); 5969 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); 5970 return; 5971 } 5972 5973 /* tell the software that a halt has begun */ 5974 sc_stop(sc, SCF_HALTED); 5975 5976 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); 5977 5978 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, 5979 send_context_err_status_string(flags, sizeof(flags), 5980 status)); 5981 5982 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK) 5983 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); 5984 5985 /* 5986 * Automatically restart halted kernel contexts out of interrupt 5987 * context. User contexts must ask the driver to restart the context. 5988 */ 5989 if (sc->type != SC_USER) 5990 queue_work(dd->pport->hfi1_wq, &sc->halt_work); 5991 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); 5992 5993 /* 5994 * Update the counters for the corresponding status bits. 
5995 * Note that these particular counters are aggregated over all 5996 * 160 contexts. 5997 */ 5998 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) { 5999 if (status & (1ull << i)) 6000 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); 6001 } 6002 } 6003 6004 static void handle_sdma_eng_err(struct hfi1_devdata *dd, 6005 unsigned int source, u64 status) 6006 { 6007 struct sdma_engine *sde; 6008 int i = 0; 6009 6010 sde = &dd->per_sdma[source]; 6011 #ifdef CONFIG_SDMA_VERBOSITY 6012 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 6013 slashstrip(__FILE__), __LINE__, __func__); 6014 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", 6015 sde->this_idx, source, (unsigned long long)status); 6016 #endif 6017 sde->err_cnt++; 6018 sdma_engine_error(sde, status); 6019 6020 /* 6021 * Update the counters for the corresponding status bits. 6022 * Note that these particular counters are aggregated over 6023 * all 16 DMA engines. 6024 */ 6025 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) { 6026 if (status & (1ull << i)) 6027 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); 6028 } 6029 } 6030 6031 /* 6032 * CCE block SDMA error interrupt. Source is < 16. 6033 */ 6034 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) 6035 { 6036 #ifdef CONFIG_SDMA_VERBOSITY 6037 struct sdma_engine *sde = &dd->per_sdma[source]; 6038 6039 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 6040 slashstrip(__FILE__), __LINE__, __func__); 6041 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, 6042 source); 6043 sdma_dumpstate(sde); 6044 #endif 6045 interrupt_clear_down(dd, source, &sdma_eng_err); 6046 } 6047 6048 /* 6049 * CCE block "various" interrupt. Source is < 8. 6050 */ 6051 static void is_various_int(struct hfi1_devdata *dd, unsigned int source) 6052 { 6053 const struct err_reg_info *eri = &various_err[source]; 6054 6055 /* 6056 * TCritInt cannot go through interrupt_clear_down() 6057 * because it is not a second tier interrupt. The handler 6058 * should be called directly. 6059 */ 6060 if (source == TCRIT_INT_SOURCE) 6061 handle_temp_err(dd); 6062 else if (eri->handler) 6063 interrupt_clear_down(dd, 0, eri); 6064 else 6065 dd_dev_info(dd, 6066 "%s: Unimplemented/reserved interrupt %d\n", 6067 __func__, source); 6068 } 6069 6070 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) 6071 { 6072 /* src_ctx is always zero */ 6073 struct hfi1_pportdata *ppd = dd->pport; 6074 unsigned long flags; 6075 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); 6076 6077 if (reg & QSFP_HFI0_MODPRST_N) { 6078 if (!qsfp_mod_present(ppd)) { 6079 dd_dev_info(dd, "%s: QSFP module removed\n", 6080 __func__); 6081 6082 ppd->driver_link_ready = 0; 6083 /* 6084 * Cable removed, reset all our information about the 6085 * cache and cable capabilities 6086 */ 6087 6088 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6089 /* 6090 * We don't set cache_refresh_required here as we expect 6091 * an interrupt when a cable is inserted 6092 */ 6093 ppd->qsfp_info.cache_valid = 0; 6094 ppd->qsfp_info.reset_needed = 0; 6095 ppd->qsfp_info.limiting_active = 0; 6096 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 6097 flags); 6098 /* Invert the ModPresent pin now to detect plug-in */ 6099 write_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_INVERT : 6100 ASIC_QSFP1_INVERT, qsfp_int_mgmt); 6101 6102 if ((ppd->offline_disabled_reason > 6103 HFI1_ODR_MASK( 6104 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) || 6105 (ppd->offline_disabled_reason == 6106 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) 6107 ppd->offline_disabled_reason = 6108 HFI1_ODR_MASK( 6109 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); 6110 6111 if (ppd->host_link_state == HLS_DN_POLL) { 6112 /* 6113 * The link is still in POLL. This means 6114 * that the normal link down processing 6115 * will not happen. We have to do it here 6116 * before turning the DC off. 6117 */ 6118 queue_work(ppd->link_wq, &ppd->link_down_work); 6119 } 6120 } else { 6121 dd_dev_info(dd, "%s: QSFP module inserted\n", 6122 __func__); 6123 6124 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6125 ppd->qsfp_info.cache_valid = 0; 6126 ppd->qsfp_info.cache_refresh_required = 1; 6127 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 6128 flags); 6129 6130 /* 6131 * Stop inversion of ModPresent pin to detect 6132 * removal of the cable 6133 */ 6134 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N; 6135 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : 6136 ASIC_QSFP1_INVERT, qsfp_int_mgmt); 6137 6138 ppd->offline_disabled_reason = 6139 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 6140 } 6141 } 6142 6143 if (reg & QSFP_HFI0_INT_N) { 6144 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", 6145 __func__); 6146 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6147 ppd->qsfp_info.check_interrupt_flags = 1; 6148 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); 6149 } 6150 6151 /* Schedule the QSFP work only if there is a cable attached. */ 6152 if (qsfp_mod_present(ppd)) 6153 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); 6154 } 6155 6156 static int request_host_lcb_access(struct hfi1_devdata *dd) 6157 { 6158 int ret; 6159 6160 ret = do_8051_command(dd, HCMD_MISC, 6161 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << 6162 LOAD_DATA_FIELD_ID_SHIFT, NULL); 6163 if (ret != HCMD_SUCCESS) { 6164 dd_dev_err(dd, "%s: command failed with error %d\n", 6165 __func__, ret); 6166 } 6167 return ret == HCMD_SUCCESS ? 0 : -EBUSY; 6168 } 6169 6170 static int request_8051_lcb_access(struct hfi1_devdata *dd) 6171 { 6172 int ret; 6173 6174 ret = do_8051_command(dd, HCMD_MISC, 6175 (u64)HCMD_MISC_GRANT_LCB_ACCESS << 6176 LOAD_DATA_FIELD_ID_SHIFT, NULL); 6177 if (ret != HCMD_SUCCESS) { 6178 dd_dev_err(dd, "%s: command failed with error %d\n", 6179 __func__, ret); 6180 } 6181 return ret == HCMD_SUCCESS ? 0 : -EBUSY; 6182 } 6183 6184 /* 6185 * Set the LCB selector - allow host access. The DCC selector always 6186 * points to the host. 6187 */ 6188 static inline void set_host_lcb_access(struct hfi1_devdata *dd) 6189 { 6190 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, 6191 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK | 6192 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); 6193 } 6194 6195 /* 6196 * Clear the LCB selector - allow 8051 access. The DCC selector always 6197 * points to the host. 6198 */ 6199 static inline void set_8051_lcb_access(struct hfi1_devdata *dd) 6200 { 6201 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, 6202 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); 6203 } 6204 6205 /* 6206 * Acquire LCB access from the 8051. If the host already has access, 6207 * just increment a counter. Otherwise, inform the 8051 that the 6208 * host is taking access. 
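 *
 * A typical caller pairs this with release_lcb_access(); the sketch below
 * is illustrative only, no such caller is defined here:
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read or write LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}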
6209 * 6210 * Returns: 6211 * 0 on success 6212 * -EBUSY if the 8051 has control and cannot be disturbed 6213 * -errno if unable to acquire access from the 8051 6214 */ 6215 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) 6216 { 6217 struct hfi1_pportdata *ppd = dd->pport; 6218 int ret = 0; 6219 6220 /* 6221 * Use the host link state lock so the operation of this routine 6222 * { link state check, selector change, count increment } can occur 6223 * as a unit against a link state change. Otherwise there is a 6224 * race between the state change and the count increment. 6225 */ 6226 if (sleep_ok) { 6227 mutex_lock(&ppd->hls_lock); 6228 } else { 6229 while (!mutex_trylock(&ppd->hls_lock)) 6230 udelay(1); 6231 } 6232 6233 /* this access is valid only when the link is up */ 6234 if (ppd->host_link_state & HLS_DOWN) { 6235 dd_dev_info(dd, "%s: link state %s not up\n", 6236 __func__, link_state_name(ppd->host_link_state)); 6237 ret = -EBUSY; 6238 goto done; 6239 } 6240 6241 if (dd->lcb_access_count == 0) { 6242 ret = request_host_lcb_access(dd); 6243 if (ret) { 6244 dd_dev_err(dd, 6245 "%s: unable to acquire LCB access, err %d\n", 6246 __func__, ret); 6247 goto done; 6248 } 6249 set_host_lcb_access(dd); 6250 } 6251 dd->lcb_access_count++; 6252 done: 6253 mutex_unlock(&ppd->hls_lock); 6254 return ret; 6255 } 6256 6257 /* 6258 * Release LCB access by decrementing the use count. If the count is moving 6259 * from 1 to 0, inform 8051 that it has control back. 6260 * 6261 * Returns: 6262 * 0 on success 6263 * -errno if unable to release access to the 8051 6264 */ 6265 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) 6266 { 6267 int ret = 0; 6268 6269 /* 6270 * Use the host link state lock because the acquire needed it. 6271 * Here, we only need to keep { selector change, count decrement } 6272 * as a unit. 6273 */ 6274 if (sleep_ok) { 6275 mutex_lock(&dd->pport->hls_lock); 6276 } else { 6277 while (!mutex_trylock(&dd->pport->hls_lock)) 6278 udelay(1); 6279 } 6280 6281 if (dd->lcb_access_count == 0) { 6282 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n", 6283 __func__); 6284 goto done; 6285 } 6286 6287 if (dd->lcb_access_count == 1) { 6288 set_8051_lcb_access(dd); 6289 ret = request_8051_lcb_access(dd); 6290 if (ret) { 6291 dd_dev_err(dd, 6292 "%s: unable to release LCB access, err %d\n", 6293 __func__, ret); 6294 /* restore host access if the grant didn't work */ 6295 set_host_lcb_access(dd); 6296 goto done; 6297 } 6298 } 6299 dd->lcb_access_count--; 6300 done: 6301 mutex_unlock(&dd->pport->hls_lock); 6302 return ret; 6303 } 6304 6305 /* 6306 * Initialize LCB access variables and state. Called during driver load, 6307 * after most of the initialization is finished. 6308 * 6309 * The DC default is LCB access on for the host. The driver defaults to 6310 * leaving access to the 8051. Assign access now - this constrains the call 6311 * to this routine to be after all LCB set-up is done. In particular, after 6312 * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts() 6313 */ 6314 static void init_lcb_access(struct hfi1_devdata *dd) 6315 { 6316 dd->lcb_access_count = 0; 6317 } 6318 6319 /* 6320 * Write a response back to a 8051 request. 
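 *
 * For example, handle_8051_request() below acknowledges an LCB reset
 * request with hreq_response(dd, HREQ_SUCCESS, 0) and rejects unsupported
 * request types with hreq_response(dd, HREQ_NOT_SUPPORTED, 0).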
6321 */ 6322 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) 6323 { 6324 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 6325 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK | 6326 (u64)return_code << 6327 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT | 6328 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); 6329 } 6330 6331 /* 6332 * Handle host requests from the 8051. 6333 */ 6334 static void handle_8051_request(struct hfi1_pportdata *ppd) 6335 { 6336 struct hfi1_devdata *dd = ppd->dd; 6337 u64 reg; 6338 u16 data = 0; 6339 u8 type; 6340 6341 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); 6342 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0) 6343 return; /* no request */ 6344 6345 /* zero out COMPLETED so the response is seen */ 6346 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0); 6347 6348 /* extract request details */ 6349 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT) 6350 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK; 6351 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT) 6352 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK; 6353 6354 switch (type) { 6355 case HREQ_LOAD_CONFIG: 6356 case HREQ_SAVE_CONFIG: 6357 case HREQ_READ_CONFIG: 6358 case HREQ_SET_TX_EQ_ABS: 6359 case HREQ_SET_TX_EQ_REL: 6360 case HREQ_ENABLE: 6361 dd_dev_info(dd, "8051 request: request 0x%x not supported\n", 6362 type); 6363 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); 6364 break; 6365 case HREQ_LCB_RESET: 6366 /* Put the LCB, RX FPE and TX FPE into reset */ 6367 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); 6368 /* Make sure the write completed */ 6369 (void)read_csr(dd, DCC_CFG_RESET); 6370 /* Hold the reset long enough to take effect */ 6371 udelay(1); 6372 /* Take the LCB, RX FPE and TX FPE out of reset */ 6373 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); 6374 hreq_response(dd, HREQ_SUCCESS, 0); 6375 6376 break; 6377 case HREQ_CONFIG_DONE: 6378 hreq_response(dd, HREQ_SUCCESS, 0); 6379 break; 6380 6381 case HREQ_INTERFACE_TEST: 6382 hreq_response(dd, HREQ_SUCCESS, data); 6383 break; 6384 default: 6385 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type); 6386 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); 6387 break; 6388 } 6389 } 6390 6391 /* 6392 * Set up allocation unit vaulue. 6393 */ 6394 void set_up_vau(struct hfi1_devdata *dd, u8 vau) 6395 { 6396 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6397 6398 /* do not modify other values in the register */ 6399 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; 6400 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; 6401 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6402 } 6403 6404 /* 6405 * Set up initial VL15 credits of the remote. Assumes the rest of 6406 * the CM credit registers are zero from a previous global or credit reset. 6407 * Shared limit for VL15 will always be 0. 6408 */ 6409 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) 6410 { 6411 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6412 6413 /* set initial values for total and shared credit limit */ 6414 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | 6415 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); 6416 6417 /* 6418 * Set total limit to be equal to VL15 credits. 6419 * Leave shared limit at 0. 
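 *
 * That is: the TOTAL_CREDIT_LIMIT field of SEND_CM_GLOBAL_CREDIT and the
 * DEDICATED_LIMIT_VL field of SEND_CM_CREDIT_VL15 are both written with
 * vl15buf below, and the SHARED_LIMIT field is left at 0.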
6420 */ 6421 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 6422 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6423 6424 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6425 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6426 } 6427 6428 /* 6429 * Zero all credit details from the previous connection and 6430 * reset the CM manager's internal counters. 6431 */ 6432 void reset_link_credits(struct hfi1_devdata *dd) 6433 { 6434 int i; 6435 6436 /* remove all previous VL credit limits */ 6437 for (i = 0; i < TXE_NUM_DATA_VL; i++) 6438 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 6439 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 6440 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); 6441 /* reset the CM block */ 6442 pio_send_control(dd, PSC_CM_RESET); 6443 /* reset cached value */ 6444 dd->vl15buf_cached = 0; 6445 } 6446 6447 /* convert a vCU to a CU */ 6448 static u32 vcu_to_cu(u8 vcu) 6449 { 6450 return 1 << vcu; 6451 } 6452 6453 /* convert a CU to a vCU */ 6454 static u8 cu_to_vcu(u32 cu) 6455 { 6456 return ilog2(cu); 6457 } 6458 6459 /* convert a vAU to an AU */ 6460 static u32 vau_to_au(u8 vau) 6461 { 6462 return 8 * (1 << vau); 6463 } 6464 6465 static void set_linkup_defaults(struct hfi1_pportdata *ppd) 6466 { 6467 ppd->sm_trap_qp = 0x0; 6468 ppd->sa_qp = 0x1; 6469 } 6470 6471 /* 6472 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset. 6473 */ 6474 static void lcb_shutdown(struct hfi1_devdata *dd, int abort) 6475 { 6476 u64 reg; 6477 6478 /* clear lcb run: LCB_CFG_RUN.EN = 0 */ 6479 write_csr(dd, DC_LCB_CFG_RUN, 0); 6480 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */ 6481 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 6482 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT); 6483 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */ 6484 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); 6485 reg = read_csr(dd, DCC_CFG_RESET); 6486 write_csr(dd, DCC_CFG_RESET, reg | 6487 DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE); 6488 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ 6489 if (!abort) { 6490 udelay(1); /* must hold for the longer of 16cclks or 20ns */ 6491 write_csr(dd, DCC_CFG_RESET, reg); 6492 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); 6493 } 6494 } 6495 6496 /* 6497 * This routine should be called after the link has been transitioned to 6498 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into 6499 * reset). 6500 * 6501 * The expectation is that the caller of this routine would have taken 6502 * care of properly transitioning the link into the correct state. 6503 * NOTE: the caller needs to acquire the dd->dc8051_lock lock 6504 * before calling this function. 6505 */ 6506 static void _dc_shutdown(struct hfi1_devdata *dd) 6507 { 6508 lockdep_assert_held(&dd->dc8051_lock); 6509 6510 if (dd->dc_shutdown) 6511 return; 6512 6513 dd->dc_shutdown = 1; 6514 /* Shutdown the LCB */ 6515 lcb_shutdown(dd, 1); 6516 /* 6517 * Going to OFFLINE would have causes the 8051 to put the 6518 * SerDes into reset already. Just need to shut down the 8051, 6519 * itself. 6520 */ 6521 write_csr(dd, DC_DC8051_CFG_RST, 0x1); 6522 } 6523 6524 static void dc_shutdown(struct hfi1_devdata *dd) 6525 { 6526 mutex_lock(&dd->dc8051_lock); 6527 _dc_shutdown(dd); 6528 mutex_unlock(&dd->dc8051_lock); 6529 } 6530 6531 /* 6532 * Calling this after the DC has been brought out of reset should not 6533 * do any damage. 6534 * NOTE: the caller needs to acquire the dd->dc8051_lock lock 6535 * before calling this function. 
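 *
 * Most callers should use the dc_start() wrapper below, which takes and
 * releases dd->dc8051_lock around this call.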
6536 */ 6537 static void _dc_start(struct hfi1_devdata *dd) 6538 { 6539 lockdep_assert_held(&dd->dc8051_lock); 6540 6541 if (!dd->dc_shutdown) 6542 return; 6543 6544 /* Take the 8051 out of reset */ 6545 write_csr(dd, DC_DC8051_CFG_RST, 0ull); 6546 /* Wait until 8051 is ready */ 6547 if (wait_fm_ready(dd, TIMEOUT_8051_START)) 6548 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", 6549 __func__); 6550 6551 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */ 6552 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); 6553 /* lcb_shutdown() with abort=1 does not restore these */ 6554 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); 6555 dd->dc_shutdown = 0; 6556 } 6557 6558 static void dc_start(struct hfi1_devdata *dd) 6559 { 6560 mutex_lock(&dd->dc8051_lock); 6561 _dc_start(dd); 6562 mutex_unlock(&dd->dc8051_lock); 6563 } 6564 6565 /* 6566 * These LCB adjustments are for the Aurora SerDes core in the FPGA. 6567 */ 6568 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) 6569 { 6570 u64 rx_radr, tx_radr; 6571 u32 version; 6572 6573 if (dd->icode != ICODE_FPGA_EMULATION) 6574 return; 6575 6576 /* 6577 * These LCB defaults on emulator _s are good, nothing to do here: 6578 * LCB_CFG_TX_FIFOS_RADR 6579 * LCB_CFG_RX_FIFOS_RADR 6580 * LCB_CFG_LN_DCLK 6581 * LCB_CFG_IGNORE_LOST_RCLK 6582 */ 6583 if (is_emulator_s(dd)) 6584 return; 6585 /* else this is _p */ 6586 6587 version = emulator_rev(dd); 6588 if (!is_ax(dd)) 6589 version = 0x2d; /* all B0 use 0x2d or higher settings */ 6590 6591 if (version <= 0x12) { 6592 /* release 0x12 and below */ 6593 6594 /* 6595 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9 6596 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9 6597 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa 6598 */ 6599 rx_radr = 6600 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6601 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6602 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6603 /* 6604 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default) 6605 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6 6606 */ 6607 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6608 } else if (version <= 0x18) { 6609 /* release 0x13 up to 0x18 */ 6610 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ 6611 rx_radr = 6612 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6613 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6614 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6615 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6616 } else if (version == 0x19) { 6617 /* release 0x19 */ 6618 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */ 6619 rx_radr = 6620 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6621 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6622 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6623 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6624 } else if (version == 0x1a) { 6625 /* release 0x1a */ 6626 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ 6627 rx_radr = 6628 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6629 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6630 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6631 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6632 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull); 6633 } else { 6634 /* release 0x1b and higher */ 6635 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */ 6636 rx_radr = 6637 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6638 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6639 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6640 
tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6641 } 6642 6643 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); 6644 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */ 6645 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 6646 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); 6647 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); 6648 } 6649 6650 /* 6651 * Handle a SMA idle message 6652 * 6653 * This is a work-queue function outside of the interrupt. 6654 */ 6655 void handle_sma_message(struct work_struct *work) 6656 { 6657 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6658 sma_message_work); 6659 struct hfi1_devdata *dd = ppd->dd; 6660 u64 msg; 6661 int ret; 6662 6663 /* 6664 * msg is bytes 1-4 of the 40-bit idle message - the command code 6665 * is stripped off 6666 */ 6667 ret = read_idle_sma(dd, &msg); 6668 if (ret) 6669 return; 6670 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg); 6671 /* 6672 * React to the SMA message. Byte[1] (0 for us) is the command. 6673 */ 6674 switch (msg & 0xff) { 6675 case SMA_IDLE_ARM: 6676 /* 6677 * See OPAv1 table 9-14 - HFI and External Switch Ports Key 6678 * State Transitions 6679 * 6680 * Only expected in INIT or ARMED, discard otherwise. 6681 */ 6682 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) 6683 ppd->neighbor_normal = 1; 6684 break; 6685 case SMA_IDLE_ACTIVE: 6686 /* 6687 * See OPAv1 table 9-14 - HFI and External Switch Ports Key 6688 * State Transitions 6689 * 6690 * Can activate the node. Discard otherwise. 6691 */ 6692 if (ppd->host_link_state == HLS_UP_ARMED && 6693 ppd->is_active_optimize_enabled) { 6694 ppd->neighbor_normal = 1; 6695 ret = set_link_state(ppd, HLS_UP_ACTIVE); 6696 if (ret) 6697 dd_dev_err( 6698 dd, 6699 "%s: received Active SMA idle message, couldn't set link to Active\n", 6700 __func__); 6701 } 6702 break; 6703 default: 6704 dd_dev_err(dd, 6705 "%s: received unexpected SMA idle message 0x%llx\n", 6706 __func__, msg); 6707 break; 6708 } 6709 } 6710 6711 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) 6712 { 6713 u64 rcvctrl; 6714 unsigned long flags; 6715 6716 spin_lock_irqsave(&dd->rcvctrl_lock, flags); 6717 rcvctrl = read_csr(dd, RCV_CTRL); 6718 rcvctrl |= add; 6719 rcvctrl &= ~clear; 6720 write_csr(dd, RCV_CTRL, rcvctrl); 6721 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); 6722 } 6723 6724 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) 6725 { 6726 adjust_rcvctrl(dd, add, 0); 6727 } 6728 6729 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) 6730 { 6731 adjust_rcvctrl(dd, 0, clear); 6732 } 6733 6734 /* 6735 * Called from all interrupt handlers to start handling an SPC freeze. 6736 */ 6737 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) 6738 { 6739 struct hfi1_devdata *dd = ppd->dd; 6740 struct send_context *sc; 6741 int i; 6742 int sc_flags; 6743 6744 if (flags & FREEZE_SELF) 6745 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6746 6747 /* enter frozen mode */ 6748 dd->flags |= HFI1_FROZEN; 6749 6750 /* notify all SDMA engines that they are going into a freeze */ 6751 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); 6752 6753 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? 6754 SCF_LINK_DOWN : 0); 6755 /* do halt pre-handling on all enabled send contexts */ 6756 for (i = 0; i < dd->num_send_contexts; i++) { 6757 sc = dd->send_contexts[i].sc; 6758 if (sc && (sc->flags & SCF_ENABLED)) 6759 sc_stop(sc, sc_flags); 6760 } 6761 6762 /* Send context are frozen. 
Notify user space */ 6763 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT); 6764 6765 if (flags & FREEZE_ABORT) { 6766 dd_dev_err(dd, 6767 "Aborted freeze recovery. Please REBOOT system\n"); 6768 return; 6769 } 6770 /* queue non-interrupt handler */ 6771 queue_work(ppd->hfi1_wq, &ppd->freeze_work); 6772 } 6773 6774 /* 6775 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen, 6776 * depending on the "freeze" parameter. 6777 * 6778 * No need to return an error if it times out, our only option 6779 * is to proceed anyway. 6780 */ 6781 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) 6782 { 6783 unsigned long timeout; 6784 u64 reg; 6785 6786 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT); 6787 while (1) { 6788 reg = read_csr(dd, CCE_STATUS); 6789 if (freeze) { 6790 /* waiting until all indicators are set */ 6791 if ((reg & ALL_FROZE) == ALL_FROZE) 6792 return; /* all done */ 6793 } else { 6794 /* waiting until all indicators are clear */ 6795 if ((reg & ALL_FROZE) == 0) 6796 return; /* all done */ 6797 } 6798 6799 if (time_after(jiffies, timeout)) { 6800 dd_dev_err(dd, 6801 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing", 6802 freeze ? "" : "un", reg & ALL_FROZE, 6803 freeze ? ALL_FROZE : 0ull); 6804 return; 6805 } 6806 usleep_range(80, 120); 6807 } 6808 } 6809 6810 /* 6811 * Do all freeze handling for the RXE block. 6812 */ 6813 static void rxe_freeze(struct hfi1_devdata *dd) 6814 { 6815 int i; 6816 struct hfi1_ctxtdata *rcd; 6817 6818 /* disable port */ 6819 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6820 6821 /* disable all receive contexts */ 6822 for (i = 0; i < dd->num_rcv_contexts; i++) { 6823 rcd = hfi1_rcd_get_by_index(dd, i); 6824 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); 6825 hfi1_rcd_put(rcd); 6826 } 6827 } 6828 6829 /* 6830 * Unfreeze handling for the RXE block - kernel contexts only. 6831 * This will also enable the port. User contexts will do unfreeze 6832 * handling on a per-context basis as they call into the driver. 6833 * 6834 */ 6835 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) 6836 { 6837 u32 rcvmask; 6838 u16 i; 6839 struct hfi1_ctxtdata *rcd; 6840 6841 /* enable all kernel contexts */ 6842 for (i = 0; i < dd->num_rcv_contexts; i++) { 6843 rcd = hfi1_rcd_get_by_index(dd, i); 6844 6845 /* Ensure all non-user contexts(including vnic) are enabled */ 6846 if (!rcd || 6847 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { 6848 hfi1_rcd_put(rcd); 6849 continue; 6850 } 6851 rcvmask = HFI1_RCVCTRL_CTXT_ENB; 6852 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ 6853 rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? 6854 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; 6855 hfi1_rcvctrl(dd, rcvmask, rcd); 6856 hfi1_rcd_put(rcd); 6857 } 6858 6859 /* enable port */ 6860 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6861 } 6862 6863 /* 6864 * Non-interrupt SPC freeze handling. 6865 * 6866 * This is a work-queue function outside of the triggering interrupt. 
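 *
 * In outline: wait for every block to report frozen in CCE_STATUS, run
 * the PIO, SDMA and RXE freeze steps, clear the freeze and wait for the
 * frozen bits to clear (twice on is_ax() parts), then unfreeze the
 * kernel send and receive contexts and finally drop HFI1_FROZEN.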
6867 */ 6868 void handle_freeze(struct work_struct *work) 6869 { 6870 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6871 freeze_work); 6872 struct hfi1_devdata *dd = ppd->dd; 6873 6874 /* wait for freeze indicators on all affected blocks */ 6875 wait_for_freeze_status(dd, 1); 6876 6877 /* SPC is now frozen */ 6878 6879 /* do send PIO freeze steps */ 6880 pio_freeze(dd); 6881 6882 /* do send DMA freeze steps */ 6883 sdma_freeze(dd); 6884 6885 /* do send egress freeze steps - nothing to do */ 6886 6887 /* do receive freeze steps */ 6888 rxe_freeze(dd); 6889 6890 /* 6891 * Unfreeze the hardware - clear the freeze, wait for each 6892 * block's frozen bit to clear, then clear the frozen flag. 6893 */ 6894 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); 6895 wait_for_freeze_status(dd, 0); 6896 6897 if (is_ax(dd)) { 6898 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6899 wait_for_freeze_status(dd, 1); 6900 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); 6901 wait_for_freeze_status(dd, 0); 6902 } 6903 6904 /* do send PIO unfreeze steps for kernel contexts */ 6905 pio_kernel_unfreeze(dd); 6906 6907 /* do send DMA unfreeze steps */ 6908 sdma_unfreeze(dd); 6909 6910 /* do send egress unfreeze steps - nothing to do */ 6911 6912 /* do receive unfreeze steps for kernel contexts */ 6913 rxe_kernel_unfreeze(dd); 6914 6915 /* 6916 * The unfreeze procedure touches global device registers when 6917 * it disables and re-enables RXE. Mark the device unfrozen 6918 * after all that is done so other parts of the driver waiting 6919 * for the device to unfreeze don't do things out of order. 6920 * 6921 * The above implies that the meaning of HFI1_FROZEN flag is 6922 * "Device has gone into freeze mode and freeze mode handling 6923 * is still in progress." 6924 * 6925 * The flag will be removed when freeze mode processing has 6926 * completed. 6927 */ 6928 dd->flags &= ~HFI1_FROZEN; 6929 wake_up(&dd->event_queue); 6930 6931 /* no longer frozen */ 6932 } 6933 6934 /** 6935 * update_xmit_counters - update PortXmitWait/PortVlXmitWait 6936 * counters. 6937 * @ppd: info of physical Hfi port 6938 * @link_width: new link width after link up or downgrade 6939 * 6940 * Update the PortXmitWait and PortVlXmitWait counters after 6941 * a link up or downgrade event to reflect a link width change. 6942 */ 6943 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width) 6944 { 6945 int i; 6946 u16 tx_width; 6947 u16 link_speed; 6948 6949 tx_width = tx_link_width(link_width); 6950 link_speed = get_link_speed(ppd->link_speed_active); 6951 6952 /* 6953 * There are C_VL_COUNT number of PortVLXmitWait counters. 6954 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 6955 */ 6956 for (i = 0; i < C_VL_COUNT + 1; i++) 6957 get_xmit_wait_counters(ppd, tx_width, link_speed, i); 6958 } 6959 6960 /* 6961 * Handle a link up interrupt from the 8051. 6962 * 6963 * This is a work-queue function outside of the interrupt. 6964 */ 6965 void handle_link_up(struct work_struct *work) 6966 { 6967 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6968 link_up_work); 6969 struct hfi1_devdata *dd = ppd->dd; 6970 6971 set_link_state(ppd, HLS_UP_INIT); 6972 6973 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 6974 read_ltp_rtt(dd); 6975 /* 6976 * OPA specifies that certain counters are cleared on a transition 6977 * to link up, so do that. 6978 */ 6979 clear_linkup_counters(dd); 6980 /* 6981 * And (re)set link up default values. 
6982 */ 6983 set_linkup_defaults(ppd); 6984 6985 /* 6986 * Set VL15 credits. Use cached value from verify cap interrupt. 6987 * In case of quick linkup or simulator, vl15 value will be set by 6988 * handle_linkup_change. VerifyCap interrupt handler will not be 6989 * called in those scenarios. 6990 */ 6991 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) 6992 set_up_vl15(dd, dd->vl15buf_cached); 6993 6994 /* enforce link speed enabled */ 6995 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { 6996 /* oops - current speed is not enabled, bounce */ 6997 dd_dev_err(dd, 6998 "Link speed active 0x%x is outside enabled 0x%x, downing link\n", 6999 ppd->link_speed_active, ppd->link_speed_enabled); 7000 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, 7001 OPA_LINKDOWN_REASON_SPEED_POLICY); 7002 set_link_state(ppd, HLS_DN_OFFLINE); 7003 start_link(ppd); 7004 } 7005 } 7006 7007 /* 7008 * Several pieces of LNI information were cached for SMA in ppd. 7009 * Reset these on link down 7010 */ 7011 static void reset_neighbor_info(struct hfi1_pportdata *ppd) 7012 { 7013 ppd->neighbor_guid = 0; 7014 ppd->neighbor_port_number = 0; 7015 ppd->neighbor_type = 0; 7016 ppd->neighbor_fm_security = 0; 7017 } 7018 7019 static const char * const link_down_reason_strs[] = { 7020 [OPA_LINKDOWN_REASON_NONE] = "None", 7021 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0", 7022 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length", 7023 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long", 7024 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short", 7025 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID", 7026 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID", 7027 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2", 7028 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC", 7029 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8", 7030 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail", 7031 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10", 7032 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error", 7033 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15", 7034 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker", 7035 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14", 7036 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15", 7037 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance", 7038 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance", 7039 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance", 7040 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack", 7041 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker", 7042 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt", 7043 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit", 7044 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit", 7045 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24", 7046 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25", 7047 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26", 7048 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27", 7049 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28", 7050 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29", 7051 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30", 7052 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] = 7053 "Excessive buffer overrun", 7054 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown", 7055 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot", 7056 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown", 7057 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce", 7058 
[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy", 7059 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy", 7060 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected", 7061 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] = 7062 "Local media not installed", 7063 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed", 7064 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config", 7065 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] = 7066 "End to end not installed", 7067 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy", 7068 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy", 7069 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy", 7070 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management", 7071 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled", 7072 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient" 7073 }; 7074 7075 /* return the neighbor link down reason string */ 7076 static const char *link_down_reason_str(u8 reason) 7077 { 7078 const char *str = NULL; 7079 7080 if (reason < ARRAY_SIZE(link_down_reason_strs)) 7081 str = link_down_reason_strs[reason]; 7082 if (!str) 7083 str = "(invalid)"; 7084 7085 return str; 7086 } 7087 7088 /* 7089 * Handle a link down interrupt from the 8051. 7090 * 7091 * This is a work-queue function outside of the interrupt. 7092 */ 7093 void handle_link_down(struct work_struct *work) 7094 { 7095 u8 lcl_reason, neigh_reason = 0; 7096 u8 link_down_reason; 7097 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7098 link_down_work); 7099 int was_up; 7100 static const char ldr_str[] = "Link down reason: "; 7101 7102 if ((ppd->host_link_state & 7103 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) && 7104 ppd->port_type == PORT_TYPE_FIXED) 7105 ppd->offline_disabled_reason = 7106 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED); 7107 7108 /* Go offline first, then deal with reading/writing through 8051 */ 7109 was_up = !!(ppd->host_link_state & HLS_UP); 7110 set_link_state(ppd, HLS_DN_OFFLINE); 7111 xchg(&ppd->is_link_down_queued, 0); 7112 7113 if (was_up) { 7114 lcl_reason = 0; 7115 /* link down reason is only valid if the link was up */ 7116 read_link_down_reason(ppd->dd, &link_down_reason); 7117 switch (link_down_reason) { 7118 case LDR_LINK_TRANSFER_ACTIVE_LOW: 7119 /* the link went down, no idle message reason */ 7120 dd_dev_info(ppd->dd, "%sUnexpected link down\n", 7121 ldr_str); 7122 break; 7123 case LDR_RECEIVED_LINKDOWN_IDLE_MSG: 7124 /* 7125 * The neighbor reason is only valid if an idle message 7126 * was received for it. 7127 */ 7128 read_planned_down_reason_code(ppd->dd, &neigh_reason); 7129 dd_dev_info(ppd->dd, 7130 "%sNeighbor link down message %d, %s\n", 7131 ldr_str, neigh_reason, 7132 link_down_reason_str(neigh_reason)); 7133 break; 7134 case LDR_RECEIVED_HOST_OFFLINE_REQ: 7135 dd_dev_info(ppd->dd, 7136 "%sHost requested link to go offline\n", 7137 ldr_str); 7138 break; 7139 default: 7140 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", 7141 ldr_str, link_down_reason); 7142 break; 7143 } 7144 7145 /* 7146 * If no reason, assume peer-initiated but missed 7147 * LinkGoingDown idle flits. 
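 * (neigh_reason is still 0 here unless the LinkGoingDown idle-message
 * case above filled it in via read_planned_down_reason_code().)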
7148 */ 7149 if (neigh_reason == 0) 7150 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN; 7151 } else { 7152 /* went down while polling or going up */ 7153 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT; 7154 } 7155 7156 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0); 7157 7158 /* inform the SMA when the link transitions from up to down */ 7159 if (was_up && ppd->local_link_down_reason.sma == 0 && 7160 ppd->neigh_link_down_reason.sma == 0) { 7161 ppd->local_link_down_reason.sma = 7162 ppd->local_link_down_reason.latest; 7163 ppd->neigh_link_down_reason.sma = 7164 ppd->neigh_link_down_reason.latest; 7165 } 7166 7167 reset_neighbor_info(ppd); 7168 7169 /* disable the port */ 7170 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 7171 7172 /* 7173 * If there is no cable attached, turn the DC off. Otherwise, 7174 * start the link bring up. 7175 */ 7176 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) 7177 dc_shutdown(ppd->dd); 7178 else 7179 start_link(ppd); 7180 } 7181 7182 void handle_link_bounce(struct work_struct *work) 7183 { 7184 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7185 link_bounce_work); 7186 7187 /* 7188 * Only do something if the link is currently up. 7189 */ 7190 if (ppd->host_link_state & HLS_UP) { 7191 set_link_state(ppd, HLS_DN_OFFLINE); 7192 start_link(ppd); 7193 } else { 7194 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", 7195 __func__, link_state_name(ppd->host_link_state)); 7196 } 7197 } 7198 7199 /* 7200 * Mask conversion: Capability exchange to Port LTP. The capability 7201 * exchange has an implicit 16b CRC that is mandatory. 7202 */ 7203 static int cap_to_port_ltp(int cap) 7204 { 7205 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */ 7206 7207 if (cap & CAP_CRC_14B) 7208 port_ltp |= PORT_LTP_CRC_MODE_14; 7209 if (cap & CAP_CRC_48B) 7210 port_ltp |= PORT_LTP_CRC_MODE_48; 7211 if (cap & CAP_CRC_12B_16B_PER_LANE) 7212 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE; 7213 7214 return port_ltp; 7215 } 7216 7217 /* 7218 * Convert an OPA Port LTP mask to capability mask 7219 */ 7220 int port_ltp_to_cap(int port_ltp) 7221 { 7222 int cap_mask = 0; 7223 7224 if (port_ltp & PORT_LTP_CRC_MODE_14) 7225 cap_mask |= CAP_CRC_14B; 7226 if (port_ltp & PORT_LTP_CRC_MODE_48) 7227 cap_mask |= CAP_CRC_48B; 7228 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE) 7229 cap_mask |= CAP_CRC_12B_16B_PER_LANE; 7230 7231 return cap_mask; 7232 } 7233 7234 /* 7235 * Convert a single DC LCB CRC mode to an OPA Port LTP mask. 7236 */ 7237 static int lcb_to_port_ltp(int lcb_crc) 7238 { 7239 int port_ltp = 0; 7240 7241 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE) 7242 port_ltp = PORT_LTP_CRC_MODE_PER_LANE; 7243 else if (lcb_crc == LCB_CRC_48B) 7244 port_ltp = PORT_LTP_CRC_MODE_48; 7245 else if (lcb_crc == LCB_CRC_14B) 7246 port_ltp = PORT_LTP_CRC_MODE_14; 7247 else 7248 port_ltp = PORT_LTP_CRC_MODE_16; 7249 7250 return port_ltp; 7251 } 7252 7253 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) 7254 { 7255 if (ppd->pkeys[2] != 0) { 7256 ppd->pkeys[2] = 0; 7257 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7258 hfi1_event_pkey_change(ppd->dd, ppd->port); 7259 } 7260 } 7261 7262 /* 7263 * Convert the given link width to the OPA link width bitmask. 7264 */ 7265 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) 7266 { 7267 switch (width) { 7268 case 0: 7269 /* 7270 * Simulator and quick linkup do not set the width. 7271 * Just set it to 4x without complaint. 
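 * A width of 0 from real hardware, by contrast, means no lanes came
 * up, hence the bare return of 0 below.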
7272 */ 7273 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) 7274 return OPA_LINK_WIDTH_4X; 7275 return 0; /* no lanes up */ 7276 case 1: return OPA_LINK_WIDTH_1X; 7277 case 2: return OPA_LINK_WIDTH_2X; 7278 case 3: return OPA_LINK_WIDTH_3X; 7279 case 4: return OPA_LINK_WIDTH_4X; 7280 default: 7281 dd_dev_info(dd, "%s: invalid width %d, using 4\n", 7282 __func__, width); 7283 return OPA_LINK_WIDTH_4X; 7284 } 7285 } 7286 7287 /* 7288 * Do a population count on the bottom nibble. 7289 */ 7290 static const u8 bit_counts[16] = { 7291 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 7292 }; 7293 7294 static inline u8 nibble_to_count(u8 nibble) 7295 { 7296 return bit_counts[nibble & 0xf]; 7297 } 7298 7299 /* 7300 * Read the active lane information from the 8051 registers and return 7301 * their widths. 7302 * 7303 * Active lane information is found in these 8051 registers: 7304 * enable_lane_tx 7305 * enable_lane_rx 7306 */ 7307 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, 7308 u16 *rx_width) 7309 { 7310 u16 tx, rx; 7311 u8 enable_lane_rx; 7312 u8 enable_lane_tx; 7313 u8 tx_polarity_inversion; 7314 u8 rx_polarity_inversion; 7315 u8 max_rate; 7316 7317 /* read the active lanes */ 7318 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, 7319 &rx_polarity_inversion, &max_rate); 7320 read_local_lni(dd, &enable_lane_rx); 7321 7322 /* convert to counts */ 7323 tx = nibble_to_count(enable_lane_tx); 7324 rx = nibble_to_count(enable_lane_rx); 7325 7326 /* 7327 * Set link_speed_active here, overriding what was set in 7328 * handle_verify_cap(). The ASIC 8051 firmware does not correctly 7329 * set the max_rate field in handle_verify_cap until v0.19. 7330 */ 7331 if ((dd->icode == ICODE_RTL_SILICON) && 7332 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { 7333 /* max_rate: 0 = 12.5G, 1 = 25G */ 7334 switch (max_rate) { 7335 case 0: 7336 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; 7337 break; 7338 case 1: 7339 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; 7340 break; 7341 default: 7342 dd_dev_err(dd, 7343 "%s: unexpected max rate %d, using 25Gb\n", 7344 __func__, (int)max_rate); 7345 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; 7346 break; 7347 } 7348 } 7349 7350 dd_dev_info(dd, 7351 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n", 7352 enable_lane_tx, tx, enable_lane_rx, rx); 7353 *tx_width = link_width_to_bits(dd, tx); 7354 *rx_width = link_width_to_bits(dd, rx); 7355 } 7356 7357 /* 7358 * Read verify_cap_local_fm_link_width[1] to obtain the link widths. 7359 * Valid after the end of VerifyCap and during LinkUp. Does not change 7360 * after link up. I.e. look elsewhere for downgrade information. 7361 * 7362 * Bits are: 7363 * + bits [7:4] contain the number of active transmitters 7364 * + bits [3:0] contain the number of active receivers 7365 * These are numbers 1 through 4 and can be different values if the 7366 * link is asymmetric. 7367 * 7368 * verify_cap_local_fm_link_width[0] retains its original value. 
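 *
 * For example, the code below decodes a widths value of 0x4400 as
 * tx = 4 active transmitters and rx = 4 active receivers.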
7369 */ 7370 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, 7371 u16 *rx_width) 7372 { 7373 u16 widths, tx, rx; 7374 u8 misc_bits, local_flags; 7375 u16 active_tx, active_rx; 7376 7377 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths); 7378 tx = widths >> 12; 7379 rx = (widths >> 8) & 0xf; 7380 7381 *tx_width = link_width_to_bits(dd, tx); 7382 *rx_width = link_width_to_bits(dd, rx); 7383 7384 /* print the active widths */ 7385 get_link_widths(dd, &active_tx, &active_rx); 7386 } 7387 7388 /* 7389 * Set ppd->link_width_active and ppd->link_width_downgrade_active using 7390 * hardware information when the link first comes up. 7391 * 7392 * The link width is not available until after VerifyCap.AllFramesReceived 7393 * (the trigger for handle_verify_cap), so this is outside that routine 7394 * and should be called when the 8051 signals linkup. 7395 */ 7396 void get_linkup_link_widths(struct hfi1_pportdata *ppd) 7397 { 7398 u16 tx_width, rx_width; 7399 7400 /* get end-of-LNI link widths */ 7401 get_linkup_widths(ppd->dd, &tx_width, &rx_width); 7402 7403 /* use tx_width as the link is supposed to be symmetric on link up */ 7404 ppd->link_width_active = tx_width; 7405 /* link width downgrade active (LWD.A) starts out matching LW.A */ 7406 ppd->link_width_downgrade_tx_active = ppd->link_width_active; 7407 ppd->link_width_downgrade_rx_active = ppd->link_width_active; 7408 /* per OPA spec, on link up LWD.E resets to LWD.S */ 7409 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; 7410 /* cache the active egress rate (units {10^6 bits/sec]) */ 7411 ppd->current_egress_rate = active_egress_rate(ppd); 7412 } 7413 7414 /* 7415 * Handle a verify capabilities interrupt from the 8051. 7416 * 7417 * This is a work-queue function outside of the interrupt. 7418 */ 7419 void handle_verify_cap(struct work_struct *work) 7420 { 7421 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7422 link_vc_work); 7423 struct hfi1_devdata *dd = ppd->dd; 7424 u64 reg; 7425 u8 power_management; 7426 u8 continuous; 7427 u8 vcu; 7428 u8 vau; 7429 u8 z; 7430 u16 vl15buf; 7431 u16 link_widths; 7432 u16 crc_mask; 7433 u16 crc_val; 7434 u16 device_id; 7435 u16 active_tx, active_rx; 7436 u8 partner_supported_crc; 7437 u8 remote_tx_rate; 7438 u8 device_rev; 7439 7440 set_link_state(ppd, HLS_VERIFY_CAP); 7441 7442 lcb_shutdown(dd, 0); 7443 adjust_lcb_for_fpga_serdes(dd); 7444 7445 read_vc_remote_phy(dd, &power_management, &continuous); 7446 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, 7447 &partner_supported_crc); 7448 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); 7449 read_remote_device_id(dd, &device_id, &device_rev); 7450 7451 /* print the active widths */ 7452 get_link_widths(dd, &active_tx, &active_rx); 7453 dd_dev_info(dd, 7454 "Peer PHY: power management 0x%x, continuous updates 0x%x\n", 7455 (int)power_management, (int)continuous); 7456 dd_dev_info(dd, 7457 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n", 7458 (int)vau, (int)z, (int)vcu, (int)vl15buf, 7459 (int)partner_supported_crc); 7460 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", 7461 (u32)remote_tx_rate, (u32)link_widths); 7462 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", 7463 (u32)device_id, (u32)device_rev); 7464 /* 7465 * The peer vAU value just read is the peer receiver value. HFI does 7466 * not support a transmit vAU of 0 (AU == 8). 
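 * (As the AU values quoted here suggest, each step of vAU doubles the
 * allocation unit: 8 bytes at vAU 0, 16 bytes at vAU 1, and so on.)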
We advertised that 7467 * with Z=1 in the fabric capabilities sent to the peer. The peer 7468 * will see our Z=1, and, if it advertised a vAU of 0, will move its 7469 * receive to vAU of 1 (AU == 16). Do the same here. We do not care 7470 * about the peer Z value - our sent vAU is 3 (hardwired) and is not 7471 * subject to the Z value exception. 7472 */ 7473 if (vau == 0) 7474 vau = 1; 7475 set_up_vau(dd, vau); 7476 7477 /* 7478 * Set VL15 credits to 0 in global credit register. Cache remote VL15 7479 * credits value and wait for link-up interrupt ot set it. 7480 */ 7481 set_up_vl15(dd, 0); 7482 dd->vl15buf_cached = vl15buf; 7483 7484 /* set up the LCB CRC mode */ 7485 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; 7486 7487 /* order is important: use the lowest bit in common */ 7488 if (crc_mask & CAP_CRC_14B) 7489 crc_val = LCB_CRC_14B; 7490 else if (crc_mask & CAP_CRC_48B) 7491 crc_val = LCB_CRC_48B; 7492 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE) 7493 crc_val = LCB_CRC_12B_16B_PER_LANE; 7494 else 7495 crc_val = LCB_CRC_16B; 7496 7497 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val); 7498 write_csr(dd, DC_LCB_CFG_CRC_MODE, 7499 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT); 7500 7501 /* set (14b only) or clear sideband credit */ 7502 reg = read_csr(dd, SEND_CM_CTRL); 7503 if (crc_val == LCB_CRC_14B && crc_14b_sideband) { 7504 write_csr(dd, SEND_CM_CTRL, 7505 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); 7506 } else { 7507 write_csr(dd, SEND_CM_CTRL, 7508 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); 7509 } 7510 7511 ppd->link_speed_active = 0; /* invalid value */ 7512 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { 7513 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */ 7514 switch (remote_tx_rate) { 7515 case 0: 7516 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; 7517 break; 7518 case 1: 7519 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7520 break; 7521 } 7522 } else { 7523 /* actual rate is highest bit of the ANDed rates */ 7524 u8 rate = remote_tx_rate & ppd->local_tx_rate; 7525 7526 if (rate & 2) 7527 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7528 else if (rate & 1) 7529 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; 7530 } 7531 if (ppd->link_speed_active == 0) { 7532 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", 7533 __func__, (int)remote_tx_rate); 7534 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7535 } 7536 7537 /* 7538 * Cache the values of the supported, enabled, and active 7539 * LTP CRC modes to return in 'portinfo' queries. But the bit 7540 * flags that are returned in the portinfo query differ from 7541 * what's in the link_crc_mask, crc_sizes, and crc_val 7542 * variables. Convert these here. 7543 */ 7544 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; 7545 /* supported crc modes */ 7546 ppd->port_ltp_crc_mode |= 7547 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; 7548 /* enabled crc modes */ 7549 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); 7550 /* active crc mode */ 7551 7552 /* set up the remote credit return table */ 7553 assign_remote_cm_au_table(dd, vcu); 7554 7555 /* 7556 * The LCB is reset on entry to handle_verify_cap(), so this must 7557 * be applied on every link up. 
7558 * 7559 * Adjust LCB error kill enable to kill the link if 7560 * these RBUF errors are seen: 7561 * REPLAY_BUF_MBE_SMASK 7562 * FLIT_INPUT_BUF_MBE_SMASK 7563 */ 7564 if (is_ax(dd)) { /* fixed in B0 */ 7565 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); 7566 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK 7567 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK; 7568 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg); 7569 } 7570 7571 /* pull LCB fifos out of reset - all fifo clocks must be stable */ 7572 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 7573 7574 /* give 8051 access to the LCB CSRs */ 7575 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ 7576 set_8051_lcb_access(dd); 7577 7578 /* tell the 8051 to go to LinkUp */ 7579 set_link_state(ppd, HLS_GOING_UP); 7580 } 7581 7582 /** 7583 * apply_link_downgrade_policy - Apply the link width downgrade enabled 7584 * policy against the current active link widths. 7585 * @ppd: info of physical Hfi port 7586 * @refresh_widths: True indicates link downgrade event 7587 * @return: True indicates a successful link downgrade. False indicates 7588 * link downgrade event failed and the link will bounce back to 7589 * default link width. 7590 * 7591 * Called when the enabled policy changes or the active link widths 7592 * change. 7593 * Refresh_widths indicates that a link downgrade occurred. The 7594 * link_downgraded variable is set by refresh_widths and 7595 * determines the success/failure of the policy application. 7596 */ 7597 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 7598 bool refresh_widths) 7599 { 7600 int do_bounce = 0; 7601 int tries; 7602 u16 lwde; 7603 u16 tx, rx; 7604 bool link_downgraded = refresh_widths; 7605 7606 /* use the hls lock to avoid a race with actual link up */ 7607 tries = 0; 7608 retry: 7609 mutex_lock(&ppd->hls_lock); 7610 /* only apply if the link is up */ 7611 if (ppd->host_link_state & HLS_DOWN) { 7612 /* still going up..wait and retry */ 7613 if (ppd->host_link_state & HLS_GOING_UP) { 7614 if (++tries < 1000) { 7615 mutex_unlock(&ppd->hls_lock); 7616 usleep_range(100, 120); /* arbitrary */ 7617 goto retry; 7618 } 7619 dd_dev_err(ppd->dd, 7620 "%s: giving up waiting for link state change\n", 7621 __func__); 7622 } 7623 goto done; 7624 } 7625 7626 lwde = ppd->link_width_downgrade_enabled; 7627 7628 if (refresh_widths) { 7629 get_link_widths(ppd->dd, &tx, &rx); 7630 ppd->link_width_downgrade_tx_active = tx; 7631 ppd->link_width_downgrade_rx_active = rx; 7632 } 7633 7634 if (ppd->link_width_downgrade_tx_active == 0 || 7635 ppd->link_width_downgrade_rx_active == 0) { 7636 /* the 8051 reported a dead link as a downgrade */ 7637 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); 7638 link_downgraded = false; 7639 } else if (lwde == 0) { 7640 /* downgrade is disabled */ 7641 7642 /* bounce if not at starting active width */ 7643 if ((ppd->link_width_active != 7644 ppd->link_width_downgrade_tx_active) || 7645 (ppd->link_width_active != 7646 ppd->link_width_downgrade_rx_active)) { 7647 dd_dev_err(ppd->dd, 7648 "Link downgrade is disabled and link has downgraded, downing link\n"); 7649 dd_dev_err(ppd->dd, 7650 " original 0x%x, tx active 0x%x, rx active 0x%x\n", 7651 ppd->link_width_active, 7652 ppd->link_width_downgrade_tx_active, 7653 ppd->link_width_downgrade_rx_active); 7654 do_bounce = 1; 7655 link_downgraded = false; 7656 } 7657 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || 7658 (lwde & ppd->link_width_downgrade_rx_active) == 0) { 7659 /* Tx or Rx is outside 
the enabled policy */ 7660 dd_dev_err(ppd->dd, 7661 "Link is outside of downgrade allowed, downing link\n"); 7662 dd_dev_err(ppd->dd, 7663 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n", 7664 lwde, ppd->link_width_downgrade_tx_active, 7665 ppd->link_width_downgrade_rx_active); 7666 do_bounce = 1; 7667 link_downgraded = false; 7668 } 7669 7670 done: 7671 mutex_unlock(&ppd->hls_lock); 7672 7673 if (do_bounce) { 7674 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0, 7675 OPA_LINKDOWN_REASON_WIDTH_POLICY); 7676 set_link_state(ppd, HLS_DN_OFFLINE); 7677 start_link(ppd); 7678 } 7679 7680 return link_downgraded; 7681 } 7682 7683 /* 7684 * Handle a link downgrade interrupt from the 8051. 7685 * 7686 * This is a work-queue function outside of the interrupt. 7687 */ 7688 void handle_link_downgrade(struct work_struct *work) 7689 { 7690 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7691 link_downgrade_work); 7692 7693 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); 7694 if (apply_link_downgrade_policy(ppd, true)) 7695 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); 7696 } 7697 7698 static char *dcc_err_string(char *buf, int buf_len, u64 flags) 7699 { 7700 return flag_string(buf, buf_len, flags, dcc_err_flags, 7701 ARRAY_SIZE(dcc_err_flags)); 7702 } 7703 7704 static char *lcb_err_string(char *buf, int buf_len, u64 flags) 7705 { 7706 return flag_string(buf, buf_len, flags, lcb_err_flags, 7707 ARRAY_SIZE(lcb_err_flags)); 7708 } 7709 7710 static char *dc8051_err_string(char *buf, int buf_len, u64 flags) 7711 { 7712 return flag_string(buf, buf_len, flags, dc8051_err_flags, 7713 ARRAY_SIZE(dc8051_err_flags)); 7714 } 7715 7716 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags) 7717 { 7718 return flag_string(buf, buf_len, flags, dc8051_info_err_flags, 7719 ARRAY_SIZE(dc8051_info_err_flags)); 7720 } 7721 7722 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags) 7723 { 7724 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags, 7725 ARRAY_SIZE(dc8051_info_host_msg_flags)); 7726 } 7727 7728 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) 7729 { 7730 struct hfi1_pportdata *ppd = dd->pport; 7731 u64 info, err, host_msg; 7732 int queue_link_down = 0; 7733 char buf[96]; 7734 7735 /* look at the flags */ 7736 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) { 7737 /* 8051 information set by firmware */ 7738 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */ 7739 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); 7740 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT) 7741 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK; 7742 host_msg = (info >> 7743 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT) 7744 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK; 7745 7746 /* 7747 * Handle error flags. 7748 */ 7749 if (err & FAILED_LNI) { 7750 /* 7751 * LNI error indications are cleared by the 8051 7752 * only when starting polling. Only pay attention 7753 * to them when in the states that occur during 7754 * LNI. 
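 * (Those LNI-phase states are exactly HLS_DN_POLL, HLS_VERIFY_CAP and
 * HLS_GOING_UP, the same set tested below.)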
7755 */ 7756 if (ppd->host_link_state 7757 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 7758 queue_link_down = 1; 7759 dd_dev_info(dd, "Link error: %s\n", 7760 dc8051_info_err_string(buf, 7761 sizeof(buf), 7762 err & 7763 FAILED_LNI)); 7764 } 7765 err &= ~(u64)FAILED_LNI; 7766 } 7767 /* unknown frames can happen durning LNI, just count */ 7768 if (err & UNKNOWN_FRAME) { 7769 ppd->unknown_frame_count++; 7770 err &= ~(u64)UNKNOWN_FRAME; 7771 } 7772 if (err) { 7773 /* report remaining errors, but do not do anything */ 7774 dd_dev_err(dd, "8051 info error: %s\n", 7775 dc8051_info_err_string(buf, sizeof(buf), 7776 err)); 7777 } 7778 7779 /* 7780 * Handle host message flags. 7781 */ 7782 if (host_msg & HOST_REQ_DONE) { 7783 /* 7784 * Presently, the driver does a busy wait for 7785 * host requests to complete. This is only an 7786 * informational message. 7787 * NOTE: The 8051 clears the host message 7788 * information *on the next 8051 command*. 7789 * Therefore, when linkup is achieved, 7790 * this flag will still be set. 7791 */ 7792 host_msg &= ~(u64)HOST_REQ_DONE; 7793 } 7794 if (host_msg & BC_SMA_MSG) { 7795 queue_work(ppd->link_wq, &ppd->sma_message_work); 7796 host_msg &= ~(u64)BC_SMA_MSG; 7797 } 7798 if (host_msg & LINKUP_ACHIEVED) { 7799 dd_dev_info(dd, "8051: Link up\n"); 7800 queue_work(ppd->link_wq, &ppd->link_up_work); 7801 host_msg &= ~(u64)LINKUP_ACHIEVED; 7802 } 7803 if (host_msg & EXT_DEVICE_CFG_REQ) { 7804 handle_8051_request(ppd); 7805 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ; 7806 } 7807 if (host_msg & VERIFY_CAP_FRAME) { 7808 queue_work(ppd->link_wq, &ppd->link_vc_work); 7809 host_msg &= ~(u64)VERIFY_CAP_FRAME; 7810 } 7811 if (host_msg & LINK_GOING_DOWN) { 7812 const char *extra = ""; 7813 /* no downgrade action needed if going down */ 7814 if (host_msg & LINK_WIDTH_DOWNGRADED) { 7815 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; 7816 extra = " (ignoring downgrade)"; 7817 } 7818 dd_dev_info(dd, "8051: Link down%s\n", extra); 7819 queue_link_down = 1; 7820 host_msg &= ~(u64)LINK_GOING_DOWN; 7821 } 7822 if (host_msg & LINK_WIDTH_DOWNGRADED) { 7823 queue_work(ppd->link_wq, &ppd->link_downgrade_work); 7824 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; 7825 } 7826 if (host_msg) { 7827 /* report remaining messages, but do not do anything */ 7828 dd_dev_info(dd, "8051 info host message: %s\n", 7829 dc8051_info_host_msg_string(buf, 7830 sizeof(buf), 7831 host_msg)); 7832 } 7833 7834 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK; 7835 } 7836 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) { 7837 /* 7838 * Lost the 8051 heartbeat. If this happens, we 7839 * receive constant interrupts about it. Disable 7840 * the interrupt after the first. 7841 */ 7842 dd_dev_err(dd, "Lost 8051 heartbeat\n"); 7843 write_csr(dd, DC_DC8051_ERR_EN, 7844 read_csr(dd, DC_DC8051_ERR_EN) & 7845 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); 7846 7847 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK; 7848 } 7849 if (reg) { 7850 /* report the error, but do not do anything */ 7851 dd_dev_err(dd, "8051 error: %s\n", 7852 dc8051_err_string(buf, sizeof(buf), reg)); 7853 } 7854 7855 if (queue_link_down) { 7856 /* 7857 * if the link is already going down or disabled, do not 7858 * queue another. If there's a link down entry already 7859 * queued, don't queue another one. 7860 */ 7861 if ((ppd->host_link_state & 7862 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || 7863 ppd->link_enabled == 0) { 7864 dd_dev_info(dd, "%s: not queuing link down. 
host_link_state %x, link_enabled %x\n", 7865 __func__, ppd->host_link_state, 7866 ppd->link_enabled); 7867 } else { 7868 if (xchg(&ppd->is_link_down_queued, 1) == 1) 7869 dd_dev_info(dd, 7870 "%s: link down request already queued\n", 7871 __func__); 7872 else 7873 queue_work(ppd->link_wq, &ppd->link_down_work); 7874 } 7875 } 7876 } 7877 7878 static const char * const fm_config_txt[] = { 7879 [0] = 7880 "BadHeadDist: Distance violation between two head flits", 7881 [1] = 7882 "BadTailDist: Distance violation between two tail flits", 7883 [2] = 7884 "BadCtrlDist: Distance violation between two credit control flits", 7885 [3] = 7886 "BadCrdAck: Credits return for unsupported VL", 7887 [4] = 7888 "UnsupportedVLMarker: Received VL Marker", 7889 [5] = 7890 "BadPreempt: Exceeded the preemption nesting level", 7891 [6] = 7892 "BadControlFlit: Received unsupported control flit", 7893 /* no 7 */ 7894 [8] = 7895 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL", 7896 }; 7897 7898 static const char * const port_rcv_txt[] = { 7899 [1] = 7900 "BadPktLen: Illegal PktLen", 7901 [2] = 7902 "PktLenTooLong: Packet longer than PktLen", 7903 [3] = 7904 "PktLenTooShort: Packet shorter than PktLen", 7905 [4] = 7906 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)", 7907 [5] = 7908 "BadDLID: Illegal DLID (0, doesn't match HFI)", 7909 [6] = 7910 "BadL2: Illegal L2 opcode", 7911 [7] = 7912 "BadSC: Unsupported SC", 7913 [9] = 7914 "BadRC: Illegal RC", 7915 [11] = 7916 "PreemptError: Preempting with same VL", 7917 [12] = 7918 "PreemptVL15: Preempting a VL15 packet", 7919 }; 7920 7921 #define OPA_LDR_FMCONFIG_OFFSET 16 7922 #define OPA_LDR_PORTRCV_OFFSET 0 7923 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 7924 { 7925 u64 info, hdr0, hdr1; 7926 const char *extra; 7927 char buf[96]; 7928 struct hfi1_pportdata *ppd = dd->pport; 7929 u8 lcl_reason = 0; 7930 int do_bounce = 0; 7931 7932 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) { 7933 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { 7934 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); 7935 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; 7936 /* set status bit */ 7937 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; 7938 } 7939 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK; 7940 } 7941 7942 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) { 7943 struct hfi1_pportdata *ppd = dd->pport; 7944 /* this counter saturates at (2^32) - 1 */ 7945 if (ppd->link_downed < (u32)UINT_MAX) 7946 ppd->link_downed++; 7947 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK; 7948 } 7949 7950 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) { 7951 u8 reason_valid = 1; 7952 7953 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); 7954 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { 7955 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; 7956 /* set status bit */ 7957 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; 7958 } 7959 switch (info) { 7960 case 0: 7961 case 1: 7962 case 2: 7963 case 3: 7964 case 4: 7965 case 5: 7966 case 6: 7967 extra = fm_config_txt[info]; 7968 break; 7969 case 8: 7970 extra = fm_config_txt[info]; 7971 if (ppd->port_error_action & 7972 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) { 7973 do_bounce = 1; 7974 /* 7975 * lcl_reason cannot be derived from info 7976 * for this error 7977 */ 7978 lcl_reason = 7979 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER; 7980 } 7981 break; 7982 default: 7983 reason_valid = 0; 7984 snprintf(buf, sizeof(buf), "reserved%lld", info); 7985 extra = buf; 7986 break; 7987 
} 7988 7989 if (reason_valid && !do_bounce) { 7990 do_bounce = ppd->port_error_action & 7991 (1 << (OPA_LDR_FMCONFIG_OFFSET + info)); 7992 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST; 7993 } 7994 7995 /* just report this */ 7996 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n", 7997 extra); 7998 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK; 7999 } 8000 8001 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) { 8002 u8 reason_valid = 1; 8003 8004 info = read_csr(dd, DCC_ERR_INFO_PORTRCV); 8005 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); 8006 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); 8007 if (!(dd->err_info_rcvport.status_and_code & 8008 OPA_EI_STATUS_SMASK)) { 8009 dd->err_info_rcvport.status_and_code = 8010 info & OPA_EI_CODE_SMASK; 8011 /* set status bit */ 8012 dd->err_info_rcvport.status_and_code |= 8013 OPA_EI_STATUS_SMASK; 8014 /* 8015 * save first 2 flits in the packet that caused 8016 * the error 8017 */ 8018 dd->err_info_rcvport.packet_flit1 = hdr0; 8019 dd->err_info_rcvport.packet_flit2 = hdr1; 8020 } 8021 switch (info) { 8022 case 1: 8023 case 2: 8024 case 3: 8025 case 4: 8026 case 5: 8027 case 6: 8028 case 7: 8029 case 9: 8030 case 11: 8031 case 12: 8032 extra = port_rcv_txt[info]; 8033 break; 8034 default: 8035 reason_valid = 0; 8036 snprintf(buf, sizeof(buf), "reserved%lld", info); 8037 extra = buf; 8038 break; 8039 } 8040 8041 if (reason_valid && !do_bounce) { 8042 do_bounce = ppd->port_error_action & 8043 (1 << (OPA_LDR_PORTRCV_OFFSET + info)); 8044 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0; 8045 } 8046 8047 /* just report this */ 8048 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" 8049 " hdr0 0x%llx, hdr1 0x%llx\n", 8050 extra, hdr0, hdr1); 8051 8052 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK; 8053 } 8054 8055 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) { 8056 /* informative only */ 8057 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n"); 8058 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK; 8059 } 8060 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) { 8061 /* informative only */ 8062 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n"); 8063 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK; 8064 } 8065 8066 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) 8067 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK; 8068 8069 /* report any remaining errors */ 8070 if (reg) 8071 dd_dev_info_ratelimited(dd, "DCC Error: %s\n", 8072 dcc_err_string(buf, sizeof(buf), reg)); 8073 8074 if (lcl_reason == 0) 8075 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN; 8076 8077 if (do_bounce) { 8078 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n", 8079 __func__); 8080 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason); 8081 queue_work(ppd->link_wq, &ppd->link_bounce_work); 8082 } 8083 } 8084 8085 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 8086 { 8087 char buf[96]; 8088 8089 dd_dev_info(dd, "LCB Error: %s\n", 8090 lcb_err_string(buf, sizeof(buf), reg)); 8091 } 8092 8093 /* 8094 * CCE block DC interrupt. Source is < 8. 8095 */ 8096 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) 8097 { 8098 const struct err_reg_info *eri = &dc_errs[source]; 8099 8100 if (eri->handler) { 8101 interrupt_clear_down(dd, 0, eri); 8102 } else if (source == 3 /* dc_lbm_int */) { 8103 /* 8104 * This indicates that a parity error has occurred on the 8105 * address/control lines presented to the LBM. 
The error 8106 * is a single pulse, there is no associated error flag, 8107 * and it is non-maskable. This is because if a parity 8108 * error occurs on the request the request is dropped. 8109 * This should never occur, but it is nice to know if it 8110 * ever does. 8111 */ 8112 dd_dev_err(dd, "Parity error in DC LBM block\n"); 8113 } else { 8114 dd_dev_err(dd, "Invalid DC interrupt %u\n", source); 8115 } 8116 } 8117 8118 /* 8119 * TX block send credit interrupt. Source is < 160. 8120 */ 8121 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) 8122 { 8123 sc_group_release_update(dd, source); 8124 } 8125 8126 /* 8127 * TX block SDMA interrupt. Source is < 48. 8128 * 8129 * SDMA interrupts are grouped by type: 8130 * 8131 * 0 - N-1 = SDma 8132 * N - 2N-1 = SDmaProgress 8133 * 2N - 3N-1 = SDmaIdle 8134 */ 8135 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) 8136 { 8137 /* what interrupt */ 8138 unsigned int what = source / TXE_NUM_SDMA_ENGINES; 8139 /* which engine */ 8140 unsigned int which = source % TXE_NUM_SDMA_ENGINES; 8141 8142 #ifdef CONFIG_SDMA_VERBOSITY 8143 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which, 8144 slashstrip(__FILE__), __LINE__, __func__); 8145 sdma_dumpstate(&dd->per_sdma[which]); 8146 #endif 8147 8148 if (likely(what < 3 && which < dd->num_sdma)) { 8149 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); 8150 } else { 8151 /* should not happen */ 8152 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source); 8153 } 8154 } 8155 8156 /** 8157 * is_rcv_avail_int() - User receive context available IRQ handler 8158 * @dd: valid dd 8159 * @source: logical IRQ source (offset from IS_RCVAVAIL_START) 8160 * 8161 * RX block receive available interrupt. Source is < 160. 8162 * 8163 * This is the general interrupt handler for user (PSM) receive contexts, 8164 * and can only be used for non-threaded IRQs. 8165 */ 8166 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) 8167 { 8168 struct hfi1_ctxtdata *rcd; 8169 char *err_detail; 8170 8171 if (likely(source < dd->num_rcv_contexts)) { 8172 rcd = hfi1_rcd_get_by_index(dd, source); 8173 if (rcd) { 8174 handle_user_interrupt(rcd); 8175 hfi1_rcd_put(rcd); 8176 return; /* OK */ 8177 } 8178 /* received an interrupt, but no rcd */ 8179 err_detail = "dataless"; 8180 } else { 8181 /* received an interrupt, but are not using that context */ 8182 err_detail = "out of range"; 8183 } 8184 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", 8185 err_detail, source); 8186 } 8187 8188 /** 8189 * is_rcv_urgent_int() - User receive context urgent IRQ handler 8190 * @dd: valid dd 8191 * @source: logical IRQ source (offset from IS_RCVURGENT_START) 8192 * 8193 * RX block receive urgent interrupt. Source is < 160. 8194 * 8195 * NOTE: kernel receive contexts specifically do NOT enable this IRQ. 
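 * Like RcvAvail, this simply hands the context to handle_user_interrupt()
 * and shares the same dataless / out-of-range error reporting.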
8196 */ 8197 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) 8198 { 8199 struct hfi1_ctxtdata *rcd; 8200 char *err_detail; 8201 8202 if (likely(source < dd->num_rcv_contexts)) { 8203 rcd = hfi1_rcd_get_by_index(dd, source); 8204 if (rcd) { 8205 handle_user_interrupt(rcd); 8206 hfi1_rcd_put(rcd); 8207 return; /* OK */ 8208 } 8209 /* received an interrupt, but no rcd */ 8210 err_detail = "dataless"; 8211 } else { 8212 /* received an interrupt, but are not using that context */ 8213 err_detail = "out of range"; 8214 } 8215 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", 8216 err_detail, source); 8217 } 8218 8219 /* 8220 * Reserved range interrupt. Should not be called in normal operation. 8221 */ 8222 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) 8223 { 8224 char name[64]; 8225 8226 dd_dev_err(dd, "unexpected %s interrupt\n", 8227 is_reserved_name(name, sizeof(name), source)); 8228 } 8229 8230 static const struct is_table is_table[] = { 8231 /* 8232 * start end 8233 * name func interrupt func 8234 */ 8235 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, 8236 is_misc_err_name, is_misc_err_int }, 8237 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, 8238 is_sdma_eng_err_name, is_sdma_eng_err_int }, 8239 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, 8240 is_sendctxt_err_name, is_sendctxt_err_int }, 8241 { IS_SDMA_START, IS_SDMA_IDLE_END, 8242 is_sdma_eng_name, is_sdma_eng_int }, 8243 { IS_VARIOUS_START, IS_VARIOUS_END, 8244 is_various_name, is_various_int }, 8245 { IS_DC_START, IS_DC_END, 8246 is_dc_name, is_dc_int }, 8247 { IS_RCVAVAIL_START, IS_RCVAVAIL_END, 8248 is_rcv_avail_name, is_rcv_avail_int }, 8249 { IS_RCVURGENT_START, IS_RCVURGENT_END, 8250 is_rcv_urgent_name, is_rcv_urgent_int }, 8251 { IS_SENDCREDIT_START, IS_SENDCREDIT_END, 8252 is_send_credit_name, is_send_credit_int}, 8253 { IS_RESERVED_START, IS_RESERVED_END, 8254 is_reserved_name, is_reserved_int}, 8255 }; 8256 8257 /* 8258 * Interrupt source interrupt - called when the given source has an interrupt. 8259 * Source is a bit index into an array of 64-bit integers. 8260 */ 8261 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) 8262 { 8263 const struct is_table *entry; 8264 8265 /* avoids a double compare by walking the table in-order */ 8266 for (entry = &is_table[0]; entry->is_name; entry++) { 8267 if (source <= entry->end) { 8268 trace_hfi1_interrupt(dd, entry, source); 8269 entry->is_int(dd, source - entry->start); 8270 return; 8271 } 8272 } 8273 /* fell off the end */ 8274 dd_dev_err(dd, "invalid interrupt source %u\n", source); 8275 } 8276 8277 /** 8278 * general_interrupt - General interrupt handler 8279 * @irq: MSIx IRQ vector 8280 * @data: hfi1 devdata 8281 * 8282 * This is able to correctly handle all non-threaded interrupts. Receive 8283 * context DATA IRQs are threaded and are not supported by this handler. 
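 * The handler works in two phases: read and clear every unmasked
 * CCE interrupt status CSR, then walk the captured bits and dispatch
 * each one through is_interrupt().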
8284 * 8285 */ 8286 irqreturn_t general_interrupt(int irq, void *data) 8287 { 8288 struct hfi1_devdata *dd = data; 8289 u64 regs[CCE_NUM_INT_CSRS]; 8290 u32 bit; 8291 int i; 8292 irqreturn_t handled = IRQ_NONE; 8293 8294 this_cpu_inc(*dd->int_counter); 8295 8296 /* phase 1: scan and clear all handled interrupts */ 8297 for (i = 0; i < CCE_NUM_INT_CSRS; i++) { 8298 if (dd->gi_mask[i] == 0) { 8299 regs[i] = 0; /* used later */ 8300 continue; 8301 } 8302 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & 8303 dd->gi_mask[i]; 8304 /* only clear if anything is set */ 8305 if (regs[i]) 8306 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]); 8307 } 8308 8309 /* phase 2: call the appropriate handler */ 8310 for_each_set_bit(bit, (unsigned long *)®s[0], 8311 CCE_NUM_INT_CSRS * 64) { 8312 is_interrupt(dd, bit); 8313 handled = IRQ_HANDLED; 8314 } 8315 8316 return handled; 8317 } 8318 8319 irqreturn_t sdma_interrupt(int irq, void *data) 8320 { 8321 struct sdma_engine *sde = data; 8322 struct hfi1_devdata *dd = sde->dd; 8323 u64 status; 8324 8325 #ifdef CONFIG_SDMA_VERBOSITY 8326 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 8327 slashstrip(__FILE__), __LINE__, __func__); 8328 sdma_dumpstate(sde); 8329 #endif 8330 8331 this_cpu_inc(*dd->int_counter); 8332 8333 /* This read_csr is really bad in the hot path */ 8334 status = read_csr(dd, 8335 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) 8336 & sde->imask; 8337 if (likely(status)) { 8338 /* clear the interrupt(s) */ 8339 write_csr(dd, 8340 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), 8341 status); 8342 8343 /* handle the interrupt(s) */ 8344 sdma_engine_interrupt(sde, status); 8345 } else { 8346 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", 8347 sde->this_idx); 8348 } 8349 return IRQ_HANDLED; 8350 } 8351 8352 /* 8353 * Clear the receive interrupt. Use a read of the interrupt clear CSR 8354 * to insure that the write completed. This does NOT guarantee that 8355 * queued DMA writes to memory from the chip are pushed. 8356 */ 8357 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) 8358 { 8359 struct hfi1_devdata *dd = rcd->dd; 8360 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); 8361 8362 write_csr(dd, addr, rcd->imask); 8363 /* force the above write on the chip and get a value back */ 8364 (void)read_csr(dd, addr); 8365 } 8366 8367 /* force the receive interrupt */ 8368 void force_recv_intr(struct hfi1_ctxtdata *rcd) 8369 { 8370 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); 8371 } 8372 8373 /* 8374 * Return non-zero if a packet is present. 8375 * 8376 * This routine is called when rechecking for packets after the RcvAvail 8377 * interrupt has been cleared down. First, do a quick check of memory for 8378 * a packet present. If not found, use an expensive CSR read of the context 8379 * tail to determine the actual tail. The CSR read is necessary because there 8380 * is no method to push pending DMAs to memory other than an interrupt and we 8381 * are trying to determine if we need to force an interrupt. 8382 */ 8383 static inline int check_packet_present(struct hfi1_ctxtdata *rcd) 8384 { 8385 u32 tail; 8386 8387 if (hfi1_packet_present(rcd)) 8388 return 1; 8389 8390 /* fall back to a CSR read, correct indpendent of DMA_RTAIL */ 8391 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); 8392 return hfi1_rcd_head(rcd) != tail; 8393 } 8394 8395 /* 8396 * Common code for receive contexts interrupt handlers. 
8397 * Update traces, increment kernel IRQ counter and 8398 * setup ASPM when needed. 8399 */ 8400 static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) 8401 { 8402 struct hfi1_devdata *dd = rcd->dd; 8403 8404 trace_hfi1_receive_interrupt(dd, rcd); 8405 this_cpu_inc(*dd->int_counter); 8406 aspm_ctx_disable(rcd); 8407 } 8408 8409 /* 8410 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt 8411 * when there are packets present in the queue. When calling 8412 * with interrupts enabled please use hfi1_rcd_eoi_intr. 8413 * 8414 * @rcd: valid receive context 8415 */ 8416 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) 8417 { 8418 clear_recv_intr(rcd); 8419 if (check_packet_present(rcd)) 8420 force_recv_intr(rcd); 8421 } 8422 8423 /** 8424 * hfi1_rcd_eoi_intr() - End of Interrupt processing action 8425 * 8426 * @rcd: Ptr to hfi1_ctxtdata of receive context 8427 * 8428 * Hold IRQs so we can safely clear the interrupt and 8429 * recheck for a packet that may have arrived after the previous 8430 * check and the interrupt clear. If a packet arrived, force another 8431 * interrupt. This routine can be called at the end of receive packet 8432 * processing in interrupt service routines, interrupt service thread 8433 * and softirqs 8434 */ 8435 static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) 8436 { 8437 unsigned long flags; 8438 8439 local_irq_save(flags); 8440 __hfi1_rcd_eoi_intr(rcd); 8441 local_irq_restore(flags); 8442 } 8443 8444 /** 8445 * hfi1_netdev_rx_napi - napi poll function to move eoi inline 8446 * @napi: pointer to napi object 8447 * @budget: netdev budget 8448 */ 8449 int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget) 8450 { 8451 struct hfi1_netdev_rxq *rxq = container_of(napi, 8452 struct hfi1_netdev_rxq, napi); 8453 struct hfi1_ctxtdata *rcd = rxq->rcd; 8454 int work_done = 0; 8455 8456 work_done = rcd->do_interrupt(rcd, budget); 8457 8458 if (work_done < budget) { 8459 napi_complete_done(napi, work_done); 8460 hfi1_rcd_eoi_intr(rcd); 8461 } 8462 8463 return work_done; 8464 } 8465 8466 /* Receive packet napi handler for netdevs VNIC and AIP */ 8467 irqreturn_t receive_context_interrupt_napi(int irq, void *data) 8468 { 8469 struct hfi1_ctxtdata *rcd = data; 8470 8471 receive_interrupt_common(rcd); 8472 8473 if (likely(rcd->napi)) { 8474 if (likely(napi_schedule_prep(rcd->napi))) 8475 __napi_schedule_irqoff(rcd->napi); 8476 else 8477 __hfi1_rcd_eoi_intr(rcd); 8478 } else { 8479 WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n", 8480 rcd->ctxt); 8481 __hfi1_rcd_eoi_intr(rcd); 8482 } 8483 8484 return IRQ_HANDLED; 8485 } 8486 8487 /* 8488 * Receive packet IRQ handler. This routine expects to be on its own IRQ. 8489 * This routine will try to handle packets immediately (latency), but if 8490 * it finds too many, it will invoke the thread handler (bandwitdh). The 8491 * chip receive interrupt is *not* cleared down until this or the thread (if 8492 * invoked) is finished. The intent is to avoid extra interrupts while we 8493 * are processing packets anyway. 8494 */ 8495 irqreturn_t receive_context_interrupt(int irq, void *data) 8496 { 8497 struct hfi1_ctxtdata *rcd = data; 8498 int disposition; 8499 8500 receive_interrupt_common(rcd); 8501 8502 /* receive interrupt remains blocked while processing packets */ 8503 disposition = rcd->do_interrupt(rcd, 0); 8504 8505 /* 8506 * Too many packets were seen while processing packets in this 8507 * IRQ handler. Invoke the handler thread. The receive interrupt 8508 * remains blocked. 
8509 */ 8510 if (disposition == RCV_PKT_LIMIT) 8511 return IRQ_WAKE_THREAD; 8512 8513 __hfi1_rcd_eoi_intr(rcd); 8514 return IRQ_HANDLED; 8515 } 8516 8517 /* 8518 * Receive packet thread handler. This expects to be invoked with the 8519 * receive interrupt still blocked. 8520 */ 8521 irqreturn_t receive_context_thread(int irq, void *data) 8522 { 8523 struct hfi1_ctxtdata *rcd = data; 8524 8525 /* receive interrupt is still blocked from the IRQ handler */ 8526 (void)rcd->do_interrupt(rcd, 1); 8527 8528 hfi1_rcd_eoi_intr(rcd); 8529 8530 return IRQ_HANDLED; 8531 } 8532 8533 /* ========================================================================= */ 8534 8535 u32 read_physical_state(struct hfi1_devdata *dd) 8536 { 8537 u64 reg; 8538 8539 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); 8540 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT) 8541 & DC_DC8051_STS_CUR_STATE_PORT_MASK; 8542 } 8543 8544 u32 read_logical_state(struct hfi1_devdata *dd) 8545 { 8546 u64 reg; 8547 8548 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8549 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT) 8550 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK; 8551 } 8552 8553 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) 8554 { 8555 u64 reg; 8556 8557 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8558 /* clear current state, set new state */ 8559 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK; 8560 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT; 8561 write_csr(dd, DCC_CFG_PORT_CONFIG, reg); 8562 } 8563 8564 /* 8565 * Use the 8051 to read a LCB CSR. 8566 */ 8567 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) 8568 { 8569 u32 regno; 8570 int ret; 8571 8572 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 8573 if (acquire_lcb_access(dd, 0) == 0) { 8574 *data = read_csr(dd, addr); 8575 release_lcb_access(dd, 0); 8576 return 0; 8577 } 8578 return -EBUSY; 8579 } 8580 8581 /* register is an index of LCB registers: (offset - base) / 8 */ 8582 regno = (addr - DC_LCB_CFG_RUN) >> 3; 8583 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data); 8584 if (ret != HCMD_SUCCESS) 8585 return -EBUSY; 8586 return 0; 8587 } 8588 8589 /* 8590 * Provide a cache for some of the LCB registers in case the LCB is 8591 * unavailable. 8592 * (The LCB is unavailable in certain link states, for example.) 8593 */ 8594 struct lcb_datum { 8595 u32 off; 8596 u64 val; 8597 }; 8598 8599 static struct lcb_datum lcb_cache[] = { 8600 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0}, 8601 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 }, 8602 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 }, 8603 }; 8604 8605 static void update_lcb_cache(struct hfi1_devdata *dd) 8606 { 8607 int i; 8608 int ret; 8609 u64 val; 8610 8611 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 8612 ret = read_lcb_csr(dd, lcb_cache[i].off, &val); 8613 8614 /* Update if we get good data */ 8615 if (likely(ret != -EBUSY)) 8616 lcb_cache[i].val = val; 8617 } 8618 } 8619 8620 static int read_lcb_cache(u32 off, u64 *val) 8621 { 8622 int i; 8623 8624 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 8625 if (lcb_cache[i].off == off) { 8626 *val = lcb_cache[i].val; 8627 return 0; 8628 } 8629 } 8630 8631 pr_warn("%s bad offset 0x%x\n", __func__, off); 8632 return -1; 8633 } 8634 8635 /* 8636 * Read an LCB CSR. Access may not be in host control, so check. 8637 * Return 0 on success, -EBUSY on failure. 
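 *
 * Summary of the access paths taken below, keyed off host_link_state:
 *
 *   HLS_UP*                           -> read_lcb_via_8051()
 *   HLS_GOING_UP | HLS_GOING_OFFLINE  -> read_lcb_cache(), the last value
 *                                        captured by update_lcb_cache()
 *   anything else (host owns the LCB) -> direct read_csr()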
8638 */ 8639 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data) 8640 { 8641 struct hfi1_pportdata *ppd = dd->pport; 8642 8643 /* if up, go through the 8051 for the value */ 8644 if (ppd->host_link_state & HLS_UP) 8645 return read_lcb_via_8051(dd, addr, data); 8646 /* if going up or down, check the cache, otherwise, no access */ 8647 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { 8648 if (read_lcb_cache(addr, data)) 8649 return -EBUSY; 8650 return 0; 8651 } 8652 8653 /* otherwise, host has access */ 8654 *data = read_csr(dd, addr); 8655 return 0; 8656 } 8657 8658 /* 8659 * Use the 8051 to write a LCB CSR. 8660 */ 8661 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data) 8662 { 8663 u32 regno; 8664 int ret; 8665 8666 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || 8667 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { 8668 if (acquire_lcb_access(dd, 0) == 0) { 8669 write_csr(dd, addr, data); 8670 release_lcb_access(dd, 0); 8671 return 0; 8672 } 8673 return -EBUSY; 8674 } 8675 8676 /* register is an index of LCB registers: (offset - base) / 8 */ 8677 regno = (addr - DC_LCB_CFG_RUN) >> 3; 8678 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data); 8679 if (ret != HCMD_SUCCESS) 8680 return -EBUSY; 8681 return 0; 8682 } 8683 8684 /* 8685 * Write an LCB CSR. Access may not be in host control, so check. 8686 * Return 0 on success, -EBUSY on failure. 8687 */ 8688 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data) 8689 { 8690 struct hfi1_pportdata *ppd = dd->pport; 8691 8692 /* if up, go through the 8051 for the value */ 8693 if (ppd->host_link_state & HLS_UP) 8694 return write_lcb_via_8051(dd, addr, data); 8695 /* if going up or down, no access */ 8696 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) 8697 return -EBUSY; 8698 /* otherwise, host has access */ 8699 write_csr(dd, addr, data); 8700 return 0; 8701 } 8702 8703 /* 8704 * Returns: 8705 * < 0 = Linux error, not able to get access 8706 * > 0 = 8051 command RETURN_CODE 8707 */ 8708 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, 8709 u64 *out_data) 8710 { 8711 u64 reg, completed; 8712 int return_code; 8713 unsigned long timeout; 8714 8715 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data); 8716 8717 mutex_lock(&dd->dc8051_lock); 8718 8719 /* We can't send any commands to the 8051 if it's in reset */ 8720 if (dd->dc_shutdown) { 8721 return_code = -ENODEV; 8722 goto fail; 8723 } 8724 8725 /* 8726 * If an 8051 host command timed out previously, then the 8051 is 8727 * stuck. 8728 * 8729 * On first timeout, attempt to reset and restart the entire DC 8730 * block (including 8051). (Is this too big of a hammer?) 8731 * 8732 * If the 8051 times out a second time, the reset did not bring it 8733 * back to healthy life. In that case, fail any subsequent commands. 8734 */ 8735 if (dd->dc8051_timed_out) { 8736 if (dd->dc8051_timed_out > 1) { 8737 dd_dev_err(dd, 8738 "Previous 8051 host command timed out, skipping command %u\n", 8739 type); 8740 return_code = -ENXIO; 8741 goto fail; 8742 } 8743 _dc_shutdown(dd); 8744 _dc_start(dd); 8745 } 8746 8747 /* 8748 * If there is no timeout, then the 8051 command interface is 8749 * waiting for a command. 8750 */ 8751 8752 /* 8753 * When writing a LCB CSR, out_data contains the full value to 8754 * to be written, while in_data contains the relative LCB 8755 * address in 7:0. 
Do the work here, rather than the caller, 8756 * of distrubting the write data to where it needs to go: 8757 * 8758 * Write data 8759 * 39:00 -> in_data[47:8] 8760 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE 8761 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA 8762 */ 8763 if (type == HCMD_WRITE_LCB_CSR) { 8764 in_data |= ((*out_data) & 0xffffffffffull) << 8; 8765 /* must preserve COMPLETED - it is tied to hardware */ 8766 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0); 8767 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK; 8768 reg |= ((((*out_data) >> 40) & 0xff) << 8769 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT) 8770 | ((((*out_data) >> 48) & 0xffff) << 8771 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); 8772 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg); 8773 } 8774 8775 /* 8776 * Do two writes: the first to stabilize the type and req_data, the 8777 * second to activate. 8778 */ 8779 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK) 8780 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT 8781 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK) 8782 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT; 8783 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); 8784 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK; 8785 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); 8786 8787 /* wait for completion, alternate: interrupt */ 8788 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT); 8789 while (1) { 8790 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1); 8791 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK; 8792 if (completed) 8793 break; 8794 if (time_after(jiffies, timeout)) { 8795 dd->dc8051_timed_out++; 8796 dd_dev_err(dd, "8051 host command %u timeout\n", type); 8797 if (out_data) 8798 *out_data = 0; 8799 return_code = -ETIMEDOUT; 8800 goto fail; 8801 } 8802 udelay(2); 8803 } 8804 8805 if (out_data) { 8806 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT) 8807 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK; 8808 if (type == HCMD_READ_LCB_CSR) { 8809 /* top 16 bits are in a different register */ 8810 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1) 8811 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK) 8812 << (48 8813 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); 8814 } 8815 } 8816 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT) 8817 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK; 8818 dd->dc8051_timed_out = 0; 8819 /* 8820 * Clear command for next user. 8821 */ 8822 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0); 8823 8824 fail: 8825 mutex_unlock(&dd->dc8051_lock); 8826 return return_code; 8827 } 8828 8829 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) 8830 { 8831 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL); 8832 } 8833 8834 int load_8051_config(struct hfi1_devdata *dd, u8 field_id, 8835 u8 lane_id, u32 config_data) 8836 { 8837 u64 data; 8838 int ret; 8839 8840 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT 8841 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT 8842 | (u64)config_data << LOAD_DATA_DATA_SHIFT; 8843 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL); 8844 if (ret != HCMD_SUCCESS) { 8845 dd_dev_err(dd, 8846 "load 8051 config: field id %d, lane %d, err %d\n", 8847 (int)field_id, (int)lane_id, ret); 8848 } 8849 return ret; 8850 } 8851 8852 /* 8853 * Read the 8051 firmware "registers". Use the RAM directly. Always 8854 * set the result, even on error. 
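 *
 * Worked example of the address math below (NUM_GENERAL_FIELDS = 16 is
 * assumed here purely for illustration): lane_id 0, field_id 3 gives
 * addr = 4 * 16 + 3 * 4 = 76. The 8-byte read is truncated down to
 * address 72 by the hardware, and since addr & 0x4 is set the wanted
 * field is the upper 32 bits of big_data.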
8855 * Return 0 on success, -errno on failure 8856 */ 8857 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, 8858 u32 *result) 8859 { 8860 u64 big_data; 8861 u32 addr; 8862 int ret; 8863 8864 /* address start depends on the lane_id */ 8865 if (lane_id < 4) 8866 addr = (4 * NUM_GENERAL_FIELDS) 8867 + (lane_id * 4 * NUM_LANE_FIELDS); 8868 else 8869 addr = 0; 8870 addr += field_id * 4; 8871 8872 /* read is in 8-byte chunks, hardware will truncate the address down */ 8873 ret = read_8051_data(dd, addr, 8, &big_data); 8874 8875 if (ret == 0) { 8876 /* extract the 4 bytes we want */ 8877 if (addr & 0x4) 8878 *result = (u32)(big_data >> 32); 8879 else 8880 *result = (u32)big_data; 8881 } else { 8882 *result = 0; 8883 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", 8884 __func__, lane_id, field_id); 8885 } 8886 8887 return ret; 8888 } 8889 8890 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, 8891 u8 continuous) 8892 { 8893 u32 frame; 8894 8895 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT 8896 | power_management << POWER_MANAGEMENT_SHIFT; 8897 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, 8898 GENERAL_CONFIG, frame); 8899 } 8900 8901 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, 8902 u16 vl15buf, u8 crc_sizes) 8903 { 8904 u32 frame; 8905 8906 frame = (u32)vau << VAU_SHIFT 8907 | (u32)z << Z_SHIFT 8908 | (u32)vcu << VCU_SHIFT 8909 | (u32)vl15buf << VL15BUF_SHIFT 8910 | (u32)crc_sizes << CRC_SIZES_SHIFT; 8911 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, 8912 GENERAL_CONFIG, frame); 8913 } 8914 8915 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, 8916 u8 *flag_bits, u16 *link_widths) 8917 { 8918 u32 frame; 8919 8920 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, 8921 &frame); 8922 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; 8923 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; 8924 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; 8925 } 8926 8927 static int write_vc_local_link_mode(struct hfi1_devdata *dd, 8928 u8 misc_bits, 8929 u8 flag_bits, 8930 u16 link_widths) 8931 { 8932 u32 frame; 8933 8934 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT 8935 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT 8936 | (u32)link_widths << LINK_WIDTH_SHIFT; 8937 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, 8938 frame); 8939 } 8940 8941 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, 8942 u8 device_rev) 8943 { 8944 u32 frame; 8945 8946 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT) 8947 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT); 8948 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); 8949 } 8950 8951 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, 8952 u8 *device_rev) 8953 { 8954 u32 frame; 8955 8956 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); 8957 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK; 8958 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT) 8959 & REMOTE_DEVICE_REV_MASK; 8960 } 8961 8962 int write_host_interface_version(struct hfi1_devdata *dd, u8 version) 8963 { 8964 u32 frame; 8965 u32 mask; 8966 8967 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT); 8968 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); 8969 /* Clear, then set field */ 8970 frame &= ~mask; 8971 frame |= ((u32)version << 
HOST_INTERFACE_VERSION_SHIFT); 8972 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, 8973 frame); 8974 } 8975 8976 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, 8977 u8 *ver_patch) 8978 { 8979 u32 frame; 8980 8981 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); 8982 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) & 8983 STS_FM_VERSION_MAJOR_MASK; 8984 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) & 8985 STS_FM_VERSION_MINOR_MASK; 8986 8987 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); 8988 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) & 8989 STS_FM_VERSION_PATCH_MASK; 8990 } 8991 8992 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, 8993 u8 *continuous) 8994 { 8995 u32 frame; 8996 8997 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); 8998 *power_management = (frame >> POWER_MANAGEMENT_SHIFT) 8999 & POWER_MANAGEMENT_MASK; 9000 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT) 9001 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK; 9002 } 9003 9004 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, 9005 u8 *vcu, u16 *vl15buf, u8 *crc_sizes) 9006 { 9007 u32 frame; 9008 9009 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); 9010 *vau = (frame >> VAU_SHIFT) & VAU_MASK; 9011 *z = (frame >> Z_SHIFT) & Z_MASK; 9012 *vcu = (frame >> VCU_SHIFT) & VCU_MASK; 9013 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK; 9014 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK; 9015 } 9016 9017 static void read_vc_remote_link_width(struct hfi1_devdata *dd, 9018 u8 *remote_tx_rate, 9019 u16 *link_widths) 9020 { 9021 u32 frame; 9022 9023 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, 9024 &frame); 9025 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) 9026 & REMOTE_TX_RATE_MASK; 9027 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; 9028 } 9029 9030 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) 9031 { 9032 u32 frame; 9033 9034 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); 9035 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK; 9036 } 9037 9038 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) 9039 { 9040 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls); 9041 } 9042 9043 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) 9044 { 9045 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs); 9046 } 9047 9048 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) 9049 { 9050 u32 frame; 9051 int ret; 9052 9053 *link_quality = 0; 9054 if (dd->pport->host_link_state & HLS_UP) { 9055 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, 9056 &frame); 9057 if (ret == 0) 9058 *link_quality = (frame >> LINK_QUALITY_SHIFT) 9059 & LINK_QUALITY_MASK; 9060 } 9061 } 9062 9063 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) 9064 { 9065 u32 frame; 9066 9067 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); 9068 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK; 9069 } 9070 9071 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) 9072 { 9073 u32 frame; 9074 9075 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); 9076 *ldr = (frame & 0xff); 9077 } 9078 9079 static int read_tx_settings(struct hfi1_devdata *dd, 9080 u8 *enable_lane_tx, 9081 u8 
*tx_polarity_inversion, 9082 u8 *rx_polarity_inversion, 9083 u8 *max_rate) 9084 { 9085 u32 frame; 9086 int ret; 9087 9088 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); 9089 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT) 9090 & ENABLE_LANE_TX_MASK; 9091 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT) 9092 & TX_POLARITY_INVERSION_MASK; 9093 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT) 9094 & RX_POLARITY_INVERSION_MASK; 9095 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK; 9096 return ret; 9097 } 9098 9099 static int write_tx_settings(struct hfi1_devdata *dd, 9100 u8 enable_lane_tx, 9101 u8 tx_polarity_inversion, 9102 u8 rx_polarity_inversion, 9103 u8 max_rate) 9104 { 9105 u32 frame; 9106 9107 /* no need to mask, all variable sizes match field widths */ 9108 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT 9109 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT 9110 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT 9111 | max_rate << MAX_RATE_SHIFT; 9112 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); 9113 } 9114 9115 /* 9116 * Read an idle LCB message. 9117 * 9118 * Returns 0 on success, -EINVAL on error 9119 */ 9120 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) 9121 { 9122 int ret; 9123 9124 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); 9125 if (ret != HCMD_SUCCESS) { 9126 dd_dev_err(dd, "read idle message: type %d, err %d\n", 9127 (u32)type, ret); 9128 return -EINVAL; 9129 } 9130 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); 9131 /* return only the payload as we already know the type */ 9132 *data_out >>= IDLE_PAYLOAD_SHIFT; 9133 return 0; 9134 } 9135 9136 /* 9137 * Read an idle SMA message. To be done in response to a notification from 9138 * the 8051. 9139 * 9140 * Returns 0 on success, -EINVAL on error 9141 */ 9142 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) 9143 { 9144 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, 9145 data); 9146 } 9147 9148 /* 9149 * Send an idle LCB message. 9150 * 9151 * Returns 0 on success, -EINVAL on error 9152 */ 9153 static int send_idle_message(struct hfi1_devdata *dd, u64 data) 9154 { 9155 int ret; 9156 9157 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data); 9158 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); 9159 if (ret != HCMD_SUCCESS) { 9160 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", 9161 data, ret); 9162 return -EINVAL; 9163 } 9164 return 0; 9165 } 9166 9167 /* 9168 * Send an idle SMA message. 9169 * 9170 * Returns 0 on success, -EINVAL on error 9171 */ 9172 int send_idle_sma(struct hfi1_devdata *dd, u64 message) 9173 { 9174 u64 data; 9175 9176 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) | 9177 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); 9178 return send_idle_message(dd, data); 9179 } 9180 9181 /* 9182 * Initialize the LCB then do a quick link up. This may or may not be 9183 * in loopback. 
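 * (A rough outline of the steps below: shut the LCB down, optionally
 * configure LCB loopback, release the TX FIFO reset, run the simulator
 * only LCB start, pause for the peer when not in loopback, then ask the
 * 8051 for PLS_QUICK_LINKUP.)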
9184 * 9185 * return 0 on success, -errno on error 9186 */ 9187 static int do_quick_linkup(struct hfi1_devdata *dd) 9188 { 9189 int ret; 9190 9191 lcb_shutdown(dd, 0); 9192 9193 if (loopback) { 9194 /* LCB_CFG_LOOPBACK.VAL = 2 */ 9195 /* LCB_CFG_LANE_WIDTH.VAL = 0 */ 9196 write_csr(dd, DC_LCB_CFG_LOOPBACK, 9197 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); 9198 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); 9199 } 9200 9201 /* start the LCBs */ 9202 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */ 9203 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 9204 9205 /* simulator only loopback steps */ 9206 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 9207 /* LCB_CFG_RUN.EN = 1 */ 9208 write_csr(dd, DC_LCB_CFG_RUN, 9209 1ull << DC_LCB_CFG_RUN_EN_SHIFT); 9210 9211 ret = wait_link_transfer_active(dd, 10); 9212 if (ret) 9213 return ret; 9214 9215 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 9216 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT); 9217 } 9218 9219 if (!loopback) { 9220 /* 9221 * When doing quick linkup and not in loopback, both 9222 * sides must be done with LCB set-up before either 9223 * starts the quick linkup. Put a delay here so that 9224 * both sides can be started and have a chance to be 9225 * done with LCB set up before resuming. 9226 */ 9227 dd_dev_err(dd, 9228 "Pausing for peer to be finished with LCB set up\n"); 9229 msleep(5000); 9230 dd_dev_err(dd, "Continuing with quick linkup\n"); 9231 } 9232 9233 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ 9234 set_8051_lcb_access(dd); 9235 9236 /* 9237 * State "quick" LinkUp request sets the physical link state to 9238 * LinkUp without a verify capability sequence. 9239 * This state is in simulator v37 and later. 9240 */ 9241 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); 9242 if (ret != HCMD_SUCCESS) { 9243 dd_dev_err(dd, 9244 "%s: set physical link state to quick LinkUp failed with return %d\n", 9245 __func__, ret); 9246 9247 set_host_lcb_access(dd); 9248 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ 9249 9250 if (ret >= 0) 9251 ret = -EINVAL; 9252 return ret; 9253 } 9254 9255 return 0; /* success */ 9256 } 9257 9258 /* 9259 * Do all special steps to set up loopback. 9260 */ 9261 static int init_loopback(struct hfi1_devdata *dd) 9262 { 9263 dd_dev_info(dd, "Entering loopback mode\n"); 9264 9265 /* all loopbacks should disable self GUID check */ 9266 write_csr(dd, DC_DC8051_CFG_MODE, 9267 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); 9268 9269 /* 9270 * The simulator has only one loopback option - LCB. Switch 9271 * to that option, which includes quick link up. 9272 * 9273 * Accept all valid loopback values. 
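 *
 * Handling of the "loopback" module parameter below, in summary:
 *   simulator + any valid value -> forced to LOOPBACK_LCB, quick_linkup
 *   LOOPBACK_SERDES             -> set up later in set_local_link_attributes()
 *   LOOPBACK_LCB                -> quick_linkup (rejected on FPGA emulation)
 *   LOOPBACK_CABLE              -> no extra steps
 *   anything else               -> -EINVAL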
9274 */ 9275 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && 9276 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || 9277 loopback == LOOPBACK_CABLE)) { 9278 loopback = LOOPBACK_LCB; 9279 quick_linkup = 1; 9280 return 0; 9281 } 9282 9283 /* 9284 * SerDes loopback init sequence is handled in set_local_link_attributes 9285 */ 9286 if (loopback == LOOPBACK_SERDES) 9287 return 0; 9288 9289 /* LCB loopback - handled at poll time */ 9290 if (loopback == LOOPBACK_LCB) { 9291 quick_linkup = 1; /* LCB is always quick linkup */ 9292 9293 /* not supported in emulation due to emulation RTL changes */ 9294 if (dd->icode == ICODE_FPGA_EMULATION) { 9295 dd_dev_err(dd, 9296 "LCB loopback not supported in emulation\n"); 9297 return -EINVAL; 9298 } 9299 return 0; 9300 } 9301 9302 /* external cable loopback requires no extra steps */ 9303 if (loopback == LOOPBACK_CABLE) 9304 return 0; 9305 9306 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback); 9307 return -EINVAL; 9308 } 9309 9310 /* 9311 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits 9312 * used in the Verify Capability link width attribute. 9313 */ 9314 static u16 opa_to_vc_link_widths(u16 opa_widths) 9315 { 9316 int i; 9317 u16 result = 0; 9318 9319 static const struct link_bits { 9320 u16 from; 9321 u16 to; 9322 } opa_link_xlate[] = { 9323 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, 9324 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, 9325 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, 9326 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, 9327 }; 9328 9329 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) { 9330 if (opa_widths & opa_link_xlate[i].from) 9331 result |= opa_link_xlate[i].to; 9332 } 9333 return result; 9334 } 9335 9336 /* 9337 * Set link attributes before moving to polling. 9338 */ 9339 static int set_local_link_attributes(struct hfi1_pportdata *ppd) 9340 { 9341 struct hfi1_devdata *dd = ppd->dd; 9342 u8 enable_lane_tx; 9343 u8 tx_polarity_inversion; 9344 u8 rx_polarity_inversion; 9345 int ret; 9346 u32 misc_bits = 0; 9347 /* reset our fabric serdes to clear any lingering problems */ 9348 fabric_serdes_reset(dd); 9349 9350 /* set the local tx rate - need to read-modify-write */ 9351 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, 9352 &rx_polarity_inversion, &ppd->local_tx_rate); 9353 if (ret) 9354 goto set_local_link_attributes_fail; 9355 9356 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { 9357 /* set the tx rate to the fastest enabled */ 9358 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) 9359 ppd->local_tx_rate = 1; 9360 else 9361 ppd->local_tx_rate = 0; 9362 } else { 9363 /* set the tx rate to all enabled */ 9364 ppd->local_tx_rate = 0; 9365 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) 9366 ppd->local_tx_rate |= 2; 9367 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) 9368 ppd->local_tx_rate |= 1; 9369 } 9370 9371 enable_lane_tx = 0xF; /* enable all four lanes */ 9372 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, 9373 rx_polarity_inversion, ppd->local_tx_rate); 9374 if (ret != HCMD_SUCCESS) 9375 goto set_local_link_attributes_fail; 9376 9377 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); 9378 if (ret != HCMD_SUCCESS) { 9379 dd_dev_err(dd, 9380 "Failed to set host interface version, return 0x%x\n", 9381 ret); 9382 goto set_local_link_attributes_fail; 9383 } 9384 9385 /* 9386 * DC supports continuous updates. 
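 * The sequence below loads the local verify-capability frames in order:
 * PHY (power management, continuous updates), fabric (vAU/z/vCU, VL15
 * credits, CRC sizes), link mode (misc bits and enabled widths), and
 * finally the local device id so the peer knows who we are.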
9387 */ 9388 ret = write_vc_local_phy(dd, 9389 0 /* no power management */, 9390 1 /* continuous updates */); 9391 if (ret != HCMD_SUCCESS) 9392 goto set_local_link_attributes_fail; 9393 9394 /* z=1 in the next call: AU of 0 is not supported by the hardware */ 9395 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, 9396 ppd->port_crc_mode_enabled); 9397 if (ret != HCMD_SUCCESS) 9398 goto set_local_link_attributes_fail; 9399 9400 /* 9401 * SerDes loopback init sequence requires 9402 * setting bit 0 of MISC_CONFIG_BITS 9403 */ 9404 if (loopback == LOOPBACK_SERDES) 9405 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT; 9406 9407 /* 9408 * An external device configuration request is used to reset the LCB 9409 * to retry to obtain operational lanes when the first attempt is 9410 * unsuccesful. 9411 */ 9412 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) 9413 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT; 9414 9415 ret = write_vc_local_link_mode(dd, misc_bits, 0, 9416 opa_to_vc_link_widths( 9417 ppd->link_width_enabled)); 9418 if (ret != HCMD_SUCCESS) 9419 goto set_local_link_attributes_fail; 9420 9421 /* let peer know who we are */ 9422 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); 9423 if (ret == HCMD_SUCCESS) 9424 return 0; 9425 9426 set_local_link_attributes_fail: 9427 dd_dev_err(dd, 9428 "Failed to set local link attributes, return 0x%x\n", 9429 ret); 9430 return ret; 9431 } 9432 9433 /* 9434 * Call this to start the link. 9435 * Do not do anything if the link is disabled. 9436 * Returns 0 if link is disabled, moved to polling, or the driver is not ready. 9437 */ 9438 int start_link(struct hfi1_pportdata *ppd) 9439 { 9440 /* 9441 * Tune the SerDes to a ballpark setting for optimal signal and bit 9442 * error rate. Needs to be done before starting the link. 9443 */ 9444 tune_serdes(ppd); 9445 9446 if (!ppd->driver_link_ready) { 9447 dd_dev_info(ppd->dd, 9448 "%s: stopping link start because driver is not ready\n", 9449 __func__); 9450 return 0; 9451 } 9452 9453 /* 9454 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the 9455 * pkey table can be configured properly if the HFI unit is connected 9456 * to switch port with MgmtAllowed=NO 9457 */ 9458 clear_full_mgmt_pkey(ppd); 9459 9460 return set_link_state(ppd, HLS_DN_POLL); 9461 } 9462 9463 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd) 9464 { 9465 struct hfi1_devdata *dd = ppd->dd; 9466 u64 mask; 9467 unsigned long timeout; 9468 9469 /* 9470 * Some QSFP cables have a quirk that asserts the IntN line as a side 9471 * effect of power up on plug-in. We ignore this false positive 9472 * interrupt until the module has finished powering up by waiting for 9473 * a minimum timeout of the module inrush initialization time of 9474 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the 9475 * module have stabilized. 9476 */ 9477 msleep(500); 9478 9479 /* 9480 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) 9481 */ 9482 timeout = jiffies + msecs_to_jiffies(2000); 9483 while (1) { 9484 mask = read_csr(dd, dd->hfi1_id ? 9485 ASIC_QSFP2_IN : ASIC_QSFP1_IN); 9486 if (!(mask & QSFP_HFI0_INT_N)) 9487 break; 9488 if (time_after(jiffies, timeout)) { 9489 dd_dev_info(dd, "%s: No IntN detected, reset complete\n", 9490 __func__); 9491 break; 9492 } 9493 udelay(2); 9494 } 9495 } 9496 9497 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) 9498 { 9499 struct hfi1_devdata *dd = ppd->dd; 9500 u64 mask; 9501 9502 mask = read_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); 9503 if (enable) { 9504 /* 9505 * Clear the status register to avoid an immediate interrupt 9506 * when we re-enable the IntN pin 9507 */ 9508 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, 9509 QSFP_HFI0_INT_N); 9510 mask |= (u64)QSFP_HFI0_INT_N; 9511 } else { 9512 mask &= ~(u64)QSFP_HFI0_INT_N; 9513 } 9514 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); 9515 } 9516 9517 int reset_qsfp(struct hfi1_pportdata *ppd) 9518 { 9519 struct hfi1_devdata *dd = ppd->dd; 9520 u64 mask, qsfp_mask; 9521 9522 /* Disable INT_N from triggering QSFP interrupts */ 9523 set_qsfp_int_n(ppd, 0); 9524 9525 /* Reset the QSFP */ 9526 mask = (u64)QSFP_HFI0_RESET_N; 9527 9528 qsfp_mask = read_csr(dd, 9529 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); 9530 qsfp_mask &= ~mask; 9531 write_csr(dd, 9532 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); 9533 9534 udelay(10); 9535 9536 qsfp_mask |= mask; 9537 write_csr(dd, 9538 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); 9539 9540 wait_for_qsfp_init(ppd); 9541 9542 /* 9543 * Allow INT_N to trigger the QSFP interrupt to watch 9544 * for alarms and warnings 9545 */ 9546 set_qsfp_int_n(ppd, 1); 9547 9548 /* 9549 * After the reset, AOC transmitters are enabled by default. They need 9550 * to be turned off to complete the QSFP setup before they can be 9551 * enabled again. 9552 */ 9553 return set_qsfp_tx(ppd, 0); 9554 } 9555 9556 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, 9557 u8 *qsfp_interrupt_status) 9558 { 9559 struct hfi1_devdata *dd = ppd->dd; 9560 9561 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) || 9562 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) 9563 dd_dev_err(dd, "%s: QSFP cable temperature too high\n", 9564 __func__); 9565 9566 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) || 9567 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) 9568 dd_dev_err(dd, "%s: QSFP cable temperature too low\n", 9569 __func__); 9570 9571 /* 9572 * The remaining alarms/warnings don't matter if the link is down. 
9573 */ 9574 if (ppd->host_link_state & HLS_DOWN) 9575 return 0; 9576 9577 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || 9578 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) 9579 dd_dev_err(dd, "%s: QSFP supply voltage too high\n", 9580 __func__); 9581 9582 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) || 9583 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) 9584 dd_dev_err(dd, "%s: QSFP supply voltage too low\n", 9585 __func__); 9586 9587 /* Byte 2 is vendor specific */ 9588 9589 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) || 9590 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) 9591 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", 9592 __func__); 9593 9594 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) || 9595 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) 9596 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", 9597 __func__); 9598 9599 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) || 9600 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) 9601 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", 9602 __func__); 9603 9604 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) || 9605 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) 9606 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", 9607 __func__); 9608 9609 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) || 9610 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) 9611 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", 9612 __func__); 9613 9614 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) || 9615 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) 9616 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", 9617 __func__); 9618 9619 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) || 9620 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) 9621 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", 9622 __func__); 9623 9624 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) || 9625 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) 9626 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", 9627 __func__); 9628 9629 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) || 9630 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) 9631 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", 9632 __func__); 9633 9634 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) || 9635 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING)) 9636 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", 9637 __func__); 9638 9639 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) || 9640 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) 9641 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", 9642 __func__); 9643 9644 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) || 9645 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) 9646 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", 9647 __func__); 9648 9649 /* Bytes 9-10 and 11-12 are reserved */ 9650 /* Bytes 13-15 are vendor specific */ 9651 9652 return 0; 9653 } 9654 9655 /* This routine will only be scheduled if the QSFP module present is asserted */ 9656 void qsfp_event(struct work_struct *work) 9657 { 9658 struct qsfp_data *qd; 9659 struct hfi1_pportdata *ppd; 9660 struct hfi1_devdata *dd; 9661 9662 qd = container_of(work, struct qsfp_data, qsfp_work); 9663 ppd = qd->ppd; 9664 dd = ppd->dd; 9665 9666 /* Sanity check */ 9667 if (!qsfp_mod_present(ppd)) 9668 return; 9669 9670 if (ppd->host_link_state == HLS_DN_DISABLE) { 
9671 dd_dev_info(ppd->dd, 9672 "%s: stopping link start because link is disabled\n", 9673 __func__); 9674 return; 9675 } 9676 9677 /* 9678 * Turn DC back on after cable has been re-inserted. Up until 9679 * now, the DC has been in reset to save power. 9680 */ 9681 dc_start(dd); 9682 9683 if (qd->cache_refresh_required) { 9684 set_qsfp_int_n(ppd, 0); 9685 9686 wait_for_qsfp_init(ppd); 9687 9688 /* 9689 * Allow INT_N to trigger the QSFP interrupt to watch 9690 * for alarms and warnings 9691 */ 9692 set_qsfp_int_n(ppd, 1); 9693 9694 start_link(ppd); 9695 } 9696 9697 if (qd->check_interrupt_flags) { 9698 u8 qsfp_interrupt_status[16] = {0,}; 9699 9700 if (one_qsfp_read(ppd, dd->hfi1_id, 6, 9701 &qsfp_interrupt_status[0], 16) != 16) { 9702 dd_dev_info(dd, 9703 "%s: Failed to read status of QSFP module\n", 9704 __func__); 9705 } else { 9706 unsigned long flags; 9707 9708 handle_qsfp_error_conditions( 9709 ppd, qsfp_interrupt_status); 9710 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 9711 ppd->qsfp_info.check_interrupt_flags = 0; 9712 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 9713 flags); 9714 } 9715 } 9716 } 9717 9718 void init_qsfp_int(struct hfi1_devdata *dd) 9719 { 9720 struct hfi1_pportdata *ppd = dd->pport; 9721 u64 qsfp_mask; 9722 9723 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); 9724 /* Clear current status to avoid spurious interrupts */ 9725 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, 9726 qsfp_mask); 9727 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, 9728 qsfp_mask); 9729 9730 set_qsfp_int_n(ppd, 0); 9731 9732 /* Handle active low nature of INT_N and MODPRST_N pins */ 9733 if (qsfp_mod_present(ppd)) 9734 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N; 9735 write_csr(dd, 9736 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, 9737 qsfp_mask); 9738 9739 /* Enable the appropriate QSFP IRQ source */ 9740 if (!dd->hfi1_id) 9741 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); 9742 else 9743 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); 9744 } 9745 9746 /* 9747 * Do a one-time initialize of the LCB block. 9748 */ 9749 static void init_lcb(struct hfi1_devdata *dd) 9750 { 9751 /* simulator does not correctly handle LCB cclk loopback, skip */ 9752 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) 9753 return; 9754 9755 /* the DC has been reset earlier in the driver load */ 9756 9757 /* set LCB for cclk loopback on the port */ 9758 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01); 9759 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00); 9760 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00); 9761 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); 9762 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08); 9763 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02); 9764 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); 9765 } 9766 9767 /* 9768 * Perform a test read on the QSFP. Return 0 on success, -ERRNO 9769 * on error. 9770 */ 9771 static int test_qsfp_read(struct hfi1_pportdata *ppd) 9772 { 9773 int ret; 9774 u8 status; 9775 9776 /* 9777 * Report success if not a QSFP or, if it is a QSFP, but the cable is 9778 * not present 9779 */ 9780 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) 9781 return 0; 9782 9783 /* read byte 2, the status byte */ 9784 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); 9785 if (ret < 0) 9786 return ret; 9787 if (ret != 1) 9788 return -EIO; 9789 9790 return 0; /* success */ 9791 } 9792 9793 /* 9794 * Values for QSFP retry. 9795 * 9796 * Give up after 10s (20 x 500ms). 
The overall timeout was empirically 9797 * arrived at from experience on a large cluster. 9798 */ 9799 #define MAX_QSFP_RETRIES 20 9800 #define QSFP_RETRY_WAIT 500 /* msec */ 9801 9802 /* 9803 * Try a QSFP read. If it fails, schedule a retry for later. 9804 * Called on first link activation after driver load. 9805 */ 9806 static void try_start_link(struct hfi1_pportdata *ppd) 9807 { 9808 if (test_qsfp_read(ppd)) { 9809 /* read failed */ 9810 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { 9811 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); 9812 return; 9813 } 9814 dd_dev_info(ppd->dd, 9815 "QSFP not responding, waiting and retrying %d\n", 9816 (int)ppd->qsfp_retry_count); 9817 ppd->qsfp_retry_count++; 9818 queue_delayed_work(ppd->link_wq, &ppd->start_link_work, 9819 msecs_to_jiffies(QSFP_RETRY_WAIT)); 9820 return; 9821 } 9822 ppd->qsfp_retry_count = 0; 9823 9824 start_link(ppd); 9825 } 9826 9827 /* 9828 * Workqueue function to start the link after a delay. 9829 */ 9830 void handle_start_link(struct work_struct *work) 9831 { 9832 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 9833 start_link_work.work); 9834 try_start_link(ppd); 9835 } 9836 9837 int bringup_serdes(struct hfi1_pportdata *ppd) 9838 { 9839 struct hfi1_devdata *dd = ppd->dd; 9840 u64 guid; 9841 int ret; 9842 9843 if (HFI1_CAP_IS_KSET(EXTENDED_PSN)) 9844 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK); 9845 9846 guid = ppd->guids[HFI1_PORT_GUID_INDEX]; 9847 if (!guid) { 9848 if (dd->base_guid) 9849 guid = dd->base_guid + ppd->port - 1; 9850 ppd->guids[HFI1_PORT_GUID_INDEX] = guid; 9851 } 9852 9853 /* Set linkinit_reason on power up per OPA spec */ 9854 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; 9855 9856 /* one-time init of the LCB */ 9857 init_lcb(dd); 9858 9859 if (loopback) { 9860 ret = init_loopback(dd); 9861 if (ret < 0) 9862 return ret; 9863 } 9864 9865 get_port_type(ppd); 9866 if (ppd->port_type == PORT_TYPE_QSFP) { 9867 set_qsfp_int_n(ppd, 0); 9868 wait_for_qsfp_init(ppd); 9869 set_qsfp_int_n(ppd, 1); 9870 } 9871 9872 try_start_link(ppd); 9873 return 0; 9874 } 9875 9876 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) 9877 { 9878 struct hfi1_devdata *dd = ppd->dd; 9879 9880 /* 9881 * Shut down the link and keep it down. First turn off that the 9882 * driver wants to allow the link to be up (driver_link_ready). 9883 * Then make sure the link is not automatically restarted 9884 * (link_enabled). Cancel any pending restart. And finally 9885 * go offline. 
9886 */ 9887 ppd->driver_link_ready = 0; 9888 ppd->link_enabled = 0; 9889 9890 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ 9891 flush_delayed_work(&ppd->start_link_work); 9892 cancel_delayed_work_sync(&ppd->start_link_work); 9893 9894 ppd->offline_disabled_reason = 9895 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT); 9896 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0, 9897 OPA_LINKDOWN_REASON_REBOOT); 9898 set_link_state(ppd, HLS_DN_OFFLINE); 9899 9900 /* disable the port */ 9901 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 9902 cancel_work_sync(&ppd->freeze_work); 9903 } 9904 9905 static inline int init_cpu_counters(struct hfi1_devdata *dd) 9906 { 9907 struct hfi1_pportdata *ppd; 9908 int i; 9909 9910 ppd = (struct hfi1_pportdata *)(dd + 1); 9911 for (i = 0; i < dd->num_pports; i++, ppd++) { 9912 ppd->ibport_data.rvp.rc_acks = NULL; 9913 ppd->ibport_data.rvp.rc_qacks = NULL; 9914 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); 9915 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); 9916 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); 9917 if (!ppd->ibport_data.rvp.rc_acks || 9918 !ppd->ibport_data.rvp.rc_delayed_comp || 9919 !ppd->ibport_data.rvp.rc_qacks) 9920 return -ENOMEM; 9921 } 9922 9923 return 0; 9924 } 9925 9926 /* 9927 * index is the index into the receive array 9928 */ 9929 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, 9930 u32 type, unsigned long pa, u16 order) 9931 { 9932 u64 reg; 9933 9934 if (!(dd->flags & HFI1_PRESENT)) 9935 goto done; 9936 9937 if (type == PT_INVALID || type == PT_INVALID_FLUSH) { 9938 pa = 0; 9939 order = 0; 9940 } else if (type > PT_INVALID) { 9941 dd_dev_err(dd, 9942 "unexpected receive array type %u for index %u, not handled\n", 9943 type, index); 9944 goto done; 9945 } 9946 trace_hfi1_put_tid(dd, index, type, pa, order); 9947 9948 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */ 9949 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK 9950 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT 9951 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK) 9952 << RCV_ARRAY_RT_ADDR_SHIFT; 9953 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); 9954 writeq(reg, dd->rcvarray_wc + (index * 8)); 9955 9956 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3) 9957 /* 9958 * Eager entries are written and flushed 9959 * 9960 * Expected entries are flushed every 4 writes 9961 */ 9962 flush_wc(); 9963 done: 9964 return; 9965 } 9966 9967 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd) 9968 { 9969 struct hfi1_devdata *dd = rcd->dd; 9970 u32 i; 9971 9972 /* this could be optimized */ 9973 for (i = rcd->eager_base; i < rcd->eager_base + 9974 rcd->egrbufs.alloced; i++) 9975 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); 9976 9977 for (i = rcd->expected_base; 9978 i < rcd->expected_base + rcd->expected_count; i++) 9979 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); 9980 } 9981 9982 static const char * const ib_cfg_name_strings[] = { 9983 "HFI1_IB_CFG_LIDLMC", 9984 "HFI1_IB_CFG_LWID_DG_ENB", 9985 "HFI1_IB_CFG_LWID_ENB", 9986 "HFI1_IB_CFG_LWID", 9987 "HFI1_IB_CFG_SPD_ENB", 9988 "HFI1_IB_CFG_SPD", 9989 "HFI1_IB_CFG_RXPOL_ENB", 9990 "HFI1_IB_CFG_LREV_ENB", 9991 "HFI1_IB_CFG_LINKLATENCY", 9992 "HFI1_IB_CFG_HRTBT", 9993 "HFI1_IB_CFG_OP_VLS", 9994 "HFI1_IB_CFG_VL_HIGH_CAP", 9995 "HFI1_IB_CFG_VL_LOW_CAP", 9996 "HFI1_IB_CFG_OVERRUN_THRESH", 9997 "HFI1_IB_CFG_PHYERR_THRESH", 9998 "HFI1_IB_CFG_LINKDEFAULT", 9999 "HFI1_IB_CFG_PKEYS", 10000 "HFI1_IB_CFG_MTU", 10001 "HFI1_IB_CFG_LSTATE", 10002 "HFI1_IB_CFG_VL_HIGH_LIMIT", 10003 
"HFI1_IB_CFG_PMA_TICKS", 10004 "HFI1_IB_CFG_PORT" 10005 }; 10006 10007 static const char *ib_cfg_name(int which) 10008 { 10009 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings)) 10010 return "invalid"; 10011 return ib_cfg_name_strings[which]; 10012 } 10013 10014 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) 10015 { 10016 struct hfi1_devdata *dd = ppd->dd; 10017 int val = 0; 10018 10019 switch (which) { 10020 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ 10021 val = ppd->link_width_enabled; 10022 break; 10023 case HFI1_IB_CFG_LWID: /* currently active Link-width */ 10024 val = ppd->link_width_active; 10025 break; 10026 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ 10027 val = ppd->link_speed_enabled; 10028 break; 10029 case HFI1_IB_CFG_SPD: /* current Link speed */ 10030 val = ppd->link_speed_active; 10031 break; 10032 10033 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ 10034 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ 10035 case HFI1_IB_CFG_LINKLATENCY: 10036 goto unimplemented; 10037 10038 case HFI1_IB_CFG_OP_VLS: 10039 val = ppd->actual_vls_operational; 10040 break; 10041 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ 10042 val = VL_ARB_HIGH_PRIO_TABLE_SIZE; 10043 break; 10044 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */ 10045 val = VL_ARB_LOW_PRIO_TABLE_SIZE; 10046 break; 10047 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ 10048 val = ppd->overrun_threshold; 10049 break; 10050 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ 10051 val = ppd->phy_error_threshold; 10052 break; 10053 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ 10054 val = HLS_DEFAULT; 10055 break; 10056 10057 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */ 10058 case HFI1_IB_CFG_PMA_TICKS: 10059 default: 10060 unimplemented: 10061 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) 10062 dd_dev_info( 10063 dd, 10064 "%s: which %s: not implemented\n", 10065 __func__, 10066 ib_cfg_name(which)); 10067 break; 10068 } 10069 10070 return val; 10071 } 10072 10073 /* 10074 * The largest MAD packet size. 10075 */ 10076 #define MAX_MAD_PACKET 2048 10077 10078 /* 10079 * Return the maximum header bytes that can go on the _wire_ 10080 * for this device. This count includes the ICRC which is 10081 * not part of the packet held in memory but it is appended 10082 * by the HW. 10083 * This is dependent on the device's receive header entry size. 10084 * HFI allows this to be set per-receive context, but the 10085 * driver presently enforces a global value. 10086 */ 10087 u32 lrh_max_header_bytes(struct hfi1_devdata *dd) 10088 { 10089 /* 10090 * The maximum non-payload (MTU) bytes in LRH.PktLen are 10091 * the Receive Header Entry Size minus the PBC (or RHF) size 10092 * plus one DW for the ICRC appended by HW. 10093 * 10094 * dd->rcd[0].rcvhdrqentsize is in DW. 10095 * We use rcd[0] as all context will have the same value. Also, 10096 * the first kernel context would have been allocated by now so 10097 * we are guaranteed a valid value. 10098 */ 10099 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; 10100 } 10101 10102 /* 10103 * Set Send Length 10104 * @ppd: per port data 10105 * 10106 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck* 10107 * registers compare against LRH.PktLen, so use the max bytes included 10108 * in the LRH. 10109 * 10110 * This routine changes all VL values except VL15, which it maintains at 10111 * the same value. 
10112 */ 10113 static void set_send_length(struct hfi1_pportdata *ppd) 10114 { 10115 struct hfi1_devdata *dd = ppd->dd; 10116 u32 max_hb = lrh_max_header_bytes(dd), dcmtu; 10117 u32 maxvlmtu = dd->vld[15].mtu; 10118 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) 10119 & SEND_LEN_CHECK1_LEN_VL15_MASK) << 10120 SEND_LEN_CHECK1_LEN_VL15_SHIFT; 10121 int i, j; 10122 u32 thres; 10123 10124 for (i = 0; i < ppd->vls_supported; i++) { 10125 if (dd->vld[i].mtu > maxvlmtu) 10126 maxvlmtu = dd->vld[i].mtu; 10127 if (i <= 3) 10128 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) 10129 & SEND_LEN_CHECK0_LEN_VL0_MASK) << 10130 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT); 10131 else 10132 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) 10133 & SEND_LEN_CHECK1_LEN_VL4_MASK) << 10134 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT); 10135 } 10136 write_csr(dd, SEND_LEN_CHECK0, len1); 10137 write_csr(dd, SEND_LEN_CHECK1, len2); 10138 /* adjust kernel credit return thresholds based on new MTUs */ 10139 /* all kernel receive contexts have the same hdrqentsize */ 10140 for (i = 0; i < ppd->vls_supported; i++) { 10141 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), 10142 sc_mtu_to_threshold(dd->vld[i].sc, 10143 dd->vld[i].mtu, 10144 get_hdrqentsize(dd->rcd[0]))); 10145 for (j = 0; j < INIT_SC_PER_VL; j++) 10146 sc_set_cr_threshold( 10147 pio_select_send_context_vl(dd, j, i), 10148 thres); 10149 } 10150 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), 10151 sc_mtu_to_threshold(dd->vld[15].sc, 10152 dd->vld[15].mtu, 10153 dd->rcd[0]->rcvhdrqentsize)); 10154 sc_set_cr_threshold(dd->vld[15].sc, thres); 10155 10156 /* Adjust maximum MTU for the port in DC */ 10157 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 : 10158 (ilog2(maxvlmtu >> 8) + 1); 10159 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); 10160 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK; 10161 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) << 10162 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT; 10163 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); 10164 } 10165 10166 static void set_lidlmc(struct hfi1_pportdata *ppd) 10167 { 10168 int i; 10169 u64 sreg = 0; 10170 struct hfi1_devdata *dd = ppd->dd; 10171 u32 mask = ~((1U << ppd->lmc) - 1); 10172 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); 10173 u32 lid; 10174 10175 /* 10176 * Program 0 in CSR if port lid is extended. This prevents 10177 * 9B packets being sent out for large lids. 10178 */ 10179 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 
0 : ppd->lid; 10180 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 10181 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); 10182 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) 10183 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) | 10184 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK) 10185 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT); 10186 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); 10187 10188 /* 10189 * Iterate over all the send contexts and set their SLID check 10190 */ 10191 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) << 10192 SEND_CTXT_CHECK_SLID_MASK_SHIFT) | 10193 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) << 10194 SEND_CTXT_CHECK_SLID_VALUE_SHIFT); 10195 10196 for (i = 0; i < chip_send_contexts(dd); i++) { 10197 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x", 10198 i, (u32)sreg); 10199 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg); 10200 } 10201 10202 /* Now we have to do the same thing for the sdma engines */ 10203 sdma_update_lmc(dd, mask, lid); 10204 } 10205 10206 static const char *state_completed_string(u32 completed) 10207 { 10208 static const char * const state_completed[] = { 10209 "EstablishComm", 10210 "OptimizeEQ", 10211 "VerifyCap" 10212 }; 10213 10214 if (completed < ARRAY_SIZE(state_completed)) 10215 return state_completed[completed]; 10216 10217 return "unknown"; 10218 } 10219 10220 static const char all_lanes_dead_timeout_expired[] = 10221 "All lanes were inactive – was the interconnect media removed?"; 10222 static const char tx_out_of_policy[] = 10223 "Passing lanes on local port do not meet the local link width policy"; 10224 static const char no_state_complete[] = 10225 "State timeout occurred before link partner completed the state"; 10226 static const char * const state_complete_reasons[] = { 10227 [0x00] = "Reason unknown", 10228 [0x01] = "Link was halted by driver, refer to LinkDownReason", 10229 [0x02] = "Link partner reported failure", 10230 [0x10] = "Unable to achieve frame sync on any lane", 10231 [0x11] = 10232 "Unable to find a common bit rate with the link partner", 10233 [0x12] = 10234 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy", 10235 [0x13] = 10236 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy", 10237 [0x14] = no_state_complete, 10238 [0x15] = 10239 "State timeout occurred before link partner identified equalization presets", 10240 [0x16] = 10241 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy", 10242 [0x17] = tx_out_of_policy, 10243 [0x20] = all_lanes_dead_timeout_expired, 10244 [0x21] = 10245 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy", 10246 [0x22] = no_state_complete, 10247 [0x23] = 10248 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy", 10249 [0x24] = tx_out_of_policy, 10250 [0x30] = all_lanes_dead_timeout_expired, 10251 [0x31] = 10252 "State timeout occurred waiting for host to process received frames", 10253 [0x32] = no_state_complete, 10254 [0x33] = 10255 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy", 10256 [0x34] = tx_out_of_policy, 10257 [0x35] = "Negotiated link width is mutually exclusive", 10258 [0x36] = 10259 "Timed out before receiving verifycap frames in VerifyCap.Exchange", 10260 [0x37] = "Unable to resolve secure data exchange", 10261 }; 10262 10263 static const char 
*state_complete_reason_code_string(struct hfi1_pportdata *ppd, 10264 u32 code) 10265 { 10266 const char *str = NULL; 10267 10268 if (code < ARRAY_SIZE(state_complete_reasons)) 10269 str = state_complete_reasons[code]; 10270 10271 if (str) 10272 return str; 10273 return "Reserved"; 10274 } 10275 10276 /* describe the given last state complete frame */ 10277 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame, 10278 const char *prefix) 10279 { 10280 struct hfi1_devdata *dd = ppd->dd; 10281 u32 success; 10282 u32 state; 10283 u32 reason; 10284 u32 lanes; 10285 10286 /* 10287 * Decode frame: 10288 * [ 0: 0] - success 10289 * [ 3: 1] - state 10290 * [ 7: 4] - next state timeout 10291 * [15: 8] - reason code 10292 * [31:16] - lanes 10293 */ 10294 success = frame & 0x1; 10295 state = (frame >> 1) & 0x7; 10296 reason = (frame >> 8) & 0xff; 10297 lanes = (frame >> 16) & 0xffff; 10298 10299 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n", 10300 prefix, frame); 10301 dd_dev_err(dd, " last reported state state: %s (0x%x)\n", 10302 state_completed_string(state), state); 10303 dd_dev_err(dd, " state successfully completed: %s\n", 10304 success ? "yes" : "no"); 10305 dd_dev_err(dd, " fail reason 0x%x: %s\n", 10306 reason, state_complete_reason_code_string(ppd, reason)); 10307 dd_dev_err(dd, " passing lane mask: 0x%x", lanes); 10308 } 10309 10310 /* 10311 * Read the last state complete frames and explain them. This routine 10312 * expects to be called if the link went down during link negotiation 10313 * and initialization (LNI). That is, anywhere between polling and link up. 10314 */ 10315 static void check_lni_states(struct hfi1_pportdata *ppd) 10316 { 10317 u32 last_local_state; 10318 u32 last_remote_state; 10319 10320 read_last_local_state(ppd->dd, &last_local_state); 10321 read_last_remote_state(ppd->dd, &last_remote_state); 10322 10323 /* 10324 * Don't report anything if there is nothing to report. A value of 10325 * 0 means the link was taken down while polling and there was no 10326 * training in-process. 
10327 */ 10328 if (last_local_state == 0 && last_remote_state == 0) 10329 return; 10330 10331 decode_state_complete(ppd, last_local_state, "transmitted"); 10332 decode_state_complete(ppd, last_remote_state, "received"); 10333 } 10334 10335 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */ 10336 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) 10337 { 10338 u64 reg; 10339 unsigned long timeout; 10340 10341 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ 10342 timeout = jiffies + msecs_to_jiffies(wait_ms); 10343 while (1) { 10344 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); 10345 if (reg) 10346 break; 10347 if (time_after(jiffies, timeout)) { 10348 dd_dev_err(dd, 10349 "timeout waiting for LINK_TRANSFER_ACTIVE\n"); 10350 return -ETIMEDOUT; 10351 } 10352 udelay(2); 10353 } 10354 return 0; 10355 } 10356 10357 /* called when the logical link state is not down as it should be */ 10358 static void force_logical_link_state_down(struct hfi1_pportdata *ppd) 10359 { 10360 struct hfi1_devdata *dd = ppd->dd; 10361 10362 /* 10363 * Bring link up in LCB loopback 10364 */ 10365 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10366 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 10367 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); 10368 10369 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); 10370 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); 10371 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); 10372 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); 10373 10374 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 10375 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET); 10376 udelay(3); 10377 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); 10378 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); 10379 10380 wait_link_transfer_active(dd, 100); 10381 10382 /* 10383 * Bring the link down again. 10384 */ 10385 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10386 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); 10387 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); 10388 10389 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); 10390 } 10391 10392 /* 10393 * Helper for set_link_state(). Do not call except from that routine. 10394 * Expects ppd->hls_mutex to be held. 10395 * 10396 * @rem_reason value to be sent to the neighbor 10397 * 10398 * LinkDownReasons only set if transition succeeds. 
10399 */ 10400 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) 10401 { 10402 struct hfi1_devdata *dd = ppd->dd; 10403 u32 previous_state; 10404 int offline_state_ret; 10405 int ret; 10406 10407 update_lcb_cache(dd); 10408 10409 previous_state = ppd->host_link_state; 10410 ppd->host_link_state = HLS_GOING_OFFLINE; 10411 10412 /* start offline transition */ 10413 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); 10414 10415 if (ret != HCMD_SUCCESS) { 10416 dd_dev_err(dd, 10417 "Failed to transition to Offline link state, return %d\n", 10418 ret); 10419 return -EINVAL; 10420 } 10421 if (ppd->offline_disabled_reason == 10422 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) 10423 ppd->offline_disabled_reason = 10424 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 10425 10426 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); 10427 if (offline_state_ret < 0) 10428 return offline_state_ret; 10429 10430 /* Disabling AOC transmitters */ 10431 if (ppd->port_type == PORT_TYPE_QSFP && 10432 ppd->qsfp_info.limiting_active && 10433 qsfp_mod_present(ppd)) { 10434 int ret; 10435 10436 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); 10437 if (ret == 0) { 10438 set_qsfp_tx(ppd, 0); 10439 release_chip_resource(dd, qsfp_resource(dd)); 10440 } else { 10441 /* not fatal, but should warn */ 10442 dd_dev_err(dd, 10443 "Unable to acquire lock to turn off QSFP TX\n"); 10444 } 10445 } 10446 10447 /* 10448 * Wait for the offline.Quiet transition if it hasn't happened yet. It 10449 * can take a while for the link to go down. 10450 */ 10451 if (offline_state_ret != PLS_OFFLINE_QUIET) { 10452 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); 10453 if (ret < 0) 10454 return ret; 10455 } 10456 10457 /* 10458 * Now in charge of LCB - must be after the physical state is 10459 * offline.quiet and before host_link_state is changed. 10460 */ 10461 set_host_lcb_access(dd); 10462 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ 10463 10464 /* make sure the logical state is also down */ 10465 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); 10466 if (ret) 10467 force_logical_link_state_down(ppd); 10468 10469 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ 10470 update_statusp(ppd, IB_PORT_DOWN); 10471 10472 /* 10473 * The LNI has a mandatory wait time after the physical state 10474 * moves to Offline.Quiet. The wait time may be different 10475 * depending on how the link went down. The 8051 firmware 10476 * will observe the needed wait time and only move to ready 10477 * when that is completed. The largest of the quiet timeouts 10478 * is 6s, so wait that long and then at least 0.5s more for 10479 * other transitions, and another 0.5s for a buffer. 10480 */ 10481 ret = wait_fm_ready(dd, 7000); 10482 if (ret) { 10483 dd_dev_err(dd, 10484 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); 10485 /* state is really offline, so make it so */ 10486 ppd->host_link_state = HLS_DN_OFFLINE; 10487 return ret; 10488 } 10489 10490 /* 10491 * The state is now offline and the 8051 is ready to accept host 10492 * requests. 
10493 * - change our state 10494 * - notify others if we were previously in a linkup state 10495 */ 10496 ppd->host_link_state = HLS_DN_OFFLINE; 10497 if (previous_state & HLS_UP) { 10498 /* went down while link was up */ 10499 handle_linkup_change(dd, 0); 10500 } else if (previous_state 10501 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 10502 /* went down while attempting link up */ 10503 check_lni_states(ppd); 10504 10505 /* The QSFP doesn't need to be reset on LNI failure */ 10506 ppd->qsfp_info.reset_needed = 0; 10507 } 10508 10509 /* the active link width (downgrade) is 0 on link down */ 10510 ppd->link_width_active = 0; 10511 ppd->link_width_downgrade_tx_active = 0; 10512 ppd->link_width_downgrade_rx_active = 0; 10513 ppd->current_egress_rate = 0; 10514 return 0; 10515 } 10516 10517 /* return the link state name */ 10518 static const char *link_state_name(u32 state) 10519 { 10520 const char *name; 10521 int n = ilog2(state); 10522 static const char * const names[] = { 10523 [__HLS_UP_INIT_BP] = "INIT", 10524 [__HLS_UP_ARMED_BP] = "ARMED", 10525 [__HLS_UP_ACTIVE_BP] = "ACTIVE", 10526 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF", 10527 [__HLS_DN_POLL_BP] = "POLL", 10528 [__HLS_DN_DISABLE_BP] = "DISABLE", 10529 [__HLS_DN_OFFLINE_BP] = "OFFLINE", 10530 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP", 10531 [__HLS_GOING_UP_BP] = "GOING_UP", 10532 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE", 10533 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN" 10534 }; 10535 10536 name = n < ARRAY_SIZE(names) ? names[n] : NULL; 10537 return name ? name : "unknown"; 10538 } 10539 10540 /* return the link state reason name */ 10541 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state) 10542 { 10543 if (state == HLS_UP_INIT) { 10544 switch (ppd->linkinit_reason) { 10545 case OPA_LINKINIT_REASON_LINKUP: 10546 return "(LINKUP)"; 10547 case OPA_LINKINIT_REASON_FLAPPING: 10548 return "(FLAPPING)"; 10549 case OPA_LINKINIT_OUTSIDE_POLICY: 10550 return "(OUTSIDE_POLICY)"; 10551 case OPA_LINKINIT_QUARANTINED: 10552 return "(QUARANTINED)"; 10553 case OPA_LINKINIT_INSUFIC_CAPABILITY: 10554 return "(INSUFIC_CAPABILITY)"; 10555 default: 10556 break; 10557 } 10558 } 10559 return ""; 10560 } 10561 10562 /* 10563 * driver_pstate - convert the driver's notion of a port's 10564 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*). 10565 * Return -1 (converted to a u32) to indicate error. 10566 */ 10567 u32 driver_pstate(struct hfi1_pportdata *ppd) 10568 { 10569 switch (ppd->host_link_state) { 10570 case HLS_UP_INIT: 10571 case HLS_UP_ARMED: 10572 case HLS_UP_ACTIVE: 10573 return IB_PORTPHYSSTATE_LINKUP; 10574 case HLS_DN_POLL: 10575 return IB_PORTPHYSSTATE_POLLING; 10576 case HLS_DN_DISABLE: 10577 return IB_PORTPHYSSTATE_DISABLED; 10578 case HLS_DN_OFFLINE: 10579 return OPA_PORTPHYSSTATE_OFFLINE; 10580 case HLS_VERIFY_CAP: 10581 return IB_PORTPHYSSTATE_TRAINING; 10582 case HLS_GOING_UP: 10583 return IB_PORTPHYSSTATE_TRAINING; 10584 case HLS_GOING_OFFLINE: 10585 return OPA_PORTPHYSSTATE_OFFLINE; 10586 case HLS_LINK_COOLDOWN: 10587 return OPA_PORTPHYSSTATE_OFFLINE; 10588 case HLS_DN_DOWNDEF: 10589 default: 10590 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", 10591 ppd->host_link_state); 10592 return -1; 10593 } 10594 } 10595 10596 /* 10597 * driver_lstate - convert the driver's notion of a port's 10598 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1 10599 * (converted to a u32) to indicate error. 
10600 */ 10601 u32 driver_lstate(struct hfi1_pportdata *ppd) 10602 { 10603 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) 10604 return IB_PORT_DOWN; 10605 10606 switch (ppd->host_link_state & HLS_UP) { 10607 case HLS_UP_INIT: 10608 return IB_PORT_INIT; 10609 case HLS_UP_ARMED: 10610 return IB_PORT_ARMED; 10611 case HLS_UP_ACTIVE: 10612 return IB_PORT_ACTIVE; 10613 default: 10614 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", 10615 ppd->host_link_state); 10616 return -1; 10617 } 10618 } 10619 10620 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason, 10621 u8 neigh_reason, u8 rem_reason) 10622 { 10623 if (ppd->local_link_down_reason.latest == 0 && 10624 ppd->neigh_link_down_reason.latest == 0) { 10625 ppd->local_link_down_reason.latest = lcl_reason; 10626 ppd->neigh_link_down_reason.latest = neigh_reason; 10627 ppd->remote_link_down_reason = rem_reason; 10628 } 10629 } 10630 10631 /** 10632 * data_vls_operational() - Verify if data VL BCT credits and MTU 10633 * are both set. 10634 * @ppd: pointer to hfi1_pportdata structure 10635 * 10636 * Return: true - Ok, false -otherwise. 10637 */ 10638 static inline bool data_vls_operational(struct hfi1_pportdata *ppd) 10639 { 10640 int i; 10641 u64 reg; 10642 10643 if (!ppd->actual_vls_operational) 10644 return false; 10645 10646 for (i = 0; i < ppd->vls_supported; i++) { 10647 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); 10648 if ((reg && !ppd->dd->vld[i].mtu) || 10649 (!reg && ppd->dd->vld[i].mtu)) 10650 return false; 10651 } 10652 10653 return true; 10654 } 10655 10656 /* 10657 * Change the physical and/or logical link state. 10658 * 10659 * Do not call this routine while inside an interrupt. It contains 10660 * calls to routines that can take multiple seconds to finish. 10661 * 10662 * Returns 0 on success, -errno on failure. 10663 */ 10664 int set_link_state(struct hfi1_pportdata *ppd, u32 state) 10665 { 10666 struct hfi1_devdata *dd = ppd->dd; 10667 struct ib_event event = {.device = NULL}; 10668 int ret1, ret = 0; 10669 int orig_new_state, poll_bounce; 10670 10671 mutex_lock(&ppd->hls_lock); 10672 10673 orig_new_state = state; 10674 if (state == HLS_DN_DOWNDEF) 10675 state = HLS_DEFAULT; 10676 10677 /* interpret poll -> poll as a link bounce */ 10678 poll_bounce = ppd->host_link_state == HLS_DN_POLL && 10679 state == HLS_DN_POLL; 10680 10681 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, 10682 link_state_name(ppd->host_link_state), 10683 link_state_name(orig_new_state), 10684 poll_bounce ? "(bounce) " : "", 10685 link_state_reason_name(ppd, state)); 10686 10687 /* 10688 * If we're going to a (HLS_*) link state that implies the logical 10689 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then 10690 * reset is_sm_config_started to 0. 10691 */ 10692 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE))) 10693 ppd->is_sm_config_started = 0; 10694 10695 /* 10696 * Do nothing if the states match. Let a poll to poll link bounce 10697 * go through. 10698 */ 10699 if (ppd->host_link_state == state && !poll_bounce) 10700 goto done; 10701 10702 switch (state) { 10703 case HLS_UP_INIT: 10704 if (ppd->host_link_state == HLS_DN_POLL && 10705 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { 10706 /* 10707 * Quick link up jumps from polling to here. 10708 * 10709 * Whether in normal or loopback mode, the 10710 * simulator jumps from polling to link up. 10711 * Accept that here. 
10712 */
10713 /* OK */
10714 } else if (ppd->host_link_state != HLS_GOING_UP) {
10715 goto unexpected;
10716 }
10717
10718 /*
10719 * Wait for Link_Up physical state.
10720 * Physical and Logical states should already be
10721 * transitioned to LinkUp and LinkInit respectively.
10722 */
10723 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10724 if (ret) {
10725 dd_dev_err(dd,
10726 "%s: physical state did not change to LINK-UP\n",
10727 __func__);
10728 break;
10729 }
10730
10731 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10732 if (ret) {
10733 dd_dev_err(dd,
10734 "%s: logical state did not change to INIT\n",
10735 __func__);
10736 break;
10737 }
10738
10739 /* clear old transient LINKINIT_REASON code */
10740 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10741 ppd->linkinit_reason =
10742 OPA_LINKINIT_REASON_LINKUP;
10743
10744 /* enable the port */
10745 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10746
10747 handle_linkup_change(dd, 1);
10748 pio_kernel_linkup(dd);
10749
10750 /*
10751 * After link up, a new link width will have been set.
10752 * Update the xmit counters with regard to the new
10753 * link width.
10754 */
10755 update_xmit_counters(ppd, ppd->link_width_active);
10756
10757 ppd->host_link_state = HLS_UP_INIT;
10758 update_statusp(ppd, IB_PORT_INIT);
10759 break;
10760 case HLS_UP_ARMED:
10761 if (ppd->host_link_state != HLS_UP_INIT)
10762 goto unexpected;
10763
10764 if (!data_vls_operational(ppd)) {
10765 dd_dev_err(dd,
10766 "%s: Invalid data VL credits or mtu\n",
10767 __func__);
10768 ret = -EINVAL;
10769 break;
10770 }
10771
10772 set_logical_state(dd, LSTATE_ARMED);
10773 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10774 if (ret) {
10775 dd_dev_err(dd,
10776 "%s: logical state did not change to ARMED\n",
10777 __func__);
10778 break;
10779 }
10780 ppd->host_link_state = HLS_UP_ARMED;
10781 update_statusp(ppd, IB_PORT_ARMED);
10782 /*
10783 * The simulator does not currently implement SMA messages,
10784 * so neighbor_normal is not set. Set it here when we first
10785 * move to Armed.
10786 */
10787 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10788 ppd->neighbor_normal = 1;
10789 break;
10790 case HLS_UP_ACTIVE:
10791 if (ppd->host_link_state != HLS_UP_ARMED)
10792 goto unexpected;
10793
10794 set_logical_state(dd, LSTATE_ACTIVE);
10795 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10796 if (ret) {
10797 dd_dev_err(dd,
10798 "%s: logical state did not change to ACTIVE\n",
10799 __func__);
10800 } else {
10801 /* tell all engines to go running */
10802 sdma_all_running(dd);
10803 ppd->host_link_state = HLS_UP_ACTIVE;
10804 update_statusp(ppd, IB_PORT_ACTIVE);
10805
10806 /* Signal the IB layer that the port has gone active */
10807 event.device = &dd->verbs_dev.rdi.ibdev;
10808 event.element.port_num = ppd->port;
10809 event.event = IB_EVENT_PORT_ACTIVE;
10810 }
10811 break;
10812 case HLS_DN_POLL:
10813 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10814 ppd->host_link_state == HLS_DN_OFFLINE) &&
10815 dd->dc_shutdown)
10816 dc_start(dd);
10817 /* Hand LED control to the DC */
10818 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10819
10820 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10821 u8 tmp = ppd->link_enabled;
10822
10823 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10824 if (ret) {
10825 ppd->link_enabled = tmp;
10826 break;
10827 }
10828 ppd->remote_link_down_reason = 0;
10829
10830 if (ppd->driver_link_ready)
10831 ppd->link_enabled = 1;
10832 }
10833
10834 set_all_slowpath(ppd->dd);
10835 ret = set_local_link_attributes(ppd);
10836 if (ret)
10837 break;
10838
10839 ppd->port_error_action = 0;
10840
10841 if (quick_linkup) {
10842 /* quick linkup does not go into polling */
10843 ret = do_quick_linkup(dd);
10844 } else {
10845 ret1 = set_physical_link_state(dd, PLS_POLLING);
10846 if (!ret1)
10847 ret1 = wait_phys_link_out_of_offline(ppd,
10848 3000);
10849 if (ret1 != HCMD_SUCCESS) {
10850 dd_dev_err(dd,
10851 "Failed to transition to Polling link state, return 0x%x\n",
10852 ret1);
10853 ret = -EINVAL;
10854 }
10855 }
10856
10857 /*
10858 * Change the host link state after requesting DC8051 to
10859 * change its physical state so that we can ignore any
10860 * interrupt with stale LNI(XX) error, which will not be
10861 * cleared until DC8051 transitions to Polling state.
10862 */
10863 ppd->host_link_state = HLS_DN_POLL;
10864 ppd->offline_disabled_reason =
10865 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10866 /*
10867 * If an error occurred above, go back to offline. The
10868 * caller may reschedule another attempt.
10869 */ 10870 if (ret) 10871 goto_offline(ppd, 0); 10872 else 10873 log_physical_state(ppd, PLS_POLLING); 10874 break; 10875 case HLS_DN_DISABLE: 10876 /* link is disabled */ 10877 ppd->link_enabled = 0; 10878 10879 /* allow any state to transition to disabled */ 10880 10881 /* must transition to offline first */ 10882 if (ppd->host_link_state != HLS_DN_OFFLINE) { 10883 ret = goto_offline(ppd, ppd->remote_link_down_reason); 10884 if (ret) 10885 break; 10886 ppd->remote_link_down_reason = 0; 10887 } 10888 10889 if (!dd->dc_shutdown) { 10890 ret1 = set_physical_link_state(dd, PLS_DISABLED); 10891 if (ret1 != HCMD_SUCCESS) { 10892 dd_dev_err(dd, 10893 "Failed to transition to Disabled link state, return 0x%x\n", 10894 ret1); 10895 ret = -EINVAL; 10896 break; 10897 } 10898 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000); 10899 if (ret) { 10900 dd_dev_err(dd, 10901 "%s: physical state did not change to DISABLED\n", 10902 __func__); 10903 break; 10904 } 10905 dc_shutdown(dd); 10906 } 10907 ppd->host_link_state = HLS_DN_DISABLE; 10908 break; 10909 case HLS_DN_OFFLINE: 10910 if (ppd->host_link_state == HLS_DN_DISABLE) 10911 dc_start(dd); 10912 10913 /* allow any state to transition to offline */ 10914 ret = goto_offline(ppd, ppd->remote_link_down_reason); 10915 if (!ret) 10916 ppd->remote_link_down_reason = 0; 10917 break; 10918 case HLS_VERIFY_CAP: 10919 if (ppd->host_link_state != HLS_DN_POLL) 10920 goto unexpected; 10921 ppd->host_link_state = HLS_VERIFY_CAP; 10922 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP); 10923 break; 10924 case HLS_GOING_UP: 10925 if (ppd->host_link_state != HLS_VERIFY_CAP) 10926 goto unexpected; 10927 10928 ret1 = set_physical_link_state(dd, PLS_LINKUP); 10929 if (ret1 != HCMD_SUCCESS) { 10930 dd_dev_err(dd, 10931 "Failed to transition to link up state, return 0x%x\n", 10932 ret1); 10933 ret = -EINVAL; 10934 break; 10935 } 10936 ppd->host_link_state = HLS_GOING_UP; 10937 break; 10938 10939 case HLS_GOING_OFFLINE: /* transient within goto_offline() */ 10940 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */ 10941 default: 10942 dd_dev_info(dd, "%s: state 0x%x: not supported\n", 10943 __func__, state); 10944 ret = -EINVAL; 10945 break; 10946 } 10947 10948 goto done; 10949 10950 unexpected: 10951 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", 10952 __func__, link_state_name(ppd->host_link_state), 10953 link_state_name(state)); 10954 ret = -EINVAL; 10955 10956 done: 10957 mutex_unlock(&ppd->hls_lock); 10958 10959 if (event.device) 10960 ib_dispatch_event(&event); 10961 10962 return ret; 10963 } 10964 10965 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) 10966 { 10967 u64 reg; 10968 int ret = 0; 10969 10970 switch (which) { 10971 case HFI1_IB_CFG_LIDLMC: 10972 set_lidlmc(ppd); 10973 break; 10974 case HFI1_IB_CFG_VL_HIGH_LIMIT: 10975 /* 10976 * The VL Arbitrator high limit is sent in units of 4k 10977 * bytes, while HFI stores it in units of 64 bytes. 
10978 */ 10979 val *= 4096 / 64; 10980 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK) 10981 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT; 10982 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); 10983 break; 10984 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ 10985 /* HFI only supports POLL as the default link down state */ 10986 if (val != HLS_DN_POLL) 10987 ret = -EINVAL; 10988 break; 10989 case HFI1_IB_CFG_OP_VLS: 10990 if (ppd->vls_operational != val) { 10991 ppd->vls_operational = val; 10992 if (!ppd->port) 10993 ret = -EINVAL; 10994 } 10995 break; 10996 /* 10997 * For link width, link width downgrade, and speed enable, always AND 10998 * the setting with what is actually supported. This has two benefits. 10999 * First, enabled can't have unsupported values, no matter what the 11000 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean 11001 * "fill in with your supported value" have all the bits in the 11002 * field set, so simply ANDing with supported has the desired result. 11003 */ 11004 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ 11005 ppd->link_width_enabled = val & ppd->link_width_supported; 11006 break; 11007 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */ 11008 ppd->link_width_downgrade_enabled = 11009 val & ppd->link_width_downgrade_supported; 11010 break; 11011 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ 11012 ppd->link_speed_enabled = val & ppd->link_speed_supported; 11013 break; 11014 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ 11015 /* 11016 * HFI does not follow IB specs, save this value 11017 * so we can report it, if asked. 11018 */ 11019 ppd->overrun_threshold = val; 11020 break; 11021 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ 11022 /* 11023 * HFI does not follow IB specs, save this value 11024 * so we can report it, if asked. 11025 */ 11026 ppd->phy_error_threshold = val; 11027 break; 11028 11029 case HFI1_IB_CFG_MTU: 11030 set_send_length(ppd); 11031 break; 11032 11033 case HFI1_IB_CFG_PKEYS: 11034 if (HFI1_CAP_IS_KSET(PKEY_CHECK)) 11035 set_partition_keys(ppd); 11036 break; 11037 11038 default: 11039 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) 11040 dd_dev_info(ppd->dd, 11041 "%s: which %s, val 0x%x: not implemented\n", 11042 __func__, ib_cfg_name(which), val); 11043 break; 11044 } 11045 return ret; 11046 } 11047 11048 /* begin functions related to vl arbitration table caching */ 11049 static void init_vl_arb_caches(struct hfi1_pportdata *ppd) 11050 { 11051 int i; 11052 11053 BUILD_BUG_ON(VL_ARB_TABLE_SIZE != 11054 VL_ARB_LOW_PRIO_TABLE_SIZE); 11055 BUILD_BUG_ON(VL_ARB_TABLE_SIZE != 11056 VL_ARB_HIGH_PRIO_TABLE_SIZE); 11057 11058 /* 11059 * Note that we always return values directly from the 11060 * 'vl_arb_cache' (and do no CSR reads) in response to a 11061 * 'Get(VLArbTable)'. This is obviously correct after a 11062 * 'Set(VLArbTable)', since the cache will then be up to 11063 * date. But it's also correct prior to any 'Set(VLArbTable)' 11064 * since then both the cache, and the relevant h/w registers 11065 * will be zeroed. 11066 */ 11067 11068 for (i = 0; i < MAX_PRIO_TABLE; i++) 11069 spin_lock_init(&ppd->vl_arb_cache[i].lock); 11070 } 11071 11072 /* 11073 * vl_arb_lock_cache 11074 * 11075 * All other vl_arb_* functions should be called only after locking 11076 * the cache. 
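 * Typical usage, as in fm_get_table() below:
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);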
11077 */ 11078 static inline struct vl_arb_cache * 11079 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx) 11080 { 11081 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE) 11082 return NULL; 11083 spin_lock(&ppd->vl_arb_cache[idx].lock); 11084 return &ppd->vl_arb_cache[idx]; 11085 } 11086 11087 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx) 11088 { 11089 spin_unlock(&ppd->vl_arb_cache[idx].lock); 11090 } 11091 11092 static void vl_arb_get_cache(struct vl_arb_cache *cache, 11093 struct ib_vl_weight_elem *vl) 11094 { 11095 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11096 } 11097 11098 static void vl_arb_set_cache(struct vl_arb_cache *cache, 11099 struct ib_vl_weight_elem *vl) 11100 { 11101 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11102 } 11103 11104 static int vl_arb_match_cache(struct vl_arb_cache *cache, 11105 struct ib_vl_weight_elem *vl) 11106 { 11107 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11108 } 11109 11110 /* end functions related to vl arbitration table caching */ 11111 11112 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target, 11113 u32 size, struct ib_vl_weight_elem *vl) 11114 { 11115 struct hfi1_devdata *dd = ppd->dd; 11116 u64 reg; 11117 unsigned int i, is_up = 0; 11118 int drain, ret = 0; 11119 11120 mutex_lock(&ppd->hls_lock); 11121 11122 if (ppd->host_link_state & HLS_UP) 11123 is_up = 1; 11124 11125 drain = !is_ax(dd) && is_up; 11126 11127 if (drain) 11128 /* 11129 * Before adjusting VL arbitration weights, empty per-VL 11130 * FIFOs, otherwise a packet whose VL weight is being 11131 * set to 0 could get stuck in a FIFO with no chance to 11132 * egress. 11133 */ 11134 ret = stop_drain_data_vls(dd); 11135 11136 if (ret) { 11137 dd_dev_err( 11138 dd, 11139 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", 11140 __func__); 11141 goto err; 11142 } 11143 11144 for (i = 0; i < size; i++, vl++) { 11145 /* 11146 * NOTE: The low priority shift and mask are used here, but 11147 * they are the same for both the low and high registers. 11148 */ 11149 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) 11150 << SEND_LOW_PRIORITY_LIST_VL_SHIFT) 11151 | (((u64)vl->weight 11152 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK) 11153 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT); 11154 write_csr(dd, target + (i * 8), reg); 11155 } 11156 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); 11157 11158 if (drain) 11159 open_fill_data_vls(dd); /* reopen all VLs */ 11160 11161 err: 11162 mutex_unlock(&ppd->hls_lock); 11163 11164 return ret; 11165 } 11166 11167 /* 11168 * Read one credit merge VL register. 11169 */ 11170 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, 11171 struct vl_limit *vll) 11172 { 11173 u64 reg = read_csr(dd, csr); 11174 11175 vll->dedicated = cpu_to_be16( 11176 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT) 11177 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK); 11178 vll->shared = cpu_to_be16( 11179 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT) 11180 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK); 11181 } 11182 11183 /* 11184 * Read the current credit merge limits. 
11185 */
11186 static int get_buffer_control(struct hfi1_devdata *dd,
11187 struct buffer_control *bc, u16 *overall_limit)
11188 {
11189 u64 reg;
11190 int i;
11191
11192 /* not all entries are filled in */
11193 memset(bc, 0, sizeof(*bc));
11194
11195 /* OPA and HFI have a 1-1 mapping */
11196 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11197 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11198
11199 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11200 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11201
11202 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11203 bc->overall_shared_limit = cpu_to_be16(
11204 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11205 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11206 if (overall_limit)
11207 *overall_limit = (reg
11208 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11209 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11210 return sizeof(struct buffer_control);
11211 }
11212
11213 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11214 {
11215 u64 reg;
11216 int i;
11217
11218 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11219 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11220 for (i = 0; i < sizeof(u64); i++) {
11221 u8 byte = *(((u8 *)&reg) + i);
11222
11223 dp->vlnt[2 * i] = byte & 0xf;
11224 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11225 }
11226
11227 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11228 for (i = 0; i < sizeof(u64); i++) {
11229 u8 byte = *(((u8 *)&reg) + i);
11230
11231 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11232 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11233 }
11234 return sizeof(struct sc2vlnt);
11235 }
11236
11237 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11238 struct ib_vl_weight_elem *vl)
11239 {
11240 unsigned int i;
11241
11242 for (i = 0; i < nelems; i++, vl++) {
11243 vl->vl = 0xf;
11244 vl->weight = 0;
11245 }
11246 }
11247
11248 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11249 {
11250 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11251 DC_SC_VL_VAL(15_0,
11252 0, dp->vlnt[0] & 0xf,
11253 1, dp->vlnt[1] & 0xf,
11254 2, dp->vlnt[2] & 0xf,
11255 3, dp->vlnt[3] & 0xf,
11256 4, dp->vlnt[4] & 0xf,
11257 5, dp->vlnt[5] & 0xf,
11258 6, dp->vlnt[6] & 0xf,
11259 7, dp->vlnt[7] & 0xf,
11260 8, dp->vlnt[8] & 0xf,
11261 9, dp->vlnt[9] & 0xf,
11262 10, dp->vlnt[10] & 0xf,
11263 11, dp->vlnt[11] & 0xf,
11264 12, dp->vlnt[12] & 0xf,
11265 13, dp->vlnt[13] & 0xf,
11266 14, dp->vlnt[14] & 0xf,
11267 15, dp->vlnt[15] & 0xf));
11268 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11269 DC_SC_VL_VAL(31_16,
11270 16, dp->vlnt[16] & 0xf,
11271 17, dp->vlnt[17] & 0xf,
11272 18, dp->vlnt[18] & 0xf,
11273 19, dp->vlnt[19] & 0xf,
11274 20, dp->vlnt[20] & 0xf,
11275 21, dp->vlnt[21] & 0xf,
11276 22, dp->vlnt[22] & 0xf,
11277 23, dp->vlnt[23] & 0xf,
11278 24, dp->vlnt[24] & 0xf,
11279 25, dp->vlnt[25] & 0xf,
11280 26, dp->vlnt[26] & 0xf,
11281 27, dp->vlnt[27] & 0xf,
11282 28, dp->vlnt[28] & 0xf,
11283 29, dp->vlnt[29] & 0xf,
11284 30, dp->vlnt[30] & 0xf,
11285 31, dp->vlnt[31] & 0xf));
11286 }
11287
11288 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11289 u16 limit)
11290 {
11291 if (limit != 0)
11292 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11293 what, (int)limit, idx);
11294 }
11295
11296 /* change only the shared limit portion of SendCmGLobalCredit */
11297 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11298 {
11299 u64 reg;
11300
11301 reg = read_csr(dd,
SEND_CM_GLOBAL_CREDIT); 11302 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK; 11303 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT; 11304 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 11305 } 11306 11307 /* change only the total credit limit portion of SendCmGLobalCredit */ 11308 static void set_global_limit(struct hfi1_devdata *dd, u16 limit) 11309 { 11310 u64 reg; 11311 11312 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 11313 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK; 11314 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 11315 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 11316 } 11317 11318 /* set the given per-VL shared limit */ 11319 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) 11320 { 11321 u64 reg; 11322 u32 addr; 11323 11324 if (vl < TXE_NUM_DATA_VL) 11325 addr = SEND_CM_CREDIT_VL + (8 * vl); 11326 else 11327 addr = SEND_CM_CREDIT_VL15; 11328 11329 reg = read_csr(dd, addr); 11330 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK; 11331 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT; 11332 write_csr(dd, addr, reg); 11333 } 11334 11335 /* set the given per-VL dedicated limit */ 11336 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) 11337 { 11338 u64 reg; 11339 u32 addr; 11340 11341 if (vl < TXE_NUM_DATA_VL) 11342 addr = SEND_CM_CREDIT_VL + (8 * vl); 11343 else 11344 addr = SEND_CM_CREDIT_VL15; 11345 11346 reg = read_csr(dd, addr); 11347 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK; 11348 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT; 11349 write_csr(dd, addr, reg); 11350 } 11351 11352 /* spin until the given per-VL status mask bits clear */ 11353 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, 11354 const char *which) 11355 { 11356 unsigned long timeout; 11357 u64 reg; 11358 11359 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT); 11360 while (1) { 11361 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; 11362 11363 if (reg == 0) 11364 return; /* success */ 11365 if (time_after(jiffies, timeout)) 11366 break; /* timed out */ 11367 udelay(1); 11368 } 11369 11370 dd_dev_err(dd, 11371 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n", 11372 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); 11373 /* 11374 * If this occurs, it is likely there was a credit loss on the link. 11375 * The only recovery from that is a link bounce. 11376 */ 11377 dd_dev_err(dd, 11378 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n"); 11379 } 11380 11381 /* 11382 * The number of credits on the VLs may be changed while everything 11383 * is "live", but the following algorithm must be followed due to 11384 * how the hardware is actually implemented. In particular, 11385 * Return_Credit_Status[] is the only correct status check. 11386 * 11387 * if (reducing Global_Shared_Credit_Limit or any shared limit changing) 11388 * set Global_Shared_Credit_Limit = 0 11389 * use_all_vl = 1 11390 * mask0 = all VLs that are changing either dedicated or shared limits 11391 * set Shared_Limit[mask0] = 0 11392 * spin until Return_Credit_Status[use_all_vl ? 
all VL : mask0] == 0 11393 * if (changing any dedicated limit) 11394 * mask1 = all VLs that are lowering dedicated limits 11395 * lower Dedicated_Limit[mask1] 11396 * spin until Return_Credit_Status[mask1] == 0 11397 * raise Dedicated_Limits 11398 * raise Shared_Limits 11399 * raise Global_Shared_Credit_Limit 11400 * 11401 * lower = if the new limit is lower, set the limit to the new value 11402 * raise = if the new limit is higher than the current value (may be changed 11403 * earlier in the algorithm), set the new limit to the new value 11404 */ 11405 int set_buffer_control(struct hfi1_pportdata *ppd, 11406 struct buffer_control *new_bc) 11407 { 11408 struct hfi1_devdata *dd = ppd->dd; 11409 u64 changing_mask, ld_mask, stat_mask; 11410 int change_count; 11411 int i, use_all_mask; 11412 int this_shared_changing; 11413 int vl_count = 0, ret; 11414 /* 11415 * A0: add the variable any_shared_limit_changing below and in the 11416 * algorithm above. If removing A0 support, it can be removed. 11417 */ 11418 int any_shared_limit_changing; 11419 struct buffer_control cur_bc; 11420 u8 changing[OPA_MAX_VLS]; 11421 u8 lowering_dedicated[OPA_MAX_VLS]; 11422 u16 cur_total; 11423 u32 new_total = 0; 11424 const u64 all_mask = 11425 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK 11426 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK 11427 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK 11428 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK 11429 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK 11430 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK 11431 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK 11432 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK 11433 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK; 11434 11435 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) 11436 #define NUM_USABLE_VLS 16 /* look at VL15 and less */ 11437 11438 /* find the new total credits, do sanity check on unused VLs */ 11439 for (i = 0; i < OPA_MAX_VLS; i++) { 11440 if (valid_vl(i)) { 11441 new_total += be16_to_cpu(new_bc->vl[i].dedicated); 11442 continue; 11443 } 11444 nonzero_msg(dd, i, "dedicated", 11445 be16_to_cpu(new_bc->vl[i].dedicated)); 11446 nonzero_msg(dd, i, "shared", 11447 be16_to_cpu(new_bc->vl[i].shared)); 11448 new_bc->vl[i].dedicated = 0; 11449 new_bc->vl[i].shared = 0; 11450 } 11451 new_total += be16_to_cpu(new_bc->overall_shared_limit); 11452 11453 /* fetch the current values */ 11454 get_buffer_control(dd, &cur_bc, &cur_total); 11455 11456 /* 11457 * Create the masks we will use. 
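 * changing[] / changing_mask mark the VLs whose dedicated or shared
 * limit differs from the current value; lowering_dedicated[] / ld_mask
 * mark the VLs whose dedicated limit is being lowered.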
11458 */ 11459 memset(changing, 0, sizeof(changing)); 11460 memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); 11461 /* 11462 * NOTE: Assumes that the individual VL bits are adjacent and in 11463 * increasing order 11464 */ 11465 stat_mask = 11466 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; 11467 changing_mask = 0; 11468 ld_mask = 0; 11469 change_count = 0; 11470 any_shared_limit_changing = 0; 11471 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) { 11472 if (!valid_vl(i)) 11473 continue; 11474 this_shared_changing = new_bc->vl[i].shared 11475 != cur_bc.vl[i].shared; 11476 if (this_shared_changing) 11477 any_shared_limit_changing = 1; 11478 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || 11479 this_shared_changing) { 11480 changing[i] = 1; 11481 changing_mask |= stat_mask; 11482 change_count++; 11483 } 11484 if (be16_to_cpu(new_bc->vl[i].dedicated) < 11485 be16_to_cpu(cur_bc.vl[i].dedicated)) { 11486 lowering_dedicated[i] = 1; 11487 ld_mask |= stat_mask; 11488 } 11489 } 11490 11491 /* bracket the credit change with a total adjustment */ 11492 if (new_total > cur_total) 11493 set_global_limit(dd, new_total); 11494 11495 /* 11496 * Start the credit change algorithm. 11497 */ 11498 use_all_mask = 0; 11499 if ((be16_to_cpu(new_bc->overall_shared_limit) < 11500 be16_to_cpu(cur_bc.overall_shared_limit)) || 11501 (is_ax(dd) && any_shared_limit_changing)) { 11502 set_global_shared(dd, 0); 11503 cur_bc.overall_shared_limit = 0; 11504 use_all_mask = 1; 11505 } 11506 11507 for (i = 0; i < NUM_USABLE_VLS; i++) { 11508 if (!valid_vl(i)) 11509 continue; 11510 11511 if (changing[i]) { 11512 set_vl_shared(dd, i, 0); 11513 cur_bc.vl[i].shared = 0; 11514 } 11515 } 11516 11517 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask, 11518 "shared"); 11519 11520 if (change_count > 0) { 11521 for (i = 0; i < NUM_USABLE_VLS; i++) { 11522 if (!valid_vl(i)) 11523 continue; 11524 11525 if (lowering_dedicated[i]) { 11526 set_vl_dedicated(dd, i, 11527 be16_to_cpu(new_bc-> 11528 vl[i].dedicated)); 11529 cur_bc.vl[i].dedicated = 11530 new_bc->vl[i].dedicated; 11531 } 11532 } 11533 11534 wait_for_vl_status_clear(dd, ld_mask, "dedicated"); 11535 11536 /* now raise all dedicated that are going up */ 11537 for (i = 0; i < NUM_USABLE_VLS; i++) { 11538 if (!valid_vl(i)) 11539 continue; 11540 11541 if (be16_to_cpu(new_bc->vl[i].dedicated) > 11542 be16_to_cpu(cur_bc.vl[i].dedicated)) 11543 set_vl_dedicated(dd, i, 11544 be16_to_cpu(new_bc-> 11545 vl[i].dedicated)); 11546 } 11547 } 11548 11549 /* next raise all shared that are going up */ 11550 for (i = 0; i < NUM_USABLE_VLS; i++) { 11551 if (!valid_vl(i)) 11552 continue; 11553 11554 if (be16_to_cpu(new_bc->vl[i].shared) > 11555 be16_to_cpu(cur_bc.vl[i].shared)) 11556 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); 11557 } 11558 11559 /* finally raise the global shared */ 11560 if (be16_to_cpu(new_bc->overall_shared_limit) > 11561 be16_to_cpu(cur_bc.overall_shared_limit)) 11562 set_global_shared(dd, 11563 be16_to_cpu(new_bc->overall_shared_limit)); 11564 11565 /* bracket the credit change with a total adjustment */ 11566 if (new_total < cur_total) 11567 set_global_limit(dd, new_total); 11568 11569 /* 11570 * Determine the actual number of operational VLS using the number of 11571 * dedicated and shared credits for each VL. 
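 * A data VL counts as operational if the new configuration gives it any
 * dedicated or shared credits; the SDMA and PIO maps are then rebuilt
 * for that VL count.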
11572 */ 11573 if (change_count > 0) { 11574 for (i = 0; i < TXE_NUM_DATA_VL; i++) 11575 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || 11576 be16_to_cpu(new_bc->vl[i].shared) > 0) 11577 vl_count++; 11578 ppd->actual_vls_operational = vl_count; 11579 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? 11580 ppd->actual_vls_operational : 11581 ppd->vls_operational, 11582 NULL); 11583 if (ret == 0) 11584 ret = pio_map_init(dd, ppd->port - 1, vl_count ? 11585 ppd->actual_vls_operational : 11586 ppd->vls_operational, NULL); 11587 if (ret) 11588 return ret; 11589 } 11590 return 0; 11591 } 11592 11593 /* 11594 * Read the given fabric manager table. Return the size of the 11595 * table (in bytes) on success, and a negative error code on 11596 * failure. 11597 */ 11598 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t) 11599 11600 { 11601 int size; 11602 struct vl_arb_cache *vlc; 11603 11604 switch (which) { 11605 case FM_TBL_VL_HIGH_ARB: 11606 size = 256; 11607 /* 11608 * OPA specifies 128 elements (of 2 bytes each), though 11609 * HFI supports only 16 elements in h/w. 11610 */ 11611 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); 11612 vl_arb_get_cache(vlc, t); 11613 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11614 break; 11615 case FM_TBL_VL_LOW_ARB: 11616 size = 256; 11617 /* 11618 * OPA specifies 128 elements (of 2 bytes each), though 11619 * HFI supports only 16 elements in h/w. 11620 */ 11621 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); 11622 vl_arb_get_cache(vlc, t); 11623 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11624 break; 11625 case FM_TBL_BUFFER_CONTROL: 11626 size = get_buffer_control(ppd->dd, t, NULL); 11627 break; 11628 case FM_TBL_SC2VLNT: 11629 size = get_sc2vlnt(ppd->dd, t); 11630 break; 11631 case FM_TBL_VL_PREEMPT_ELEMS: 11632 size = 256; 11633 /* OPA specifies 128 elements, of 2 bytes each */ 11634 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); 11635 break; 11636 case FM_TBL_VL_PREEMPT_MATRIX: 11637 size = 256; 11638 /* 11639 * OPA specifies that this is the same size as the VL 11640 * arbitration tables (i.e., 256 bytes). 11641 */ 11642 break; 11643 default: 11644 return -EINVAL; 11645 } 11646 return size; 11647 } 11648 11649 /* 11650 * Write the given fabric manager table. 11651 */ 11652 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t) 11653 { 11654 int ret = 0; 11655 struct vl_arb_cache *vlc; 11656 11657 switch (which) { 11658 case FM_TBL_VL_HIGH_ARB: 11659 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); 11660 if (vl_arb_match_cache(vlc, t)) { 11661 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11662 break; 11663 } 11664 vl_arb_set_cache(vlc, t); 11665 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11666 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST, 11667 VL_ARB_HIGH_PRIO_TABLE_SIZE, t); 11668 break; 11669 case FM_TBL_VL_LOW_ARB: 11670 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); 11671 if (vl_arb_match_cache(vlc, t)) { 11672 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11673 break; 11674 } 11675 vl_arb_set_cache(vlc, t); 11676 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11677 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST, 11678 VL_ARB_LOW_PRIO_TABLE_SIZE, t); 11679 break; 11680 case FM_TBL_BUFFER_CONTROL: 11681 ret = set_buffer_control(ppd, t); 11682 break; 11683 case FM_TBL_SC2VLNT: 11684 set_sc2vlnt(ppd->dd, t); 11685 break; 11686 default: 11687 ret = -EINVAL; 11688 } 11689 return ret; 11690 } 11691 11692 /* 11693 * Disable all data VLs. 11694 * 11695 * Return 0 if disabled, non-zero if the VLs cannot be disabled. 
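 * (On A-step (Ax) hardware the data VLs cannot be disabled, so 1 is
 * returned immediately.)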
11696 */
11697 static int disable_data_vls(struct hfi1_devdata *dd)
11698 {
11699 if (is_ax(dd))
11700 return 1;
11701
11702 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11703
11704 return 0;
11705 }
11706
11707 /*
11708 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11709 * Just re-enables all data VLs (the "fill" part happens
11710 * automatically - the name was chosen for symmetry with
11711 * stop_drain_data_vls()).
11712 *
11713 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11714 */
11715 int open_fill_data_vls(struct hfi1_devdata *dd)
11716 {
11717 if (is_ax(dd))
11718 return 1;
11719
11720 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11721
11722 return 0;
11723 }
11724
11725 /*
11726 * drain_data_vls() - assumes that disable_data_vls() has been called,
11727 * then waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11728 * engines to drop to 0.
11729 */
11730 static void drain_data_vls(struct hfi1_devdata *dd)
11731 {
11732 sc_wait(dd);
11733 sdma_wait(dd);
11734 pause_for_credit_return(dd);
11735 }
11736
11737 /*
11738 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11739 *
11740 * Use open_fill_data_vls() to resume using data VLs. This pair is
11741 * meant to be used like this:
11742 *
11743 * stop_drain_data_vls(dd);
11744 * // do things with per-VL resources
11745 * open_fill_data_vls(dd);
11746 */
11747 int stop_drain_data_vls(struct hfi1_devdata *dd)
11748 {
11749 int ret;
11750
11751 ret = disable_data_vls(dd);
11752 if (ret == 0)
11753 drain_data_vls(dd);
11754
11755 return ret;
11756 }
11757
11758 /*
11759 * Convert a nanosecond time to a cclock count. No matter how slow
11760 * the cclock, a non-zero ns will always have a non-zero result.
11761 */
11762 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11763 {
11764 u32 cclocks;
11765
11766 if (dd->icode == ICODE_FPGA_EMULATION)
11767 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11768 else /* simulation pretends to be ASIC */
11769 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11770 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11771 cclocks = 1;
11772 return cclocks;
11773 }
11774
11775 /*
11776 * Convert a cclock count to nanoseconds. No matter how slow
11777 * the cclock, a non-zero cclocks will always have a non-zero result.
11778 */
11779 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11780 {
11781 u32 ns;
11782
11783 if (dd->icode == ICODE_FPGA_EMULATION)
11784 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11785 else /* simulation pretends to be ASIC */
11786 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11787 if (cclocks && !ns)
11788 ns = 1;
11789 return ns;
11790 }
11791
11792 /*
11793 * Dynamically adjust the receive interrupt timeout for a context based on
11794 * incoming packet rate.
11795 *
11796 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11797 */
11798 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11799 {
11800 struct hfi1_devdata *dd = rcd->dd;
11801 u32 timeout = rcd->rcvavail_timeout;
11802
11803 /*
11804 * This algorithm doubles or halves the timeout depending on whether
11805 * the number of packets received in this interrupt was less than or
11806 * greater than or equal to the interrupt count.
11807 *
11808 * The calculations below do not allow a steady state to be achieved.
11809 * Only at the endpoints is it possible to have an unchanging
11810 * timeout.
11811 */
11812 if (npkts < rcv_intr_count) {
11813 /*
11814 * Not enough packets arrived before the timeout, adjust
11815 * timeout downward.
11816 */
11817 if (timeout < 2) /* already at minimum? */
11818 return;
11819 timeout >>= 1;
11820 } else {
11821 /*
11822 * More than enough packets arrived before the timeout, adjust
11823 * timeout upward.
11824 */
11825 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11826 return;
11827 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11828 }
11829
11830 rcd->rcvavail_timeout = timeout;
11831 /*
11832 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11833 * been verified to be in range
11834 */
11835 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11836 (u64)timeout <<
11837 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11838 }
11839
11840 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11841 u32 intr_adjust, u32 npkts)
11842 {
11843 struct hfi1_devdata *dd = rcd->dd;
11844 u64 reg;
11845 u32 ctxt = rcd->ctxt;
11846
11847 /*
11848 * Need to write timeout register before updating RcvHdrHead to ensure
11849 * that a new value is used when the HW decides to restart counting.
11850 */
11851 if (intr_adjust)
11852 adjust_rcv_timeout(rcd, npkts);
11853 if (updegr) {
11854 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11855 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11856 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11857 }
11858 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11859 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11860 << RCV_HDR_HEAD_HEAD_SHIFT);
11861 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11862 }
11863
11864 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11865 {
11866 u32 head, tail;
11867
11868 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11869 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11870
11871 if (hfi1_rcvhdrtail_kvaddr(rcd))
11872 tail = get_rcvhdrtail(rcd);
11873 else
11874 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11875
11876 return head == tail;
11877 }
11878
11879 /*
11880 * Context Control and Receive Array encoding for buffer size:
11881 * 0x0 invalid
11882 * 0x1 4 KB
11883 * 0x2 8 KB
11884 * 0x3 16 KB
11885 * 0x4 32 KB
11886 * 0x5 64 KB
11887 * 0x6 128 KB
11888 * 0x7 256 KB
11889 * 0x8 512 KB (Receive Array only)
11890 * 0x9 1 MB (Receive Array only)
11891 * 0xa 2 MB (Receive Array only)
11892 *
11893 * 0xB-0xF - reserved (Receive Array only)
11894 *
11895 *
11896 * This routine assumes that the value has already been sanity checked.
11897 */
11898 static u32 encoded_size(u32 size)
11899 {
11900 switch (size) {
11901 case 4 * 1024: return 0x1;
11902 case 8 * 1024: return 0x2;
11903 case 16 * 1024: return 0x3;
11904 case 32 * 1024: return 0x4;
11905 case 64 * 1024: return 0x5;
11906 case 128 * 1024: return 0x6;
11907 case 256 * 1024: return 0x7;
11908 case 512 * 1024: return 0x8;
11909 case 1 * 1024 * 1024: return 0x9;
11910 case 2 * 1024 * 1024: return 0xa;
11911 }
11912 return 0x1; /* if invalid, go with the minimum size */
11913 }
11914
11915 /**
11916 * encode_rcv_header_entry_size - return chip specific encoding for size
11917 * @size: size in dwords
11918 *
11919 * Convert a receive header entry size to the encoding used in the CSR.
11920 *
11921 * Return a zero if the given size is invalid, otherwise the encoding.
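 * (The valid entry sizes are 2, 16 and 32 dwords, encoded as 1, 2 and 4
 * respectively.)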
11922 */ 11923 u8 encode_rcv_header_entry_size(u8 size) 11924 { 11925 /* there are only 3 valid receive header entry sizes */ 11926 if (size == 2) 11927 return 1; 11928 if (size == 16) 11929 return 2; 11930 if (size == 32) 11931 return 4; 11932 return 0; /* invalid */ 11933 } 11934 11935 /** 11936 * hfi1_validate_rcvhdrcnt - validate hdrcnt 11937 * @dd: the device data 11938 * @thecnt: the header count 11939 */ 11940 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) 11941 { 11942 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 11943 dd_dev_err(dd, "Receive header queue count too small\n"); 11944 return -EINVAL; 11945 } 11946 11947 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 11948 dd_dev_err(dd, 11949 "Receive header queue count cannot be greater than %u\n", 11950 HFI1_MAX_HDRQ_EGRBUF_CNT); 11951 return -EINVAL; 11952 } 11953 11954 if (thecnt % HDRQ_INCREMENT) { 11955 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", 11956 thecnt, HDRQ_INCREMENT); 11957 return -EINVAL; 11958 } 11959 11960 return 0; 11961 } 11962 11963 /** 11964 * set_hdrq_regs - set header queue registers for context 11965 * @dd: the device data 11966 * @ctxt: the context 11967 * @entsize: the dword entry size 11968 * @hdrcnt: the number of header entries 11969 */ 11970 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) 11971 { 11972 u64 reg; 11973 11974 reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) << 11975 RCV_HDR_CNT_CNT_SHIFT; 11976 write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); 11977 reg = ((u64)encode_rcv_header_entry_size(entsize) & 11978 RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) << 11979 RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; 11980 write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); 11981 reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) << 11982 RCV_HDR_SIZE_HDR_SIZE_SHIFT; 11983 write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); 11984 11985 /* 11986 * Program dummy tail address for every receive context 11987 * before enabling any receive context 11988 */ 11989 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 11990 dd->rcvhdrtail_dummy_dma); 11991 } 11992 11993 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, 11994 struct hfi1_ctxtdata *rcd) 11995 { 11996 u64 rcvctrl, reg; 11997 int did_enable = 0; 11998 u16 ctxt; 11999 12000 if (!rcd) 12001 return; 12002 12003 ctxt = rcd->ctxt; 12004 12005 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); 12006 12007 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); 12008 /* if the context already enabled, don't do the extra steps */ 12009 if ((op & HFI1_RCVCTRL_CTXT_ENB) && 12010 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { 12011 /* reset the tail and hdr addresses, and sequence count */ 12012 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, 12013 rcd->rcvhdrq_dma); 12014 if (hfi1_rcvhdrtail_kvaddr(rcd)) 12015 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 12016 rcd->rcvhdrqtailaddr_dma); 12017 hfi1_set_seq_cnt(rcd, 1); 12018 12019 /* reset the cached receive header queue head value */ 12020 hfi1_set_rcd_head(rcd, 0); 12021 12022 /* 12023 * Zero the receive header queue so we don't get false 12024 * positives when checking the sequence number. The 12025 * sequence numbers could land exactly on the same spot. 12026 * E.g. a rcd restart before the receive header wrapped. 
12027 */ 12028 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); 12029 12030 /* starting timeout */ 12031 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; 12032 12033 /* enable the context */ 12034 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK; 12035 12036 /* clean the egr buffer size first */ 12037 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; 12038 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) 12039 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK) 12040 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT; 12041 12042 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ 12043 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); 12044 did_enable = 1; 12045 12046 /* zero RcvEgrIndexHead */ 12047 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); 12048 12049 /* set eager count and base index */ 12050 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) 12051 & RCV_EGR_CTRL_EGR_CNT_MASK) 12052 << RCV_EGR_CTRL_EGR_CNT_SHIFT) | 12053 (((rcd->eager_base >> RCV_SHIFT) 12054 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK) 12055 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT); 12056 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); 12057 12058 /* 12059 * Set TID (expected) count and base index. 12060 * rcd->expected_count is set to individual RcvArray entries, 12061 * not pairs, and the CSR takes a pair-count in groups of 12062 * four, so divide by 8. 12063 */ 12064 reg = (((rcd->expected_count >> RCV_SHIFT) 12065 & RCV_TID_CTRL_TID_PAIR_CNT_MASK) 12066 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) | 12067 (((rcd->expected_base >> RCV_SHIFT) 12068 & RCV_TID_CTRL_TID_BASE_INDEX_MASK) 12069 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT); 12070 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); 12071 if (ctxt == HFI1_CTRL_CTXT) 12072 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); 12073 } 12074 if (op & HFI1_RCVCTRL_CTXT_DIS) { 12075 write_csr(dd, RCV_VL15, 0); 12076 /* 12077 * When receive context is being disabled turn on tail 12078 * update with a dummy tail address and then disable 12079 * receive context. 12080 */ 12081 if (dd->rcvhdrtail_dummy_dma) { 12082 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 12083 dd->rcvhdrtail_dummy_dma); 12084 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */ 12085 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; 12086 } 12087 12088 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK; 12089 } 12090 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) { 12091 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, 12092 IS_RCVAVAIL_START + rcd->ctxt, true); 12093 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; 12094 } 12095 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) { 12096 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, 12097 IS_RCVAVAIL_START + rcd->ctxt, false); 12098 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; 12099 } 12100 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd)) 12101 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; 12102 if (op & HFI1_RCVCTRL_TAILUPD_DIS) { 12103 /* See comment on RcvCtxtCtrl.TailUpd above */ 12104 if (!(op & HFI1_RCVCTRL_CTXT_DIS)) 12105 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; 12106 } 12107 if (op & HFI1_RCVCTRL_TIDFLOW_ENB) 12108 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; 12109 if (op & HFI1_RCVCTRL_TIDFLOW_DIS) 12110 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; 12111 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) { 12112 /* 12113 * In one-packet-per-eager mode, the size comes from 12114 * the RcvArray entry. 
12115 */ 12116 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; 12117 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; 12118 } 12119 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS) 12120 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; 12121 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB) 12122 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; 12123 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS) 12124 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; 12125 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB) 12126 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; 12127 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS) 12128 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; 12129 if (op & HFI1_RCVCTRL_URGENT_ENB) 12130 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, 12131 IS_RCVURGENT_START + rcd->ctxt, true); 12132 if (op & HFI1_RCVCTRL_URGENT_DIS) 12133 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, 12134 IS_RCVURGENT_START + rcd->ctxt, false); 12135 12136 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl); 12137 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); 12138 12139 /* work around sticky RcvCtxtStatus.BlockedRHQFull */ 12140 if (did_enable && 12141 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { 12142 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); 12143 if (reg != 0) { 12144 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", 12145 ctxt, reg); 12146 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); 12147 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); 12148 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); 12149 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); 12150 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); 12151 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", 12152 ctxt, reg, reg == 0 ? "not" : "still"); 12153 } 12154 } 12155 12156 if (did_enable) { 12157 /* 12158 * The interrupt timeout and count must be set after 12159 * the context is enabled to take effect. 12160 */ 12161 /* set interrupt timeout */ 12162 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, 12163 (u64)rcd->rcvavail_timeout << 12164 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); 12165 12166 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */ 12167 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT; 12168 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); 12169 } 12170 12171 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS)) 12172 /* 12173 * If the context has been disabled and the Tail Update has 12174 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address 12175 * so it doesn't contain an address that is invalid. 12176 */ 12177 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 12178 dd->rcvhdrtail_dummy_dma); 12179 } 12180 12181 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) 12182 { 12183 int ret; 12184 u64 val = 0; 12185 12186 if (namep) { 12187 ret = dd->cntrnameslen; 12188 *namep = dd->cntrnames; 12189 } else { 12190 const struct cntr_entry *entry; 12191 int i, j; 12192 12193 ret = (dd->ndevcntrs) * sizeof(u64); 12194 12195 /* Get the start of the block of counters */ 12196 *cntrp = dd->cntrs; 12197 12198 /* 12199 * Now go and fill in each counter in the block. 
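 * Per-VL and per-SDMA-engine counters occupy consecutive slots starting
 * at each entry's offset; all other counters use a single slot.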
12200 */ 12201 for (i = 0; i < DEV_CNTR_LAST; i++) { 12202 entry = &dev_cntrs[i]; 12203 hfi1_cdbg(CNTR, "reading %s", entry->name); 12204 if (entry->flags & CNTR_DISABLED) { 12205 /* Nothing */ 12206 hfi1_cdbg(CNTR, "\tDisabled\n"); 12207 } else { 12208 if (entry->flags & CNTR_VL) { 12209 hfi1_cdbg(CNTR, "\tPer VL\n"); 12210 for (j = 0; j < C_VL_COUNT; j++) { 12211 val = entry->rw_cntr(entry, 12212 dd, j, 12213 CNTR_MODE_R, 12214 0); 12215 hfi1_cdbg( 12216 CNTR, 12217 "\t\tRead 0x%llx for %d\n", 12218 val, j); 12219 dd->cntrs[entry->offset + j] = 12220 val; 12221 } 12222 } else if (entry->flags & CNTR_SDMA) { 12223 hfi1_cdbg(CNTR, 12224 "\t Per SDMA Engine\n"); 12225 for (j = 0; j < chip_sdma_engines(dd); 12226 j++) { 12227 val = 12228 entry->rw_cntr(entry, dd, j, 12229 CNTR_MODE_R, 0); 12230 hfi1_cdbg(CNTR, 12231 "\t\tRead 0x%llx for %d\n", 12232 val, j); 12233 dd->cntrs[entry->offset + j] = 12234 val; 12235 } 12236 } else { 12237 val = entry->rw_cntr(entry, dd, 12238 CNTR_INVALID_VL, 12239 CNTR_MODE_R, 0); 12240 dd->cntrs[entry->offset] = val; 12241 hfi1_cdbg(CNTR, "\tRead 0x%llx", val); 12242 } 12243 } 12244 } 12245 } 12246 return ret; 12247 } 12248 12249 /* 12250 * Used by sysfs to create files for hfi stats to read 12251 */ 12252 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) 12253 { 12254 int ret; 12255 u64 val = 0; 12256 12257 if (namep) { 12258 ret = ppd->dd->portcntrnameslen; 12259 *namep = ppd->dd->portcntrnames; 12260 } else { 12261 const struct cntr_entry *entry; 12262 int i, j; 12263 12264 ret = ppd->dd->nportcntrs * sizeof(u64); 12265 *cntrp = ppd->cntrs; 12266 12267 for (i = 0; i < PORT_CNTR_LAST; i++) { 12268 entry = &port_cntrs[i]; 12269 hfi1_cdbg(CNTR, "reading %s", entry->name); 12270 if (entry->flags & CNTR_DISABLED) { 12271 /* Nothing */ 12272 hfi1_cdbg(CNTR, "\tDisabled\n"); 12273 continue; 12274 } 12275 12276 if (entry->flags & CNTR_VL) { 12277 hfi1_cdbg(CNTR, "\tPer VL"); 12278 for (j = 0; j < C_VL_COUNT; j++) { 12279 val = entry->rw_cntr(entry, ppd, j, 12280 CNTR_MODE_R, 12281 0); 12282 hfi1_cdbg( 12283 CNTR, 12284 "\t\tRead 0x%llx for %d", 12285 val, j); 12286 ppd->cntrs[entry->offset + j] = val; 12287 } 12288 } else { 12289 val = entry->rw_cntr(entry, ppd, 12290 CNTR_INVALID_VL, 12291 CNTR_MODE_R, 12292 0); 12293 ppd->cntrs[entry->offset] = val; 12294 hfi1_cdbg(CNTR, "\tRead 0x%llx", val); 12295 } 12296 } 12297 } 12298 return ret; 12299 } 12300 12301 static void free_cntrs(struct hfi1_devdata *dd) 12302 { 12303 struct hfi1_pportdata *ppd; 12304 int i; 12305 12306 if (dd->synth_stats_timer.function) 12307 del_timer_sync(&dd->synth_stats_timer); 12308 ppd = (struct hfi1_pportdata *)(dd + 1); 12309 for (i = 0; i < dd->num_pports; i++, ppd++) { 12310 kfree(ppd->cntrs); 12311 kfree(ppd->scntrs); 12312 free_percpu(ppd->ibport_data.rvp.rc_acks); 12313 free_percpu(ppd->ibport_data.rvp.rc_qacks); 12314 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); 12315 ppd->cntrs = NULL; 12316 ppd->scntrs = NULL; 12317 ppd->ibport_data.rvp.rc_acks = NULL; 12318 ppd->ibport_data.rvp.rc_qacks = NULL; 12319 ppd->ibport_data.rvp.rc_delayed_comp = NULL; 12320 } 12321 kfree(dd->portcntrnames); 12322 dd->portcntrnames = NULL; 12323 kfree(dd->cntrs); 12324 dd->cntrs = NULL; 12325 kfree(dd->scntrs); 12326 dd->scntrs = NULL; 12327 kfree(dd->cntrnames); 12328 dd->cntrnames = NULL; 12329 if (dd->update_cntr_wq) { 12330 destroy_workqueue(dd->update_cntr_wq); 12331 dd->update_cntr_wq = NULL; 12332 } 12333 } 12334 12335 static u64 read_dev_port_cntr(struct 
hfi1_devdata *dd, struct cntr_entry *entry, 12336 u64 *psval, void *context, int vl) 12337 { 12338 u64 val; 12339 u64 sval = *psval; 12340 12341 if (entry->flags & CNTR_DISABLED) { 12342 dd_dev_err(dd, "Counter %s not enabled", entry->name); 12343 return 0; 12344 } 12345 12346 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); 12347 12348 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); 12349 12350 /* If its a synthetic counter there is more work we need to do */ 12351 if (entry->flags & CNTR_SYNTH) { 12352 if (sval == CNTR_MAX) { 12353 /* No need to read already saturated */ 12354 return CNTR_MAX; 12355 } 12356 12357 if (entry->flags & CNTR_32BIT) { 12358 /* 32bit counters can wrap multiple times */ 12359 u64 upper = sval >> 32; 12360 u64 lower = (sval << 32) >> 32; 12361 12362 if (lower > val) { /* hw wrapped */ 12363 if (upper == CNTR_32BIT_MAX) 12364 val = CNTR_MAX; 12365 else 12366 upper++; 12367 } 12368 12369 if (val != CNTR_MAX) 12370 val = (upper << 32) | val; 12371 12372 } else { 12373 /* If we rolled we are saturated */ 12374 if ((val < sval) || (val > CNTR_MAX)) 12375 val = CNTR_MAX; 12376 } 12377 } 12378 12379 *psval = val; 12380 12381 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); 12382 12383 return val; 12384 } 12385 12386 static u64 write_dev_port_cntr(struct hfi1_devdata *dd, 12387 struct cntr_entry *entry, 12388 u64 *psval, void *context, int vl, u64 data) 12389 { 12390 u64 val; 12391 12392 if (entry->flags & CNTR_DISABLED) { 12393 dd_dev_err(dd, "Counter %s not enabled", entry->name); 12394 return 0; 12395 } 12396 12397 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); 12398 12399 if (entry->flags & CNTR_SYNTH) { 12400 *psval = data; 12401 if (entry->flags & CNTR_32BIT) { 12402 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, 12403 (data << 32) >> 32); 12404 val = data; /* return the full 64bit value */ 12405 } else { 12406 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, 12407 data); 12408 } 12409 } else { 12410 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); 12411 } 12412 12413 *psval = val; 12414 12415 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); 12416 12417 return val; 12418 } 12419 12420 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) 12421 { 12422 struct cntr_entry *entry; 12423 u64 *sval; 12424 12425 entry = &dev_cntrs[index]; 12426 sval = dd->scntrs + entry->offset; 12427 12428 if (vl != CNTR_INVALID_VL) 12429 sval += vl; 12430 12431 return read_dev_port_cntr(dd, entry, sval, dd, vl); 12432 } 12433 12434 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) 12435 { 12436 struct cntr_entry *entry; 12437 u64 *sval; 12438 12439 entry = &dev_cntrs[index]; 12440 sval = dd->scntrs + entry->offset; 12441 12442 if (vl != CNTR_INVALID_VL) 12443 sval += vl; 12444 12445 return write_dev_port_cntr(dd, entry, sval, dd, vl, data); 12446 } 12447 12448 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl) 12449 { 12450 struct cntr_entry *entry; 12451 u64 *sval; 12452 12453 entry = &port_cntrs[index]; 12454 sval = ppd->scntrs + entry->offset; 12455 12456 if (vl != CNTR_INVALID_VL) 12457 sval += vl; 12458 12459 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && 12460 (index <= C_RCV_HDR_OVF_LAST)) { 12461 /* We do not want to bother for disabled contexts */ 12462 return 0; 12463 } 12464 12465 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); 12466 } 12467 12468 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data) 12469 { 
12470 struct cntr_entry *entry; 12471 u64 *sval; 12472 12473 entry = &port_cntrs[index]; 12474 sval = ppd->scntrs + entry->offset; 12475 12476 if (vl != CNTR_INVALID_VL) 12477 sval += vl; 12478 12479 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && 12480 (index <= C_RCV_HDR_OVF_LAST)) { 12481 /* We do not want to bother for disabled contexts */ 12482 return 0; 12483 } 12484 12485 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); 12486 } 12487 12488 static void do_update_synth_timer(struct work_struct *work) 12489 { 12490 u64 cur_tx; 12491 u64 cur_rx; 12492 u64 total_flits; 12493 u8 update = 0; 12494 int i, j, vl; 12495 struct hfi1_pportdata *ppd; 12496 struct cntr_entry *entry; 12497 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, 12498 update_cntr_work); 12499 12500 /* 12501 * Rather than keep beating on the CSRs pick a minimal set that we can 12502 * check to watch for potential roll over. We can do this by looking at 12503 * the number of flits sent/recv. If the total flits exceeds 32bits then 12504 * we have to iterate all the counters and update. 12505 */ 12506 entry = &dev_cntrs[C_DC_RCV_FLITS]; 12507 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); 12508 12509 entry = &dev_cntrs[C_DC_XMIT_FLITS]; 12510 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); 12511 12512 hfi1_cdbg( 12513 CNTR, 12514 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n", 12515 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); 12516 12517 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { 12518 /* 12519 * May not be strictly necessary to update but it won't hurt and 12520 * simplifies the logic here. 12521 */ 12522 update = 1; 12523 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating", 12524 dd->unit); 12525 } else { 12526 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); 12527 hfi1_cdbg(CNTR, 12528 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, 12529 total_flits, (u64)CNTR_32BIT_MAX); 12530 if (total_flits >= CNTR_32BIT_MAX) { 12531 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating", 12532 dd->unit); 12533 update = 1; 12534 } 12535 } 12536 12537 if (update) { 12538 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); 12539 for (i = 0; i < DEV_CNTR_LAST; i++) { 12540 entry = &dev_cntrs[i]; 12541 if (entry->flags & CNTR_VL) { 12542 for (vl = 0; vl < C_VL_COUNT; vl++) 12543 read_dev_cntr(dd, i, vl); 12544 } else { 12545 read_dev_cntr(dd, i, CNTR_INVALID_VL); 12546 } 12547 } 12548 ppd = (struct hfi1_pportdata *)(dd + 1); 12549 for (i = 0; i < dd->num_pports; i++, ppd++) { 12550 for (j = 0; j < PORT_CNTR_LAST; j++) { 12551 entry = &port_cntrs[j]; 12552 if (entry->flags & CNTR_VL) { 12553 for (vl = 0; vl < C_VL_COUNT; vl++) 12554 read_port_cntr(ppd, j, vl); 12555 } else { 12556 read_port_cntr(ppd, j, CNTR_INVALID_VL); 12557 } 12558 } 12559 } 12560 12561 /* 12562 * We want the value in the register. The goal is to keep track 12563 * of the number of "ticks" not the counter value. In other 12564 * words if the register rolls we want to notice it and go ahead 12565 * and force an update. 
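 * As an illustrative example of that tripwire: if last_tx was
 * 0xf0000000 and the current read returns 0x1000, the
 * cur_tx < last_tx test above fires and the full counter walk
 * is forced, keeping the 64-bit synthetic values monotonic.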
12566 */ 12567 entry = &dev_cntrs[C_DC_XMIT_FLITS]; 12568 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, 12569 CNTR_MODE_R, 0); 12570 12571 entry = &dev_cntrs[C_DC_RCV_FLITS]; 12572 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, 12573 CNTR_MODE_R, 0); 12574 12575 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx", 12576 dd->unit, dd->last_tx, dd->last_rx); 12577 12578 } else { 12579 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); 12580 } 12581 } 12582 12583 static void update_synth_timer(struct timer_list *t) 12584 { 12585 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); 12586 12587 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); 12588 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12589 } 12590 12591 #define C_MAX_NAME 16 /* 15 chars + one for /0 */ 12592 static int init_cntrs(struct hfi1_devdata *dd) 12593 { 12594 int i, rcv_ctxts, j; 12595 size_t sz; 12596 char *p; 12597 char name[C_MAX_NAME]; 12598 struct hfi1_pportdata *ppd; 12599 const char *bit_type_32 = ",32"; 12600 const int bit_type_32_sz = strlen(bit_type_32); 12601 u32 sdma_engines = chip_sdma_engines(dd); 12602 12603 /* set up the stats timer; the add_timer is done at the end */ 12604 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); 12605 12606 /***********************/ 12607 /* per device counters */ 12608 /***********************/ 12609 12610 /* size names and determine how many we have*/ 12611 dd->ndevcntrs = 0; 12612 sz = 0; 12613 12614 for (i = 0; i < DEV_CNTR_LAST; i++) { 12615 if (dev_cntrs[i].flags & CNTR_DISABLED) { 12616 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name); 12617 continue; 12618 } 12619 12620 if (dev_cntrs[i].flags & CNTR_VL) { 12621 dev_cntrs[i].offset = dd->ndevcntrs; 12622 for (j = 0; j < C_VL_COUNT; j++) { 12623 snprintf(name, C_MAX_NAME, "%s%d", 12624 dev_cntrs[i].name, vl_from_idx(j)); 12625 sz += strlen(name); 12626 /* Add ",32" for 32-bit counters */ 12627 if (dev_cntrs[i].flags & CNTR_32BIT) 12628 sz += bit_type_32_sz; 12629 sz++; 12630 dd->ndevcntrs++; 12631 } 12632 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12633 dev_cntrs[i].offset = dd->ndevcntrs; 12634 for (j = 0; j < sdma_engines; j++) { 12635 snprintf(name, C_MAX_NAME, "%s%d", 12636 dev_cntrs[i].name, j); 12637 sz += strlen(name); 12638 /* Add ",32" for 32-bit counters */ 12639 if (dev_cntrs[i].flags & CNTR_32BIT) 12640 sz += bit_type_32_sz; 12641 sz++; 12642 dd->ndevcntrs++; 12643 } 12644 } else { 12645 /* +1 for newline. 
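 * As a purely hypothetical example, a 32-bit entry named
 * "Foo" contributes strlen("Foo") + bit_type_32_sz + 1
 * bytes, i.e. room for "Foo,32\n".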
*/ 12646 sz += strlen(dev_cntrs[i].name) + 1; 12647 /* Add ",32" for 32-bit counters */ 12648 if (dev_cntrs[i].flags & CNTR_32BIT) 12649 sz += bit_type_32_sz; 12650 dev_cntrs[i].offset = dd->ndevcntrs; 12651 dd->ndevcntrs++; 12652 } 12653 } 12654 12655 /* allocate space for the counter values */ 12656 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), 12657 GFP_KERNEL); 12658 if (!dd->cntrs) 12659 goto bail; 12660 12661 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); 12662 if (!dd->scntrs) 12663 goto bail; 12664 12665 /* allocate space for the counter names */ 12666 dd->cntrnameslen = sz; 12667 dd->cntrnames = kmalloc(sz, GFP_KERNEL); 12668 if (!dd->cntrnames) 12669 goto bail; 12670 12671 /* fill in the names */ 12672 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { 12673 if (dev_cntrs[i].flags & CNTR_DISABLED) { 12674 /* Nothing */ 12675 } else if (dev_cntrs[i].flags & CNTR_VL) { 12676 for (j = 0; j < C_VL_COUNT; j++) { 12677 snprintf(name, C_MAX_NAME, "%s%d", 12678 dev_cntrs[i].name, 12679 vl_from_idx(j)); 12680 memcpy(p, name, strlen(name)); 12681 p += strlen(name); 12682 12683 /* Counter is 32 bits */ 12684 if (dev_cntrs[i].flags & CNTR_32BIT) { 12685 memcpy(p, bit_type_32, bit_type_32_sz); 12686 p += bit_type_32_sz; 12687 } 12688 12689 *p++ = '\n'; 12690 } 12691 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12692 for (j = 0; j < sdma_engines; j++) { 12693 snprintf(name, C_MAX_NAME, "%s%d", 12694 dev_cntrs[i].name, j); 12695 memcpy(p, name, strlen(name)); 12696 p += strlen(name); 12697 12698 /* Counter is 32 bits */ 12699 if (dev_cntrs[i].flags & CNTR_32BIT) { 12700 memcpy(p, bit_type_32, bit_type_32_sz); 12701 p += bit_type_32_sz; 12702 } 12703 12704 *p++ = '\n'; 12705 } 12706 } else { 12707 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); 12708 p += strlen(dev_cntrs[i].name); 12709 12710 /* Counter is 32 bits */ 12711 if (dev_cntrs[i].flags & CNTR_32BIT) { 12712 memcpy(p, bit_type_32, bit_type_32_sz); 12713 p += bit_type_32_sz; 12714 } 12715 12716 *p++ = '\n'; 12717 } 12718 } 12719 12720 /*********************/ 12721 /* per port counters */ 12722 /*********************/ 12723 12724 /* 12725 * Go through the counters for the overflows and disable the ones we 12726 * don't need. This varies based on platform so we need to do it 12727 * dynamically here. 
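 * For example (the context count is illustrative only): with 32
 * receive contexts in use, entries C_RCV_HDR_OVF_FIRST + 32
 * through C_RCV_HDR_OVF_LAST are marked CNTR_DISABLED, so no
 * names or storage are reserved for contexts that do not exist.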
12728 */ 12729 rcv_ctxts = dd->num_rcv_contexts; 12730 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts; 12731 i <= C_RCV_HDR_OVF_LAST; i++) { 12732 port_cntrs[i].flags |= CNTR_DISABLED; 12733 } 12734 12735 /* size port counter names and determine how many we have*/ 12736 sz = 0; 12737 dd->nportcntrs = 0; 12738 for (i = 0; i < PORT_CNTR_LAST; i++) { 12739 if (port_cntrs[i].flags & CNTR_DISABLED) { 12740 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name); 12741 continue; 12742 } 12743 12744 if (port_cntrs[i].flags & CNTR_VL) { 12745 port_cntrs[i].offset = dd->nportcntrs; 12746 for (j = 0; j < C_VL_COUNT; j++) { 12747 snprintf(name, C_MAX_NAME, "%s%d", 12748 port_cntrs[i].name, vl_from_idx(j)); 12749 sz += strlen(name); 12750 /* Add ",32" for 32-bit counters */ 12751 if (port_cntrs[i].flags & CNTR_32BIT) 12752 sz += bit_type_32_sz; 12753 sz++; 12754 dd->nportcntrs++; 12755 } 12756 } else { 12757 /* +1 for newline */ 12758 sz += strlen(port_cntrs[i].name) + 1; 12759 /* Add ",32" for 32-bit counters */ 12760 if (port_cntrs[i].flags & CNTR_32BIT) 12761 sz += bit_type_32_sz; 12762 port_cntrs[i].offset = dd->nportcntrs; 12763 dd->nportcntrs++; 12764 } 12765 } 12766 12767 /* allocate space for the counter names */ 12768 dd->portcntrnameslen = sz; 12769 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); 12770 if (!dd->portcntrnames) 12771 goto bail; 12772 12773 /* fill in port cntr names */ 12774 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { 12775 if (port_cntrs[i].flags & CNTR_DISABLED) 12776 continue; 12777 12778 if (port_cntrs[i].flags & CNTR_VL) { 12779 for (j = 0; j < C_VL_COUNT; j++) { 12780 snprintf(name, C_MAX_NAME, "%s%d", 12781 port_cntrs[i].name, vl_from_idx(j)); 12782 memcpy(p, name, strlen(name)); 12783 p += strlen(name); 12784 12785 /* Counter is 32 bits */ 12786 if (port_cntrs[i].flags & CNTR_32BIT) { 12787 memcpy(p, bit_type_32, bit_type_32_sz); 12788 p += bit_type_32_sz; 12789 } 12790 12791 *p++ = '\n'; 12792 } 12793 } else { 12794 memcpy(p, port_cntrs[i].name, 12795 strlen(port_cntrs[i].name)); 12796 p += strlen(port_cntrs[i].name); 12797 12798 /* Counter is 32 bits */ 12799 if (port_cntrs[i].flags & CNTR_32BIT) { 12800 memcpy(p, bit_type_32, bit_type_32_sz); 12801 p += bit_type_32_sz; 12802 } 12803 12804 *p++ = '\n'; 12805 } 12806 } 12807 12808 /* allocate per port storage for counter values */ 12809 ppd = (struct hfi1_pportdata *)(dd + 1); 12810 for (i = 0; i < dd->num_pports; i++, ppd++) { 12811 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); 12812 if (!ppd->cntrs) 12813 goto bail; 12814 12815 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); 12816 if (!ppd->scntrs) 12817 goto bail; 12818 } 12819 12820 /* CPU counters need to be allocated and zeroed */ 12821 if (init_cpu_counters(dd)) 12822 goto bail; 12823 12824 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", 12825 WQ_MEM_RECLAIM, dd->unit); 12826 if (!dd->update_cntr_wq) 12827 goto bail; 12828 12829 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); 12830 12831 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12832 return 0; 12833 bail: 12834 free_cntrs(dd); 12835 return -ENOMEM; 12836 } 12837 12838 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) 12839 { 12840 switch (chip_lstate) { 12841 case LSTATE_DOWN: 12842 return IB_PORT_DOWN; 12843 case LSTATE_INIT: 12844 return IB_PORT_INIT; 12845 case LSTATE_ARMED: 12846 return IB_PORT_ARMED; 12847 case LSTATE_ACTIVE: 12848 return IB_PORT_ACTIVE; 12849 default: 12850 dd_dev_err(dd, 
12851 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", 12852 chip_lstate); 12853 return IB_PORT_DOWN; 12854 } 12855 } 12856 12857 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) 12858 { 12859 /* look at the HFI meta-states only */ 12860 switch (chip_pstate & 0xf0) { 12861 case PLS_DISABLED: 12862 return IB_PORTPHYSSTATE_DISABLED; 12863 case PLS_OFFLINE: 12864 return OPA_PORTPHYSSTATE_OFFLINE; 12865 case PLS_POLLING: 12866 return IB_PORTPHYSSTATE_POLLING; 12867 case PLS_CONFIGPHY: 12868 return IB_PORTPHYSSTATE_TRAINING; 12869 case PLS_LINKUP: 12870 return IB_PORTPHYSSTATE_LINKUP; 12871 case PLS_PHYTEST: 12872 return IB_PORTPHYSSTATE_PHY_TEST; 12873 default: 12874 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", 12875 chip_pstate); 12876 return IB_PORTPHYSSTATE_DISABLED; 12877 } 12878 } 12879 12880 /* return the OPA port logical state name */ 12881 const char *opa_lstate_name(u32 lstate) 12882 { 12883 static const char * const port_logical_names[] = { 12884 "PORT_NOP", 12885 "PORT_DOWN", 12886 "PORT_INIT", 12887 "PORT_ARMED", 12888 "PORT_ACTIVE", 12889 "PORT_ACTIVE_DEFER", 12890 }; 12891 if (lstate < ARRAY_SIZE(port_logical_names)) 12892 return port_logical_names[lstate]; 12893 return "unknown"; 12894 } 12895 12896 /* return the OPA port physical state name */ 12897 const char *opa_pstate_name(u32 pstate) 12898 { 12899 static const char * const port_physical_names[] = { 12900 "PHYS_NOP", 12901 "reserved1", 12902 "PHYS_POLL", 12903 "PHYS_DISABLED", 12904 "PHYS_TRAINING", 12905 "PHYS_LINKUP", 12906 "PHYS_LINK_ERR_RECOVER", 12907 "PHYS_PHY_TEST", 12908 "reserved8", 12909 "PHYS_OFFLINE", 12910 "PHYS_GANGED", 12911 "PHYS_TEST", 12912 }; 12913 if (pstate < ARRAY_SIZE(port_physical_names)) 12914 return port_physical_names[pstate]; 12915 return "unknown"; 12916 } 12917 12918 /** 12919 * update_statusp - Update userspace status flag 12920 * @ppd: Port data structure 12921 * @state: port state information 12922 * 12923 * Actual port status is determined by the host_link_state value 12924 * in the ppd. 12925 * 12926 * host_link_state MUST be updated before updating the user space 12927 * statusp. 12928 */ 12929 static void update_statusp(struct hfi1_pportdata *ppd, u32 state) 12930 { 12931 /* 12932 * Set port status flags in the page mapped into userspace 12933 * memory. Do it here to ensure a reliable state - this is 12934 * the only function called by all state handling code. 12935 * Always set the flags due to the fact that the cache value 12936 * might have been changed explicitly outside of this 12937 * function. 12938 */ 12939 if (ppd->statusp) { 12940 switch (state) { 12941 case IB_PORT_DOWN: 12942 case IB_PORT_INIT: 12943 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | 12944 HFI1_STATUS_IB_READY); 12945 break; 12946 case IB_PORT_ARMED: 12947 *ppd->statusp |= HFI1_STATUS_IB_CONF; 12948 break; 12949 case IB_PORT_ACTIVE: 12950 *ppd->statusp |= HFI1_STATUS_IB_READY; 12951 break; 12952 } 12953 } 12954 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", 12955 opa_lstate_name(state), state); 12956 } 12957 12958 /** 12959 * wait_logical_linkstate - wait for an IB link state change to occur 12960 * @ppd: port device 12961 * @state: the state to wait for 12962 * @msecs: the number of milliseconds to wait 12963 * 12964 * Wait up to msecs milliseconds for IB link state change to occur. 12965 * For now, take the easy polling route. 12966 * Returns 0 if state reached, otherwise -ETIMEDOUT. 
12967 */ 12968 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, 12969 int msecs) 12970 { 12971 unsigned long timeout; 12972 u32 new_state; 12973 12974 timeout = jiffies + msecs_to_jiffies(msecs); 12975 while (1) { 12976 new_state = chip_to_opa_lstate(ppd->dd, 12977 read_logical_state(ppd->dd)); 12978 if (new_state == state) 12979 break; 12980 if (time_after(jiffies, timeout)) { 12981 dd_dev_err(ppd->dd, 12982 "timeout waiting for link state 0x%x\n", 12983 state); 12984 return -ETIMEDOUT; 12985 } 12986 msleep(20); 12987 } 12988 12989 return 0; 12990 } 12991 12992 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state) 12993 { 12994 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); 12995 12996 dd_dev_info(ppd->dd, 12997 "physical state changed to %s (0x%x), phy 0x%x\n", 12998 opa_pstate_name(ib_pstate), ib_pstate, state); 12999 } 13000 13001 /* 13002 * Read the physical hardware link state and check if it matches host 13003 * drivers anticipated state. 13004 */ 13005 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state) 13006 { 13007 u32 read_state = read_physical_state(ppd->dd); 13008 13009 if (read_state == state) { 13010 log_state_transition(ppd, state); 13011 } else { 13012 dd_dev_err(ppd->dd, 13013 "anticipated phy link state 0x%x, read 0x%x\n", 13014 state, read_state); 13015 } 13016 } 13017 13018 /* 13019 * wait_physical_linkstate - wait for an physical link state change to occur 13020 * @ppd: port device 13021 * @state: the state to wait for 13022 * @msecs: the number of milliseconds to wait 13023 * 13024 * Wait up to msecs milliseconds for physical link state change to occur. 13025 * Returns 0 if state reached, otherwise -ETIMEDOUT. 13026 */ 13027 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, 13028 int msecs) 13029 { 13030 u32 read_state; 13031 unsigned long timeout; 13032 13033 timeout = jiffies + msecs_to_jiffies(msecs); 13034 while (1) { 13035 read_state = read_physical_state(ppd->dd); 13036 if (read_state == state) 13037 break; 13038 if (time_after(jiffies, timeout)) { 13039 dd_dev_err(ppd->dd, 13040 "timeout waiting for phy link state 0x%x\n", 13041 state); 13042 return -ETIMEDOUT; 13043 } 13044 usleep_range(1950, 2050); /* sleep 2ms-ish */ 13045 } 13046 13047 log_state_transition(ppd, state); 13048 return 0; 13049 } 13050 13051 /* 13052 * wait_phys_link_offline_quiet_substates - wait for any offline substate 13053 * @ppd: port device 13054 * @msecs: the number of milliseconds to wait 13055 * 13056 * Wait up to msecs milliseconds for any offline physical link 13057 * state change to occur. 13058 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. 13059 */ 13060 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, 13061 int msecs) 13062 { 13063 u32 read_state; 13064 unsigned long timeout; 13065 13066 timeout = jiffies + msecs_to_jiffies(msecs); 13067 while (1) { 13068 read_state = read_physical_state(ppd->dd); 13069 if ((read_state & 0xF0) == PLS_OFFLINE) 13070 break; 13071 if (time_after(jiffies, timeout)) { 13072 dd_dev_err(ppd->dd, 13073 "timeout waiting for phy link offline.quiet substates. 
Read state 0x%x, %dms\n", 13074 read_state, msecs); 13075 return -ETIMEDOUT; 13076 } 13077 usleep_range(1950, 2050); /* sleep 2ms-ish */ 13078 } 13079 13080 log_state_transition(ppd, read_state); 13081 return read_state; 13082 } 13083 13084 /* 13085 * wait_phys_link_out_of_offline - wait for any out of offline state 13086 * @ppd: port device 13087 * @msecs: the number of milliseconds to wait 13088 * 13089 * Wait up to msecs milliseconds for any out of offline physical link 13090 * state change to occur. 13091 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. 13092 */ 13093 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, 13094 int msecs) 13095 { 13096 u32 read_state; 13097 unsigned long timeout; 13098 13099 timeout = jiffies + msecs_to_jiffies(msecs); 13100 while (1) { 13101 read_state = read_physical_state(ppd->dd); 13102 if ((read_state & 0xF0) != PLS_OFFLINE) 13103 break; 13104 if (time_after(jiffies, timeout)) { 13105 dd_dev_err(ppd->dd, 13106 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n", 13107 read_state, msecs); 13108 return -ETIMEDOUT; 13109 } 13110 usleep_range(1950, 2050); /* sleep 2ms-ish */ 13111 } 13112 13113 log_state_transition(ppd, read_state); 13114 return read_state; 13115 } 13116 13117 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 13118 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 13119 13120 #define SET_STATIC_RATE_CONTROL_SMASK(r) \ 13121 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 13122 13123 void hfi1_init_ctxt(struct send_context *sc) 13124 { 13125 if (sc) { 13126 struct hfi1_devdata *dd = sc->dd; 13127 u64 reg; 13128 u8 set = (sc->type == SC_USER ? 13129 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) : 13130 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)); 13131 reg = read_kctxt_csr(dd, sc->hw_context, 13132 SEND_CTXT_CHECK_ENABLE); 13133 if (set) 13134 CLEAR_STATIC_RATE_CONTROL_SMASK(reg); 13135 else 13136 SET_STATIC_RATE_CONTROL_SMASK(reg); 13137 write_kctxt_csr(dd, sc->hw_context, 13138 SEND_CTXT_CHECK_ENABLE, reg); 13139 } 13140 } 13141 13142 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) 13143 { 13144 int ret = 0; 13145 u64 reg; 13146 13147 if (dd->icode != ICODE_RTL_SILICON) { 13148 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) 13149 dd_dev_info(dd, "%s: tempsense not supported by HW\n", 13150 __func__); 13151 return -EINVAL; 13152 } 13153 reg = read_csr(dd, ASIC_STS_THERM); 13154 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & 13155 ASIC_STS_THERM_CURR_TEMP_MASK); 13156 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & 13157 ASIC_STS_THERM_LO_TEMP_MASK); 13158 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & 13159 ASIC_STS_THERM_HI_TEMP_MASK); 13160 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & 13161 ASIC_STS_THERM_CRIT_TEMP_MASK); 13162 /* triggers is a 3-bit value - 1 bit per trigger. 
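 * They are assumed to correspond to the low, high and critical
 * limits extracted above.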
*/ 13163 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); 13164 13165 return ret; 13166 } 13167 13168 /* ========================================================================= */ 13169 13170 /** 13171 * read_mod_write() - Calculate the IRQ register index and set/clear the bits 13172 * @dd: valid devdata 13173 * @src: IRQ source to determine register index from 13174 * @bits: the bits to set or clear 13175 * @set: true == set the bits, false == clear the bits 13176 * 13177 */ 13178 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, 13179 bool set) 13180 { 13181 u64 reg; 13182 u16 idx = src / BITS_PER_REGISTER; 13183 13184 spin_lock(&dd->irq_src_lock); 13185 reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); 13186 if (set) 13187 reg |= bits; 13188 else 13189 reg &= ~bits; 13190 write_csr(dd, CCE_INT_MASK + (8 * idx), reg); 13191 spin_unlock(&dd->irq_src_lock); 13192 } 13193 13194 /** 13195 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources 13196 * @dd: valid devdata 13197 * @first: first IRQ source to set/clear 13198 * @last: last IRQ source (inclusive) to set/clear 13199 * @set: true == set the bits, false == clear the bits 13200 * 13201 * If first == last, set the exact source. 13202 */ 13203 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) 13204 { 13205 u64 bits = 0; 13206 u64 bit; 13207 u16 src; 13208 13209 if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES) 13210 return -EINVAL; 13211 13212 if (last < first) 13213 return -ERANGE; 13214 13215 for (src = first; src <= last; src++) { 13216 bit = src % BITS_PER_REGISTER; 13217 /* wrapped to next register? */ 13218 if (!bit && bits) { 13219 read_mod_write(dd, src - 1, bits, set); 13220 bits = 0; 13221 } 13222 bits |= BIT_ULL(bit); 13223 } 13224 read_mod_write(dd, last, bits, set); 13225 13226 return 0; 13227 } 13228 13229 /* 13230 * Clear all interrupt sources on the chip. 13231 */ 13232 void clear_all_interrupts(struct hfi1_devdata *dd) 13233 { 13234 int i; 13235 13236 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 13237 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); 13238 13239 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); 13240 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); 13241 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); 13242 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); 13243 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); 13244 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); 13245 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); 13246 for (i = 0; i < chip_send_contexts(dd); i++) 13247 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); 13248 for (i = 0; i < chip_sdma_engines(dd); i++) 13249 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); 13250 13251 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); 13252 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); 13253 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); 13254 } 13255 13256 /* 13257 * Remap the interrupt source from the general handler to the given MSI-X 13258 * interrupt. 
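 *
 * Worked example (the source number is illustrative): for isrc == 72,
 * the general handler mask bit is gi_mask[72 / 64] bit 72 % 64 == 8,
 * and the map entry is byte 72 % 8 == 0 of CCE_INT_MAP register
 * 72 / 8 == 9, which is rewritten with the MSI-X vector number.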
13259 */ 13260 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) 13261 { 13262 u64 reg; 13263 int m, n; 13264 13265 /* clear from the handled mask of the general interrupt */ 13266 m = isrc / 64; 13267 n = isrc % 64; 13268 if (likely(m < CCE_NUM_INT_CSRS)) { 13269 dd->gi_mask[m] &= ~((u64)1 << n); 13270 } else { 13271 dd_dev_err(dd, "remap interrupt err\n"); 13272 return; 13273 } 13274 13275 /* direct the chip source to the given MSI-X interrupt */ 13276 m = isrc / 8; 13277 n = isrc % 8; 13278 reg = read_csr(dd, CCE_INT_MAP + (8 * m)); 13279 reg &= ~((u64)0xff << (8 * n)); 13280 reg |= ((u64)msix_intr & 0xff) << (8 * n); 13281 write_csr(dd, CCE_INT_MAP + (8 * m), reg); 13282 } 13283 13284 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) 13285 { 13286 /* 13287 * SDMA engine interrupt sources grouped by type, rather than 13288 * engine. Per-engine interrupts are as follows: 13289 * SDMA 13290 * SDMAProgress 13291 * SDMAIdle 13292 */ 13293 remap_intr(dd, IS_SDMA_START + engine, msix_intr); 13294 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); 13295 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); 13296 } 13297 13298 /* 13299 * Set the general handler to accept all interrupts, remap all 13300 * chip interrupts back to MSI-X 0. 13301 */ 13302 void reset_interrupts(struct hfi1_devdata *dd) 13303 { 13304 int i; 13305 13306 /* all interrupts handled by the general handler */ 13307 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 13308 dd->gi_mask[i] = ~(u64)0; 13309 13310 /* all chip interrupts map to MSI-X 0 */ 13311 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13312 write_csr(dd, CCE_INT_MAP + (8 * i), 0); 13313 } 13314 13315 /** 13316 * set_up_interrupts() - Initialize the IRQ resources and state 13317 * @dd: valid devdata 13318 * 13319 */ 13320 static int set_up_interrupts(struct hfi1_devdata *dd) 13321 { 13322 int ret; 13323 13324 /* mask all interrupts */ 13325 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); 13326 13327 /* clear all pending interrupts */ 13328 clear_all_interrupts(dd); 13329 13330 /* reset general handler mask, chip MSI-X mappings */ 13331 reset_interrupts(dd); 13332 13333 /* ask for MSI-X interrupts */ 13334 ret = msix_initialize(dd); 13335 if (ret) 13336 return ret; 13337 13338 ret = msix_request_irqs(dd); 13339 if (ret) 13340 msix_clean_up_interrupts(dd); 13341 13342 return ret; 13343 } 13344 13345 /* 13346 * Set up context values in dd. Sets: 13347 * 13348 * num_rcv_contexts - number of contexts being used 13349 * n_krcv_queues - number of kernel contexts 13350 * first_dyn_alloc_ctxt - first dynamically allocated context 13351 * in array of contexts 13352 * freectxts - number of free user contexts 13353 * num_send_contexts - number of PIO send contexts being used 13354 * num_netdev_contexts - number of contexts reserved for netdev 13355 */ 13356 static int set_up_context_variables(struct hfi1_devdata *dd) 13357 { 13358 unsigned long num_kernel_contexts; 13359 u16 num_netdev_contexts; 13360 int ret; 13361 unsigned ngroups; 13362 int rmt_count; 13363 int user_rmt_reduced; 13364 u32 n_usr_ctxts; 13365 u32 send_contexts = chip_send_contexts(dd); 13366 u32 rcv_contexts = chip_rcv_contexts(dd); 13367 13368 /* 13369 * Kernel receive contexts: 13370 * - Context 0 - control context (VL15/multicast/error) 13371 * - Context 1 - first kernel context 13372 * - Context 2 - second kernel context 13373 * ... 
13374 */ 13375 if (n_krcvqs) 13376 /* 13377 * n_krcvqs is the sum of module parameter kernel receive 13378 * contexts, krcvqs[]. It does not include the control 13379 * context, so add that. 13380 */ 13381 num_kernel_contexts = n_krcvqs + 1; 13382 else 13383 num_kernel_contexts = DEFAULT_KRCVQS + 1; 13384 /* 13385 * Every kernel receive context needs an ACK send context. 13386 * one send context is allocated for each VL{0-7} and VL15 13387 */ 13388 if (num_kernel_contexts > (send_contexts - num_vls - 1)) { 13389 dd_dev_err(dd, 13390 "Reducing # kernel rcv contexts to: %d, from %lu\n", 13391 send_contexts - num_vls - 1, 13392 num_kernel_contexts); 13393 num_kernel_contexts = send_contexts - num_vls - 1; 13394 } 13395 13396 /* 13397 * User contexts: 13398 * - default to 1 user context per real (non-HT) CPU core if 13399 * num_user_contexts is negative 13400 */ 13401 if (num_user_contexts < 0) 13402 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask); 13403 else 13404 n_usr_ctxts = num_user_contexts; 13405 /* 13406 * Adjust the counts given a global max. 13407 */ 13408 if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) { 13409 dd_dev_err(dd, 13410 "Reducing # user receive contexts to: %u, from %u\n", 13411 (u32)(rcv_contexts - num_kernel_contexts), 13412 n_usr_ctxts); 13413 /* recalculate */ 13414 n_usr_ctxts = rcv_contexts - num_kernel_contexts; 13415 } 13416 13417 num_netdev_contexts = 13418 hfi1_num_netdev_contexts(dd, rcv_contexts - 13419 (num_kernel_contexts + n_usr_ctxts), 13420 &node_affinity.real_cpu_mask); 13421 /* 13422 * The RMT entries are currently allocated as shown below: 13423 * 1. QOS (0 to 128 entries); 13424 * 2. FECN (num_kernel_context - 1 + num_user_contexts + 13425 * num_netdev_contexts); 13426 * 3. netdev (num_netdev_contexts). 13427 * It should be noted that FECN oversubscribe num_netdev_contexts 13428 * entries of RMT because both netdev and PSM could allocate any receive 13429 * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, 13430 * and PSM FECN must reserve an RMT entry for each possible PSM receive 13431 * context. 13432 */ 13433 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2); 13434 if (HFI1_CAP_IS_KSET(TID_RDMA)) 13435 rmt_count += num_kernel_contexts - 1; 13436 if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { 13437 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; 13438 dd_dev_err(dd, 13439 "RMT size is reducing the number of user receive contexts from %u to %d\n", 13440 n_usr_ctxts, 13441 user_rmt_reduced); 13442 /* recalculate */ 13443 n_usr_ctxts = user_rmt_reduced; 13444 } 13445 13446 /* the first N are kernel contexts, the rest are user/netdev contexts */ 13447 dd->num_rcv_contexts = 13448 num_kernel_contexts + n_usr_ctxts + num_netdev_contexts; 13449 dd->n_krcv_queues = num_kernel_contexts; 13450 dd->first_dyn_alloc_ctxt = num_kernel_contexts; 13451 dd->num_netdev_contexts = num_netdev_contexts; 13452 dd->num_user_contexts = n_usr_ctxts; 13453 dd->freectxts = n_usr_ctxts; 13454 dd_dev_info(dd, 13455 "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n", 13456 rcv_contexts, 13457 (int)dd->num_rcv_contexts, 13458 (int)dd->n_krcv_queues, 13459 dd->num_netdev_contexts, 13460 dd->num_user_contexts); 13461 13462 /* 13463 * Receive array allocation: 13464 * All RcvArray entries are divided into groups of 8. This 13465 * is required by the hardware and will speed up writes to 13466 * consecutive entries by using write-combining of the entire 13467 * cacheline. 
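 * (Assuming 8-byte RcvArray entries, a group of 8 spans one
 * 64-byte cacheline, which is why write-combining applies; the
 * group size used below is RCV_INCREMENT.)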
13468 * 13469 * The number of groups are evenly divided among all contexts. 13470 * any left over groups will be given to the first N user 13471 * contexts. 13472 */ 13473 dd->rcv_entries.group_size = RCV_INCREMENT; 13474 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; 13475 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; 13476 dd->rcv_entries.nctxt_extra = ngroups - 13477 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); 13478 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n", 13479 dd->rcv_entries.ngroups, 13480 dd->rcv_entries.nctxt_extra); 13481 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > 13482 MAX_EAGER_ENTRIES * 2) { 13483 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / 13484 dd->rcv_entries.group_size; 13485 dd_dev_info(dd, 13486 "RcvArray group count too high, change to %u\n", 13487 dd->rcv_entries.ngroups); 13488 dd->rcv_entries.nctxt_extra = 0; 13489 } 13490 /* 13491 * PIO send contexts 13492 */ 13493 ret = init_sc_pools_and_sizes(dd); 13494 if (ret >= 0) { /* success */ 13495 dd->num_send_contexts = ret; 13496 dd_dev_info( 13497 dd, 13498 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n", 13499 send_contexts, 13500 dd->num_send_contexts, 13501 dd->sc_sizes[SC_KERNEL].count, 13502 dd->sc_sizes[SC_ACK].count, 13503 dd->sc_sizes[SC_USER].count, 13504 dd->sc_sizes[SC_VL15].count); 13505 ret = 0; /* success */ 13506 } 13507 13508 return ret; 13509 } 13510 13511 /* 13512 * Set the device/port partition key table. The MAD code 13513 * will ensure that, at least, the partial management 13514 * partition key is present in the table. 13515 */ 13516 static void set_partition_keys(struct hfi1_pportdata *ppd) 13517 { 13518 struct hfi1_devdata *dd = ppd->dd; 13519 u64 reg = 0; 13520 int i; 13521 13522 dd_dev_info(dd, "Setting partition keys\n"); 13523 for (i = 0; i < hfi1_get_npkeys(dd); i++) { 13524 reg |= (ppd->pkeys[i] & 13525 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) << 13526 ((i % 4) * 13527 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT); 13528 /* Each register holds 4 PKey values. */ 13529 if ((i % 4) == 3) { 13530 write_csr(dd, RCV_PARTITION_KEY + 13531 ((i - 3) * 2), reg); 13532 reg = 0; 13533 } 13534 } 13535 13536 /* Always enable HW pkeys check when pkeys table is set */ 13537 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); 13538 } 13539 13540 /* 13541 * These CSRs and memories are uninitialized on reset and must be 13542 * written before reading to set the ECC/parity bits. 13543 * 13544 * NOTE: All user context CSRs that are not mmaped write-only 13545 * (e.g. the TID flows) must be initialized even if the driver never 13546 * reads them. 
13547 */ 13548 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) 13549 { 13550 int i, j; 13551 13552 /* CceIntMap */ 13553 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13554 write_csr(dd, CCE_INT_MAP + (8 * i), 0); 13555 13556 /* SendCtxtCreditReturnAddr */ 13557 for (i = 0; i < chip_send_contexts(dd); i++) 13558 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); 13559 13560 /* PIO Send buffers */ 13561 /* SDMA Send buffers */ 13562 /* 13563 * These are not normally read, and (presently) have no method 13564 * to be read, so are not pre-initialized 13565 */ 13566 13567 /* RcvHdrAddr */ 13568 /* RcvHdrTailAddr */ 13569 /* RcvTidFlowTable */ 13570 for (i = 0; i < chip_rcv_contexts(dd); i++) { 13571 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); 13572 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); 13573 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) 13574 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); 13575 } 13576 13577 /* RcvArray */ 13578 for (i = 0; i < chip_rcv_array_count(dd); i++) 13579 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0); 13580 13581 /* RcvQPMapTable */ 13582 for (i = 0; i < 32; i++) 13583 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); 13584 } 13585 13586 /* 13587 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus. 13588 */ 13589 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, 13590 u64 ctrl_bits) 13591 { 13592 unsigned long timeout; 13593 u64 reg; 13594 13595 /* is the condition present? */ 13596 reg = read_csr(dd, CCE_STATUS); 13597 if ((reg & status_bits) == 0) 13598 return; 13599 13600 /* clear the condition */ 13601 write_csr(dd, CCE_CTRL, ctrl_bits); 13602 13603 /* wait for the condition to clear */ 13604 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT); 13605 while (1) { 13606 reg = read_csr(dd, CCE_STATUS); 13607 if ((reg & status_bits) == 0) 13608 return; 13609 if (time_after(jiffies, timeout)) { 13610 dd_dev_err(dd, 13611 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n", 13612 status_bits, reg & status_bits); 13613 return; 13614 } 13615 udelay(1); 13616 } 13617 } 13618 13619 /* set CCE CSRs to chip reset defaults */ 13620 static void reset_cce_csrs(struct hfi1_devdata *dd) 13621 { 13622 int i; 13623 13624 /* CCE_REVISION read-only */ 13625 /* CCE_REVISION2 read-only */ 13626 /* CCE_CTRL - bits clear automatically */ 13627 /* CCE_STATUS read-only, use CceCtrl to clear */ 13628 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); 13629 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); 13630 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); 13631 for (i = 0; i < CCE_NUM_SCRATCH; i++) 13632 write_csr(dd, CCE_SCRATCH + (8 * i), 0); 13633 /* CCE_ERR_STATUS read-only */ 13634 write_csr(dd, CCE_ERR_MASK, 0); 13635 write_csr(dd, CCE_ERR_CLEAR, ~0ull); 13636 /* CCE_ERR_FORCE leave alone */ 13637 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++) 13638 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0); 13639 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR); 13640 /* CCE_PCIE_CTRL leave alone */ 13641 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) { 13642 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); 13643 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), 13644 CCE_MSIX_TABLE_UPPER_RESETCSR); 13645 } 13646 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) { 13647 /* CCE_MSIX_PBA read-only */ 13648 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull); 13649 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull); 13650 } 13651 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13652 
write_csr(dd, CCE_INT_MAP, 0); 13653 for (i = 0; i < CCE_NUM_INT_CSRS; i++) { 13654 /* CCE_INT_STATUS read-only */ 13655 write_csr(dd, CCE_INT_MASK + (8 * i), 0); 13656 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull); 13657 /* CCE_INT_FORCE leave alone */ 13658 /* CCE_INT_BLOCKED read-only */ 13659 } 13660 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++) 13661 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); 13662 } 13663 13664 /* set MISC CSRs to chip reset defaults */ 13665 static void reset_misc_csrs(struct hfi1_devdata *dd) 13666 { 13667 int i; 13668 13669 for (i = 0; i < 32; i++) { 13670 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0); 13671 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); 13672 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); 13673 } 13674 /* 13675 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can 13676 * only be written 128-byte chunks 13677 */ 13678 /* init RSA engine to clear lingering errors */ 13679 write_csr(dd, MISC_CFG_RSA_CMD, 1); 13680 write_csr(dd, MISC_CFG_RSA_MU, 0); 13681 write_csr(dd, MISC_CFG_FW_CTRL, 0); 13682 /* MISC_STS_8051_DIGEST read-only */ 13683 /* MISC_STS_SBM_DIGEST read-only */ 13684 /* MISC_STS_PCIE_DIGEST read-only */ 13685 /* MISC_STS_FAB_DIGEST read-only */ 13686 /* MISC_ERR_STATUS read-only */ 13687 write_csr(dd, MISC_ERR_MASK, 0); 13688 write_csr(dd, MISC_ERR_CLEAR, ~0ull); 13689 /* MISC_ERR_FORCE leave alone */ 13690 } 13691 13692 /* set TXE CSRs to chip reset defaults */ 13693 static void reset_txe_csrs(struct hfi1_devdata *dd) 13694 { 13695 int i; 13696 13697 /* 13698 * TXE Kernel CSRs 13699 */ 13700 write_csr(dd, SEND_CTRL, 0); 13701 __cm_reset(dd, 0); /* reset CM internal state */ 13702 /* SEND_CONTEXTS read-only */ 13703 /* SEND_DMA_ENGINES read-only */ 13704 /* SEND_PIO_MEM_SIZE read-only */ 13705 /* SEND_DMA_MEM_SIZE read-only */ 13706 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); 13707 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ 13708 /* SEND_PIO_ERR_STATUS read-only */ 13709 write_csr(dd, SEND_PIO_ERR_MASK, 0); 13710 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); 13711 /* SEND_PIO_ERR_FORCE leave alone */ 13712 /* SEND_DMA_ERR_STATUS read-only */ 13713 write_csr(dd, SEND_DMA_ERR_MASK, 0); 13714 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); 13715 /* SEND_DMA_ERR_FORCE leave alone */ 13716 /* SEND_EGRESS_ERR_STATUS read-only */ 13717 write_csr(dd, SEND_EGRESS_ERR_MASK, 0); 13718 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); 13719 /* SEND_EGRESS_ERR_FORCE leave alone */ 13720 write_csr(dd, SEND_BTH_QP, 0); 13721 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); 13722 write_csr(dd, SEND_SC2VLT0, 0); 13723 write_csr(dd, SEND_SC2VLT1, 0); 13724 write_csr(dd, SEND_SC2VLT2, 0); 13725 write_csr(dd, SEND_SC2VLT3, 0); 13726 write_csr(dd, SEND_LEN_CHECK0, 0); 13727 write_csr(dd, SEND_LEN_CHECK1, 0); 13728 /* SEND_ERR_STATUS read-only */ 13729 write_csr(dd, SEND_ERR_MASK, 0); 13730 write_csr(dd, SEND_ERR_CLEAR, ~0ull); 13731 /* SEND_ERR_FORCE read-only */ 13732 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++) 13733 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); 13734 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++) 13735 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); 13736 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) 13737 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); 13738 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++) 13739 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); 13740 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) 13741 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); 13742 write_csr(dd, 
SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); 13743 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); 13744 /* SEND_CM_CREDIT_USED_STATUS read-only */ 13745 write_csr(dd, SEND_CM_TIMER_CTRL, 0); 13746 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); 13747 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); 13748 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); 13749 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); 13750 for (i = 0; i < TXE_NUM_DATA_VL; i++) 13751 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 13752 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 13753 /* SEND_CM_CREDIT_USED_VL read-only */ 13754 /* SEND_CM_CREDIT_USED_VL15 read-only */ 13755 /* SEND_EGRESS_CTXT_STATUS read-only */ 13756 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ 13757 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); 13758 /* SEND_EGRESS_ERR_INFO read-only */ 13759 /* SEND_EGRESS_ERR_SOURCE read-only */ 13760 13761 /* 13762 * TXE Per-Context CSRs 13763 */ 13764 for (i = 0; i < chip_send_contexts(dd); i++) { 13765 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); 13766 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); 13767 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); 13768 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); 13769 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); 13770 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); 13771 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0); 13772 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); 13773 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0); 13774 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); 13775 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); 13776 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); 13777 } 13778 13779 /* 13780 * TXE Per-SDMA CSRs 13781 */ 13782 for (i = 0; i < chip_sdma_engines(dd); i++) { 13783 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); 13784 /* SEND_DMA_STATUS read-only */ 13785 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); 13786 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); 13787 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); 13788 /* SEND_DMA_HEAD read-only */ 13789 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); 13790 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); 13791 /* SEND_DMA_IDLE_CNT read-only */ 13792 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0); 13793 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0); 13794 /* SEND_DMA_DESC_FETCHED_CNT read-only */ 13795 /* SEND_DMA_ENG_ERR_STATUS read-only */ 13796 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0); 13797 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull); 13798 /* SEND_DMA_ENG_ERR_FORCE leave alone */ 13799 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0); 13800 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0); 13801 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0); 13802 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0); 13803 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0); 13804 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0); 13805 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0); 13806 } 13807 } 13808 13809 /* 13810 * Expect on entry: 13811 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0 13812 */ 13813 static void init_rbufs(struct hfi1_devdata *dd) 13814 { 13815 u64 reg; 13816 int count; 13817 13818 /* 13819 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are 13820 * clear. 13821 */ 13822 count = 0; 13823 while (1) { 13824 reg = read_csr(dd, RCV_STATUS); 13825 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK 13826 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0) 13827 break; 13828 /* 13829 * Give up after 1ms - maximum wait time. 
13830 * 13831 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at 13832 * 250MB/s bandwidth. Lower rate to 66% for overhead to get: 13833 * 136 KB / (66% * 250MB/s) = 844us 13834 */ 13835 if (count++ > 500) { 13836 dd_dev_err(dd, 13837 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", 13838 __func__, reg); 13839 break; 13840 } 13841 udelay(2); /* do not busy-wait the CSR */ 13842 } 13843 13844 /* start the init - expect RcvCtrl to be 0 */ 13845 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); 13846 13847 /* 13848 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief 13849 * period after the write before RcvStatus.RxRbufInitDone is valid. 13850 * The delay in the first run through the loop below is sufficient and 13851 * required before the first read of RcvStatus.RxRbufInintDone. 13852 */ 13853 read_csr(dd, RCV_CTRL); 13854 13855 /* wait for the init to finish */ 13856 count = 0; 13857 while (1) { 13858 /* delay is required first time through - see above */ 13859 udelay(2); /* do not busy-wait the CSR */ 13860 reg = read_csr(dd, RCV_STATUS); 13861 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK)) 13862 break; 13863 13864 /* give up after 100us - slowest possible at 33MHz is 73us */ 13865 if (count++ > 50) { 13866 dd_dev_err(dd, 13867 "%s: RcvStatus.RxRbufInit not set, continuing\n", 13868 __func__); 13869 break; 13870 } 13871 } 13872 } 13873 13874 /* set RXE CSRs to chip reset defaults */ 13875 static void reset_rxe_csrs(struct hfi1_devdata *dd) 13876 { 13877 int i, j; 13878 13879 /* 13880 * RXE Kernel CSRs 13881 */ 13882 write_csr(dd, RCV_CTRL, 0); 13883 init_rbufs(dd); 13884 /* RCV_STATUS read-only */ 13885 /* RCV_CONTEXTS read-only */ 13886 /* RCV_ARRAY_CNT read-only */ 13887 /* RCV_BUF_SIZE read-only */ 13888 write_csr(dd, RCV_BTH_QP, 0); 13889 write_csr(dd, RCV_MULTICAST, 0); 13890 write_csr(dd, RCV_BYPASS, 0); 13891 write_csr(dd, RCV_VL15, 0); 13892 /* this is a clear-down */ 13893 write_csr(dd, RCV_ERR_INFO, 13894 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); 13895 /* RCV_ERR_STATUS read-only */ 13896 write_csr(dd, RCV_ERR_MASK, 0); 13897 write_csr(dd, RCV_ERR_CLEAR, ~0ull); 13898 /* RCV_ERR_FORCE leave alone */ 13899 for (i = 0; i < 32; i++) 13900 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); 13901 for (i = 0; i < 4; i++) 13902 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0); 13903 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++) 13904 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0); 13905 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++) 13906 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0); 13907 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) 13908 clear_rsm_rule(dd, i); 13909 for (i = 0; i < 32; i++) 13910 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0); 13911 13912 /* 13913 * RXE Kernel and User Per-Context CSRs 13914 */ 13915 for (i = 0; i < chip_rcv_contexts(dd); i++) { 13916 /* kernel */ 13917 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); 13918 /* RCV_CTXT_STATUS read-only */ 13919 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0); 13920 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0); 13921 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0); 13922 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); 13923 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0); 13924 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0); 13925 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0); 13926 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); 13927 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0); 13928 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0); 13929 13930 /* user */ 13931 /* RCV_HDR_TAIL read-only */ 13932 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 
0); 13933 /* RCV_EGR_INDEX_TAIL read-only */ 13934 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); 13935 /* RCV_EGR_OFFSET_TAIL read-only */ 13936 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) { 13937 write_uctxt_csr(dd, i, 13938 RCV_TID_FLOW_TABLE + (8 * j), 0); 13939 } 13940 } 13941 } 13942 13943 /* 13944 * Set sc2vl tables. 13945 * 13946 * They power on to zeros, so to avoid send context errors 13947 * they need to be set: 13948 * 13949 * SC 0-7 -> VL 0-7 (respectively) 13950 * SC 15 -> VL 15 13951 * otherwise 13952 * -> VL 0 13953 */ 13954 static void init_sc2vl_tables(struct hfi1_devdata *dd) 13955 { 13956 int i; 13957 /* init per architecture spec, constrained by hardware capability */ 13958 13959 /* HFI maps sent packets */ 13960 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( 13961 0, 13962 0, 0, 1, 1, 13963 2, 2, 3, 3, 13964 4, 4, 5, 5, 13965 6, 6, 7, 7)); 13966 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( 13967 1, 13968 8, 0, 9, 0, 13969 10, 0, 11, 0, 13970 12, 0, 13, 0, 13971 14, 0, 15, 15)); 13972 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( 13973 2, 13974 16, 0, 17, 0, 13975 18, 0, 19, 0, 13976 20, 0, 21, 0, 13977 22, 0, 23, 0)); 13978 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( 13979 3, 13980 24, 0, 25, 0, 13981 26, 0, 27, 0, 13982 28, 0, 29, 0, 13983 30, 0, 31, 0)); 13984 13985 /* DC maps received packets */ 13986 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( 13987 15_0, 13988 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 13989 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); 13990 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( 13991 31_16, 13992 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 13993 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); 13994 13995 /* initialize the cached sc2vl values consistently with h/w */ 13996 for (i = 0; i < 32; i++) { 13997 if (i < 8 || i == 15) 13998 *((u8 *)(dd->sc2vl) + i) = (u8)i; 13999 else 14000 *((u8 *)(dd->sc2vl) + i) = 0; 14001 } 14002 } 14003 14004 /* 14005 * Read chip sizes and then reset parts to sane, disabled, values. We cannot 14006 * depend on the chip going through a power-on reset - a driver may be loaded 14007 * and unloaded many times. 14008 * 14009 * Do not write any CSR values to the chip in this routine - there may be 14010 * a reset following the (possible) FLR in this routine. 14011 * 14012 */ 14013 static int init_chip(struct hfi1_devdata *dd) 14014 { 14015 int i; 14016 int ret = 0; 14017 14018 /* 14019 * Put the HFI CSRs in a known state. 14020 * Combine this with a DC reset. 14021 * 14022 * Stop the device from doing anything while we do a 14023 * reset. We know there are no other active users of 14024 * the device since we are now in charge. Turn off 14025 * off all outbound and inbound traffic and make sure 14026 * the device does not generate any interrupts. 14027 */ 14028 14029 /* disable send contexts and SDMA engines */ 14030 write_csr(dd, SEND_CTRL, 0); 14031 for (i = 0; i < chip_send_contexts(dd); i++) 14032 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); 14033 for (i = 0; i < chip_sdma_engines(dd); i++) 14034 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); 14035 /* disable port (turn off RXE inbound traffic) and contexts */ 14036 write_csr(dd, RCV_CTRL, 0); 14037 for (i = 0; i < chip_rcv_contexts(dd); i++) 14038 write_csr(dd, RCV_CTXT_CTRL, 0); 14039 /* mask all interrupt sources */ 14040 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 14041 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); 14042 14043 /* 14044 * DC Reset: do a full DC reset before the register clear. 
14045 * A recommended length of time to hold is one CSR read, 14046 * so reread the CceDcCtrl. Then, hold the DC in reset 14047 * across the clear. 14048 */ 14049 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); 14050 (void)read_csr(dd, CCE_DC_CTRL); 14051 14052 if (use_flr) { 14053 /* 14054 * A FLR will reset the SPC core and part of the PCIe. 14055 * The parts that need to be restored have already been 14056 * saved. 14057 */ 14058 dd_dev_info(dd, "Resetting CSRs with FLR\n"); 14059 14060 /* do the FLR, the DC reset will remain */ 14061 pcie_flr(dd->pcidev); 14062 14063 /* restore command and BARs */ 14064 ret = restore_pci_variables(dd); 14065 if (ret) { 14066 dd_dev_err(dd, "%s: Could not restore PCI variables\n", 14067 __func__); 14068 return ret; 14069 } 14070 14071 if (is_ax(dd)) { 14072 dd_dev_info(dd, "Resetting CSRs with FLR\n"); 14073 pcie_flr(dd->pcidev); 14074 ret = restore_pci_variables(dd); 14075 if (ret) { 14076 dd_dev_err(dd, "%s: Could not restore PCI variables\n", 14077 __func__); 14078 return ret; 14079 } 14080 } 14081 } else { 14082 dd_dev_info(dd, "Resetting CSRs with writes\n"); 14083 reset_cce_csrs(dd); 14084 reset_txe_csrs(dd); 14085 reset_rxe_csrs(dd); 14086 reset_misc_csrs(dd); 14087 } 14088 /* clear the DC reset */ 14089 write_csr(dd, CCE_DC_CTRL, 0); 14090 14091 /* Set the LED off */ 14092 setextled(dd, 0); 14093 14094 /* 14095 * Clear the QSFP reset. 14096 * An FLR enforces a 0 on all out pins. The driver does not touch 14097 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and 14098 * anything plugged constantly in reset, if it pays attention 14099 * to RESET_N. 14100 * Prime examples of this are optical cables. Set all pins high. 14101 * I2CCLK and I2CDAT will change per direction, and INT_N and 14102 * MODPRS_N are input only and their value is ignored. 
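	 * Writing 0x1f below raises the low five OUT bits, which appears to
	 * correspond to the five signals named above, leaving RESET_N
	 * deasserted.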
14103 */ 14104 write_csr(dd, ASIC_QSFP1_OUT, 0x1f); 14105 write_csr(dd, ASIC_QSFP2_OUT, 0x1f); 14106 init_chip_resources(dd); 14107 return ret; 14108 } 14109 14110 static void init_early_variables(struct hfi1_devdata *dd) 14111 { 14112 int i; 14113 14114 /* assign link credit variables */ 14115 dd->vau = CM_VAU; 14116 dd->link_credits = CM_GLOBAL_CREDITS; 14117 if (is_ax(dd)) 14118 dd->link_credits--; 14119 dd->vcu = cu_to_vcu(hfi1_cu); 14120 /* enough room for 8 MAD packets plus header - 17K */ 14121 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); 14122 if (dd->vl15_init > dd->link_credits) 14123 dd->vl15_init = dd->link_credits; 14124 14125 write_uninitialized_csrs_and_memories(dd); 14126 14127 if (HFI1_CAP_IS_KSET(PKEY_CHECK)) 14128 for (i = 0; i < dd->num_pports; i++) { 14129 struct hfi1_pportdata *ppd = &dd->pport[i]; 14130 14131 set_partition_keys(ppd); 14132 } 14133 init_sc2vl_tables(dd); 14134 } 14135 14136 static void init_kdeth_qp(struct hfi1_devdata *dd) 14137 { 14138 write_csr(dd, SEND_BTH_QP, 14139 (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) << 14140 SEND_BTH_QP_KDETH_QP_SHIFT); 14141 14142 write_csr(dd, RCV_BTH_QP, 14143 (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) << 14144 RCV_BTH_QP_KDETH_QP_SHIFT); 14145 } 14146 14147 /** 14148 * hfi1_get_qp_map - get qp map 14149 * @dd: device data 14150 * @idx: index to read 14151 */ 14152 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) 14153 { 14154 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); 14155 14156 reg >>= (idx % 8) * 8; 14157 return reg; 14158 } 14159 14160 /** 14161 * init_qpmap_table - init qp map 14162 * @dd: device data 14163 * @first_ctxt: first context 14164 * @last_ctxt: first context 14165 * 14166 * This return sets the qpn mapping table that 14167 * is indexed by qpn[8:1]. 14168 * 14169 * The routine will round robin the 256 settings 14170 * from first_ctxt to last_ctxt. 14171 * 14172 * The first/last looks ahead to having specialized 14173 * receive contexts for mgmt and bypass. Normal 14174 * verbs traffic will assumed to be on a range 14175 * of receive contexts. 14176 */ 14177 static void init_qpmap_table(struct hfi1_devdata *dd, 14178 u32 first_ctxt, 14179 u32 last_ctxt) 14180 { 14181 u64 reg = 0; 14182 u64 regno = RCV_QP_MAP_TABLE; 14183 int i; 14184 u64 ctxt = first_ctxt; 14185 14186 for (i = 0; i < 256; i++) { 14187 reg |= ctxt << (8 * (i % 8)); 14188 ctxt++; 14189 if (ctxt > last_ctxt) 14190 ctxt = first_ctxt; 14191 if (i % 8 == 7) { 14192 write_csr(dd, regno, reg); 14193 reg = 0; 14194 regno += 8; 14195 } 14196 } 14197 14198 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK 14199 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK); 14200 } 14201 14202 struct rsm_map_table { 14203 u64 map[NUM_MAP_REGS]; 14204 unsigned int used; 14205 }; 14206 14207 struct rsm_rule_data { 14208 u8 offset; 14209 u8 pkt_type; 14210 u32 field1_off; 14211 u32 field2_off; 14212 u32 index1_off; 14213 u32 index1_width; 14214 u32 index2_off; 14215 u32 index2_width; 14216 u32 mask1; 14217 u32 value1; 14218 u32 mask2; 14219 u32 value2; 14220 }; 14221 14222 /* 14223 * Return an initialized RMT map table for users to fill in. OK if it 14224 * returns NULL, indicating no table. 14225 */ 14226 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) 14227 { 14228 struct rsm_map_table *rmt; 14229 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. 
*/ 14230 14231 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL); 14232 if (rmt) { 14233 memset(rmt->map, rxcontext, sizeof(rmt->map)); 14234 rmt->used = 0; 14235 } 14236 14237 return rmt; 14238 } 14239 14240 /* 14241 * Write the final RMT map table to the chip and free the table. OK if 14242 * table is NULL. 14243 */ 14244 static void complete_rsm_map_table(struct hfi1_devdata *dd, 14245 struct rsm_map_table *rmt) 14246 { 14247 int i; 14248 14249 if (rmt) { 14250 /* write table to chip */ 14251 for (i = 0; i < NUM_MAP_REGS; i++) 14252 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); 14253 14254 /* enable RSM */ 14255 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); 14256 } 14257 } 14258 14259 /* Is a receive side mapping rule */ 14260 static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) 14261 { 14262 return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; 14263 } 14264 14265 /* 14266 * Add a receive side mapping rule. 14267 */ 14268 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, 14269 struct rsm_rule_data *rrd) 14270 { 14271 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 14272 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | 14273 1ull << rule_index | /* enable bit */ 14274 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); 14275 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 14276 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | 14277 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | 14278 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | 14279 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | 14280 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | 14281 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); 14282 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 14283 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | 14284 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | 14285 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | 14286 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); 14287 } 14288 14289 /* 14290 * Clear a receive side mapping rule. 14291 */ 14292 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) 14293 { 14294 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0); 14295 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0); 14296 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0); 14297 } 14298 14299 /* return the number of RSM map table entries that will be used for QOS */ 14300 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 14301 unsigned int *np) 14302 { 14303 int i; 14304 unsigned int m, n; 14305 u8 max_by_vl = 0; 14306 14307 /* is QOS active at all? 
*/ 14308 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || 14309 num_vls == 1 || 14310 krcvqsset <= 1) 14311 goto no_qos; 14312 14313 /* determine bits for qpn */ 14314 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++) 14315 if (krcvqs[i] > max_by_vl) 14316 max_by_vl = krcvqs[i]; 14317 if (max_by_vl > 32) 14318 goto no_qos; 14319 m = ilog2(__roundup_pow_of_two(max_by_vl)); 14320 14321 /* determine bits for vl */ 14322 n = ilog2(__roundup_pow_of_two(num_vls)); 14323 14324 /* reject if too much is used */ 14325 if ((m + n) > 7) 14326 goto no_qos; 14327 14328 if (mp) 14329 *mp = m; 14330 if (np) 14331 *np = n; 14332 14333 return 1 << (m + n); 14334 14335 no_qos: 14336 if (mp) 14337 *mp = 0; 14338 if (np) 14339 *np = 0; 14340 return 0; 14341 } 14342 14343 /** 14344 * init_qos - init RX qos 14345 * @dd: device data 14346 * @rmt: RSM map table 14347 * 14348 * This routine initializes Rule 0 and the RSM map table to implement 14349 * quality of service (qos). 14350 * 14351 * If all of the limit tests succeed, qos is applied based on the array 14352 * interpretation of krcvqs where entry 0 is VL0. 14353 * 14354 * The number of vl bits (n) and the number of qpn bits (m) are computed to 14355 * feed both the RSM map table and the single rule. 14356 */ 14357 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) 14358 { 14359 struct rsm_rule_data rrd; 14360 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; 14361 unsigned int rmt_entries; 14362 u64 reg; 14363 14364 if (!rmt) 14365 goto bail; 14366 rmt_entries = qos_rmt_entries(dd, &m, &n); 14367 if (rmt_entries == 0) 14368 goto bail; 14369 qpns_per_vl = 1 << m; 14370 14371 /* enough room in the map table? */ 14372 rmt_entries = 1 << (m + n); 14373 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) 14374 goto bail; 14375 14376 /* add qos entries to the RSM map table */ 14377 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) { 14378 unsigned tctxt; 14379 14380 for (qpn = 0, tctxt = ctxt; 14381 krcvqs[i] && qpn < qpns_per_vl; qpn++) { 14382 unsigned idx, regoff, regidx; 14383 14384 /* generate the index the hardware will produce */ 14385 idx = rmt->used + ((qpn << n) ^ i); 14386 regoff = (idx % 8) * 8; 14387 regidx = idx / 8; 14388 /* replace default with context number */ 14389 reg = rmt->map[regidx]; 14390 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK 14391 << regoff); 14392 reg |= (u64)(tctxt++) << regoff; 14393 rmt->map[regidx] = reg; 14394 if (tctxt == ctxt + krcvqs[i]) 14395 tctxt = ctxt; 14396 } 14397 ctxt += krcvqs[i]; 14398 } 14399 14400 rrd.offset = rmt->used; 14401 rrd.pkt_type = 2; 14402 rrd.field1_off = LRH_BTH_MATCH_OFFSET; 14403 rrd.field2_off = LRH_SC_MATCH_OFFSET; 14404 rrd.index1_off = LRH_SC_SELECT_OFFSET; 14405 rrd.index1_width = n; 14406 rrd.index2_off = QPN_SELECT_OFFSET; 14407 rrd.index2_width = m + n; 14408 rrd.mask1 = LRH_BTH_MASK; 14409 rrd.value1 = LRH_BTH_VALUE; 14410 rrd.mask2 = LRH_SC_MASK; 14411 rrd.value2 = LRH_SC_VALUE; 14412 14413 /* add rule 0 */ 14414 add_rsm_rule(dd, RSM_INS_VERBS, &rrd); 14415 14416 /* mark RSM map entries as used */ 14417 rmt->used += rmt_entries; 14418 /* map everything else to the mcast/err/vl15 context */ 14419 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT); 14420 dd->qos_shift = n + 1; 14421 return; 14422 bail: 14423 dd->qos_shift = 1; 14424 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); 14425 } 14426 14427 static void init_fecn_handling(struct hfi1_devdata *dd, 14428 struct rsm_map_table *rmt) 14429 { 14430 struct rsm_rule_data rrd; 14431 u64 
reg; 14432 int i, idx, regoff, regidx, start; 14433 u8 offset; 14434 u32 total_cnt; 14435 14436 if (HFI1_CAP_IS_KSET(TID_RDMA)) 14437 /* Exclude context 0 */ 14438 start = 1; 14439 else 14440 start = dd->first_dyn_alloc_ctxt; 14441 14442 total_cnt = dd->num_rcv_contexts - start; 14443 14444 /* there needs to be enough room in the map table */ 14445 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { 14446 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); 14447 return; 14448 } 14449 14450 /* 14451 * RSM will extract the destination context as an index into the 14452 * map table. The destination contexts are a sequential block 14453 * in the range start...num_rcv_contexts-1 (inclusive). 14454 * Map entries are accessed as offset + extracted value. Adjust 14455 * the added offset so this sequence can be placed anywhere in 14456 * the table - as long as the entries themselves do not wrap. 14457 * There are only enough bits in offset for the table size, so 14458 * start with that to allow for a "negative" offset. 14459 */ 14460 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); 14461 14462 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; 14463 i++, idx++) { 14464 /* replace with identity mapping */ 14465 regoff = (idx % 8) * 8; 14466 regidx = idx / 8; 14467 reg = rmt->map[regidx]; 14468 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff); 14469 reg |= (u64)i << regoff; 14470 rmt->map[regidx] = reg; 14471 } 14472 14473 /* 14474 * For RSM intercept of Expected FECN packets: 14475 * o packet type 0 - expected 14476 * o match on F (bit 95), using select/match 1, and 14477 * o match on SH (bit 133), using select/match 2. 14478 * 14479 * Use index 1 to extract the 8-bit receive context from DestQP 14480 * (start at bit 64). Use that as the RSM map table index. 
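	 *
	 * For example, with the 256-entry map table, rmt->used == 20 and
	 * start == 2, offset is (u8)(256 + 20 - 2) == 18, so extracted
	 * context 5 selects entry 18 + 5 == 23, i.e. rmt->used + (5 - start),
	 * which is exactly where the identity mapping below places it.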
14481 */ 14482 rrd.offset = offset; 14483 rrd.pkt_type = 0; 14484 rrd.field1_off = 95; 14485 rrd.field2_off = 133; 14486 rrd.index1_off = 64; 14487 rrd.index1_width = 8; 14488 rrd.index2_off = 0; 14489 rrd.index2_width = 0; 14490 rrd.mask1 = 1; 14491 rrd.value1 = 1; 14492 rrd.mask2 = 1; 14493 rrd.value2 = 1; 14494 14495 /* add rule 1 */ 14496 add_rsm_rule(dd, RSM_INS_FECN, &rrd); 14497 14498 rmt->used += total_cnt; 14499 } 14500 14501 static inline bool hfi1_is_rmt_full(int start, int spare) 14502 { 14503 return (start + spare) > NUM_MAP_ENTRIES; 14504 } 14505 14506 static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) 14507 { 14508 u8 i, j; 14509 u8 ctx_id = 0; 14510 u64 reg; 14511 u32 regoff; 14512 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); 14513 int ctxt_count = hfi1_netdev_ctxt_count(dd); 14514 14515 /* We already have contexts mapped in RMT */ 14516 if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { 14517 dd_dev_info(dd, "Contexts are already mapped in RMT\n"); 14518 return true; 14519 } 14520 14521 if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) { 14522 dd_dev_err(dd, "Not enough RMT entries used = %d\n", 14523 rmt_start); 14524 return false; 14525 } 14526 14527 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", 14528 rmt_start, 14529 rmt_start + NUM_NETDEV_MAP_ENTRIES); 14530 14531 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ 14532 regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8; 14533 reg = read_csr(dd, regoff); 14534 for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) { 14535 /* Update map register with netdev context */ 14536 j = (rmt_start + i) % 8; 14537 reg &= ~(0xffllu << (j * 8)); 14538 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); 14539 /* Wrap up netdev ctx index */ 14540 ctx_id %= ctxt_count; 14541 /* Write back map register */ 14542 if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) { 14543 dev_dbg(&(dd)->pcidev->dev, 14544 "RMT[%d] =0x%llx\n", 14545 regoff - RCV_RSM_MAP_TABLE, reg); 14546 14547 write_csr(dd, regoff, reg); 14548 regoff += 8; 14549 if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) 14550 reg = read_csr(dd, regoff); 14551 } 14552 } 14553 14554 return true; 14555 } 14556 14557 static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, 14558 int rule, struct rsm_rule_data *rrd) 14559 { 14560 if (!hfi1_netdev_update_rmt(dd)) { 14561 dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule); 14562 return; 14563 } 14564 14565 add_rsm_rule(dd, rule, rrd); 14566 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); 14567 } 14568 14569 void hfi1_init_aip_rsm(struct hfi1_devdata *dd) 14570 { 14571 /* 14572 * go through with the initialisation only if this rule actually doesn't 14573 * exist yet 14574 */ 14575 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { 14576 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); 14577 struct rsm_rule_data rrd = { 14578 .offset = rmt_start, 14579 .pkt_type = IB_PACKET_TYPE, 14580 .field1_off = LRH_BTH_MATCH_OFFSET, 14581 .mask1 = LRH_BTH_MASK, 14582 .value1 = LRH_BTH_VALUE, 14583 .field2_off = BTH_DESTQP_MATCH_OFFSET, 14584 .mask2 = BTH_DESTQP_MASK, 14585 .value2 = BTH_DESTQP_VALUE, 14586 .index1_off = DETH_AIP_SQPN_SELECT_OFFSET + 14587 ilog2(NUM_NETDEV_MAP_ENTRIES), 14588 .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), 14589 .index2_off = DETH_AIP_SQPN_SELECT_OFFSET, 14590 .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) 14591 }; 14592 14593 hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd); 14594 } 14595 } 14596 14597 /* Initialize RSM for VNIC */ 14598 void 
hfi1_init_vnic_rsm(struct hfi1_devdata *dd) 14599 { 14600 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); 14601 struct rsm_rule_data rrd = { 14602 /* Add rule for vnic */ 14603 .offset = rmt_start, 14604 .pkt_type = 4, 14605 /* Match 16B packets */ 14606 .field1_off = L2_TYPE_MATCH_OFFSET, 14607 .mask1 = L2_TYPE_MASK, 14608 .value1 = L2_16B_VALUE, 14609 /* Match ETH L4 packets */ 14610 .field2_off = L4_TYPE_MATCH_OFFSET, 14611 .mask2 = L4_16B_TYPE_MASK, 14612 .value2 = L4_16B_ETH_VALUE, 14613 /* Calc context from veswid and entropy */ 14614 .index1_off = L4_16B_HDR_VESWID_OFFSET, 14615 .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), 14616 .index2_off = L2_16B_ENTROPY_OFFSET, 14617 .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) 14618 }; 14619 14620 hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd); 14621 } 14622 14623 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) 14624 { 14625 clear_rsm_rule(dd, RSM_INS_VNIC); 14626 } 14627 14628 void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) 14629 { 14630 /* only actually clear the rule if it's the last user asking to do so */ 14631 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) 14632 clear_rsm_rule(dd, RSM_INS_AIP); 14633 } 14634 14635 static int init_rxe(struct hfi1_devdata *dd) 14636 { 14637 struct rsm_map_table *rmt; 14638 u64 val; 14639 14640 /* enable all receive errors */ 14641 write_csr(dd, RCV_ERR_MASK, ~0ull); 14642 14643 rmt = alloc_rsm_map_table(dd); 14644 if (!rmt) 14645 return -ENOMEM; 14646 14647 /* set up QOS, including the QPN map table */ 14648 init_qos(dd, rmt); 14649 init_fecn_handling(dd, rmt); 14650 complete_rsm_map_table(dd, rmt); 14651 /* record number of used rsm map entries for netdev */ 14652 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); 14653 kfree(rmt); 14654 14655 /* 14656 * make sure RcvCtrl.RcvWcb <= PCIe Device Control 14657 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config 14658 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one 14659 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and 14660 * Max_PayLoad_Size set to its minimum of 128. 14661 * 14662 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0 14663 * (64 bytes). Max_Payload_Size is possibly modified upward in 14664 * tune_pcie_caps() which is called after this routine. 14665 */ 14666 14667 /* Have 16 bytes (4DW) of bypass header available in header queue */ 14668 val = read_csr(dd, RCV_BYPASS); 14669 val &= ~RCV_BYPASS_HDR_SIZE_SMASK; 14670 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << 14671 RCV_BYPASS_HDR_SIZE_SHIFT); 14672 write_csr(dd, RCV_BYPASS, val); 14673 return 0; 14674 } 14675 14676 static void init_other(struct hfi1_devdata *dd) 14677 { 14678 /* enable all CCE errors */ 14679 write_csr(dd, CCE_ERR_MASK, ~0ull); 14680 /* enable *some* Misc errors */ 14681 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK); 14682 /* enable all DC errors, except LCB */ 14683 write_csr(dd, DCC_ERR_FLG_EN, ~0ull); 14684 write_csr(dd, DC_DC8051_ERR_EN, ~0ull); 14685 } 14686 14687 /* 14688 * Fill out the given AU table using the given CU. A CU is defined in terms 14689 * AUs. The table is a an encoding: given the index, how many AUs does that 14690 * represent? 14691 * 14692 * NOTE: Assumes that the register layout is the same for the 14693 * local and remote tables. 
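 *
 * Concretely, the values written below encode: index 0 -> 0 AUs,
 * index 1 -> 1 AU, and index N (N = 2..7) -> 2^(N-1) CUs expressed
 * in AUs (2*CU, 4*CU, ... 64*CU).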
14694 */ 14695 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, 14696 u32 csr0to3, u32 csr4to7) 14697 { 14698 write_csr(dd, csr0to3, 14699 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT | 14700 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT | 14701 2ull * cu << 14702 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT | 14703 4ull * cu << 14704 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT); 14705 write_csr(dd, csr4to7, 14706 8ull * cu << 14707 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT | 14708 16ull * cu << 14709 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT | 14710 32ull * cu << 14711 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | 14712 64ull * cu << 14713 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); 14714 } 14715 14716 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) 14717 { 14718 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, 14719 SEND_CM_LOCAL_AU_TABLE4_TO7); 14720 } 14721 14722 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) 14723 { 14724 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, 14725 SEND_CM_REMOTE_AU_TABLE4_TO7); 14726 } 14727 14728 static void init_txe(struct hfi1_devdata *dd) 14729 { 14730 int i; 14731 14732 /* enable all PIO, SDMA, general, and Egress errors */ 14733 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull); 14734 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull); 14735 write_csr(dd, SEND_ERR_MASK, ~0ull); 14736 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull); 14737 14738 /* enable all per-context and per-SDMA engine errors */ 14739 for (i = 0; i < chip_send_contexts(dd); i++) 14740 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull); 14741 for (i = 0; i < chip_sdma_engines(dd); i++) 14742 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull); 14743 14744 /* set the local CU to AU mapping */ 14745 assign_local_cm_au_table(dd, dd->vcu); 14746 14747 /* 14748 * Set reasonable default for Credit Return Timer 14749 * Don't set on Simulator - causes it to choke. 14750 */ 14751 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) 14752 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE); 14753 } 14754 14755 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, 14756 u16 jkey) 14757 { 14758 u8 hw_ctxt; 14759 u64 reg; 14760 14761 if (!rcd || !rcd->sc) 14762 return -EINVAL; 14763 14764 hw_ctxt = rcd->sc->hw_context; 14765 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */ 14766 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) << 14767 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT); 14768 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */ 14769 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) 14770 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK; 14771 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg); 14772 /* 14773 * Enable send-side J_KEY integrity check, unless this is A0 h/w 14774 */ 14775 if (!is_ax(dd)) { 14776 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14777 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 14778 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14779 } 14780 14781 /* Enable J_KEY check on receive context. 
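	 * Unlike the send-side check above, this is enabled even on A0
	 * hardware: RcvKeyCtrl.JobKeyEnable is set along with the job key
	 * value itself.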
*/ 14782 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK | 14783 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) << 14784 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT); 14785 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); 14786 14787 return 0; 14788 } 14789 14790 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 14791 { 14792 u8 hw_ctxt; 14793 u64 reg; 14794 14795 if (!rcd || !rcd->sc) 14796 return -EINVAL; 14797 14798 hw_ctxt = rcd->sc->hw_context; 14799 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0); 14800 /* 14801 * Disable send-side J_KEY integrity check, unless this is A0 h/w. 14802 * This check would not have been enabled for A0 h/w, see 14803 * set_ctxt_jkey(). 14804 */ 14805 if (!is_ax(dd)) { 14806 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14807 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 14808 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14809 } 14810 /* Turn off the J_KEY on the receive side */ 14811 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); 14812 14813 return 0; 14814 } 14815 14816 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, 14817 u16 pkey) 14818 { 14819 u8 hw_ctxt; 14820 u64 reg; 14821 14822 if (!rcd || !rcd->sc) 14823 return -EINVAL; 14824 14825 hw_ctxt = rcd->sc->hw_context; 14826 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) << 14827 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT; 14828 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg); 14829 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14830 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; 14831 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK; 14832 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14833 14834 return 0; 14835 } 14836 14837 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) 14838 { 14839 u8 hw_ctxt; 14840 u64 reg; 14841 14842 if (!ctxt || !ctxt->sc) 14843 return -EINVAL; 14844 14845 hw_ctxt = ctxt->sc->hw_context; 14846 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14847 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; 14848 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14849 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); 14850 14851 return 0; 14852 } 14853 14854 /* 14855 * Start doing the clean up the chip. Our clean up happens in multiple 14856 * stages and this is just the first. 14857 */ 14858 void hfi1_start_cleanup(struct hfi1_devdata *dd) 14859 { 14860 aspm_exit(dd); 14861 free_cntrs(dd); 14862 free_rcverr(dd); 14863 finish_chip_resources(dd); 14864 } 14865 14866 #define HFI_BASE_GUID(dev) \ 14867 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT)) 14868 14869 /* 14870 * Information can be shared between the two HFIs on the same ASIC 14871 * in the same OS. This function finds the peer device and sets 14872 * up a shared structure. 
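 *
 * A peer is recognized by an identical base GUID once the HFI index bit
 * (GUID_HFI_INDEX_SHIFT) is masked off, combined with a different unit
 * number.  The first device through allocates the shared structure and
 * also sets up the shared i2c devices.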
14873 */ 14874 static int init_asic_data(struct hfi1_devdata *dd) 14875 { 14876 unsigned long index; 14877 struct hfi1_devdata *peer; 14878 struct hfi1_asic_data *asic_data; 14879 int ret = 0; 14880 14881 /* pre-allocate the asic structure in case we are the first device */ 14882 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); 14883 if (!asic_data) 14884 return -ENOMEM; 14885 14886 xa_lock_irq(&hfi1_dev_table); 14887 /* Find our peer device */ 14888 xa_for_each(&hfi1_dev_table, index, peer) { 14889 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && 14890 dd->unit != peer->unit) 14891 break; 14892 } 14893 14894 if (peer) { 14895 /* use already allocated structure */ 14896 dd->asic_data = peer->asic_data; 14897 kfree(asic_data); 14898 } else { 14899 dd->asic_data = asic_data; 14900 mutex_init(&dd->asic_data->asic_resource_mutex); 14901 } 14902 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ 14903 xa_unlock_irq(&hfi1_dev_table); 14904 14905 /* first one through - set up i2c devices */ 14906 if (!peer) 14907 ret = set_up_i2c(dd, dd->asic_data); 14908 14909 return ret; 14910 } 14911 14912 /* 14913 * Set dd->boardname. Use a generic name if a name is not returned from 14914 * EFI variable space. 14915 * 14916 * Return 0 on success, -ENOMEM if space could not be allocated. 14917 */ 14918 static int obtain_boardname(struct hfi1_devdata *dd) 14919 { 14920 /* generic board description */ 14921 const char generic[] = 14922 "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; 14923 unsigned long size; 14924 int ret; 14925 14926 ret = read_hfi1_efi_var(dd, "description", &size, 14927 (void **)&dd->boardname); 14928 if (ret) { 14929 dd_dev_info(dd, "Board description not found\n"); 14930 /* use generic description */ 14931 dd->boardname = kstrdup(generic, GFP_KERNEL); 14932 if (!dd->boardname) 14933 return -ENOMEM; 14934 } 14935 return 0; 14936 } 14937 14938 /* 14939 * Check the interrupt registers to make sure that they are mapped correctly. 14940 * It is intended to help user identify any mismapping by VMM when the driver 14941 * is running in a VM. This function should only be called before interrupt 14942 * is set up properly. 14943 * 14944 * Return 0 on success, -EINVAL on failure. 14945 */ 14946 static int check_int_registers(struct hfi1_devdata *dd) 14947 { 14948 u64 reg; 14949 u64 all_bits = ~(u64)0; 14950 u64 mask; 14951 14952 /* Clear CceIntMask[0] to avoid raising any interrupts */ 14953 mask = read_csr(dd, CCE_INT_MASK); 14954 write_csr(dd, CCE_INT_MASK, 0ull); 14955 reg = read_csr(dd, CCE_INT_MASK); 14956 if (reg) 14957 goto err_exit; 14958 14959 /* Clear all interrupt status bits */ 14960 write_csr(dd, CCE_INT_CLEAR, all_bits); 14961 reg = read_csr(dd, CCE_INT_STATUS); 14962 if (reg) 14963 goto err_exit; 14964 14965 /* Set all interrupt status bits */ 14966 write_csr(dd, CCE_INT_FORCE, all_bits); 14967 reg = read_csr(dd, CCE_INT_STATUS); 14968 if (reg != all_bits) 14969 goto err_exit; 14970 14971 /* Restore the interrupt mask */ 14972 write_csr(dd, CCE_INT_CLEAR, all_bits); 14973 write_csr(dd, CCE_INT_MASK, mask); 14974 14975 return 0; 14976 err_exit: 14977 write_csr(dd, CCE_INT_MASK, mask); 14978 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); 14979 return -EINVAL; 14980 } 14981 14982 /** 14983 * hfi1_init_dd() - Initialize most of the dd structure. 14984 * @dd: the dd device 14985 * 14986 * This is global, and is called directly at init to set up the 14987 * chip-specific function pointers for later use. 
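 *
 * Return: 0 on success, or a negative errno on failure.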
14988 */ 14989 int hfi1_init_dd(struct hfi1_devdata *dd) 14990 { 14991 struct pci_dev *pdev = dd->pcidev; 14992 struct hfi1_pportdata *ppd; 14993 u64 reg; 14994 int i, ret; 14995 static const char * const inames[] = { /* implementation names */ 14996 "RTL silicon", 14997 "RTL VCS simulation", 14998 "RTL FPGA emulation", 14999 "Functional simulator" 15000 }; 15001 struct pci_dev *parent = pdev->bus->self; 15002 u32 sdma_engines = chip_sdma_engines(dd); 15003 15004 ppd = dd->pport; 15005 for (i = 0; i < dd->num_pports; i++, ppd++) { 15006 int vl; 15007 /* init common fields */ 15008 hfi1_init_pportdata(pdev, ppd, dd, 0, 1); 15009 /* DC supports 4 link widths */ 15010 ppd->link_width_supported = 15011 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X | 15012 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X; 15013 ppd->link_width_downgrade_supported = 15014 ppd->link_width_supported; 15015 /* start out enabling only 4X */ 15016 ppd->link_width_enabled = OPA_LINK_WIDTH_4X; 15017 ppd->link_width_downgrade_enabled = 15018 ppd->link_width_downgrade_supported; 15019 /* link width active is 0 when link is down */ 15020 /* link width downgrade active is 0 when link is down */ 15021 15022 if (num_vls < HFI1_MIN_VLS_SUPPORTED || 15023 num_vls > HFI1_MAX_VLS_SUPPORTED) { 15024 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n", 15025 num_vls, HFI1_MAX_VLS_SUPPORTED); 15026 num_vls = HFI1_MAX_VLS_SUPPORTED; 15027 } 15028 ppd->vls_supported = num_vls; 15029 ppd->vls_operational = ppd->vls_supported; 15030 /* Set the default MTU. */ 15031 for (vl = 0; vl < num_vls; vl++) 15032 dd->vld[vl].mtu = hfi1_max_mtu; 15033 dd->vld[15].mtu = MAX_MAD_PACKET; 15034 /* 15035 * Set the initial values to reasonable default, will be set 15036 * for real when link is up. 15037 */ 15038 ppd->overrun_threshold = 0x4; 15039 ppd->phy_error_threshold = 0xf; 15040 ppd->port_crc_mode_enabled = link_crc_mask; 15041 /* initialize supported LTP CRC mode */ 15042 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; 15043 /* initialize enabled LTP CRC mode */ 15044 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4; 15045 /* start in offline */ 15046 ppd->host_link_state = HLS_DN_OFFLINE; 15047 init_vl_arb_caches(ppd); 15048 } 15049 15050 /* 15051 * Do remaining PCIe setup and save PCIe values in dd. 15052 * Any error printing is already done by the init code. 15053 * On return, we have the chip mapped. 15054 */ 15055 ret = hfi1_pcie_ddinit(dd, pdev); 15056 if (ret < 0) 15057 goto bail_free; 15058 15059 /* Save PCI space registers to rewrite after device reset */ 15060 ret = save_pci_variables(dd); 15061 if (ret < 0) 15062 goto bail_cleanup; 15063 15064 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) 15065 & CCE_REVISION_CHIP_REV_MAJOR_MASK; 15066 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) 15067 & CCE_REVISION_CHIP_REV_MINOR_MASK; 15068 15069 /* 15070 * Check interrupt registers mapping if the driver has no access to 15071 * the upstream component. In this case, it is likely that the driver 15072 * is running in a VM. 
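	 * A failure of this check aborts initialization with -EINVAL, since
	 * CSR accesses that do not behave as expected indicate the device
	 * registers are not mapped through correctly.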
15073 */ 15074 if (!parent) { 15075 ret = check_int_registers(dd); 15076 if (ret) 15077 goto bail_cleanup; 15078 } 15079 15080 /* 15081 * obtain the hardware ID - NOT related to unit, which is a 15082 * software enumeration 15083 */ 15084 reg = read_csr(dd, CCE_REVISION2); 15085 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) 15086 & CCE_REVISION2_HFI_ID_MASK; 15087 /* the variable size will remove unwanted bits */ 15088 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; 15089 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; 15090 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n", 15091 dd->icode < ARRAY_SIZE(inames) ? 15092 inames[dd->icode] : "unknown", (int)dd->irev); 15093 15094 /* speeds the hardware can support */ 15095 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; 15096 /* speeds allowed to run at */ 15097 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; 15098 /* give a reasonable active value, will be set on link up */ 15099 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; 15100 15101 /* fix up link widths for emulation _p */ 15102 ppd = dd->pport; 15103 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { 15104 ppd->link_width_supported = 15105 ppd->link_width_enabled = 15106 ppd->link_width_downgrade_supported = 15107 ppd->link_width_downgrade_enabled = 15108 OPA_LINK_WIDTH_1X; 15109 } 15110 /* insure num_vls isn't larger than number of sdma engines */ 15111 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) { 15112 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n", 15113 num_vls, sdma_engines); 15114 num_vls = sdma_engines; 15115 ppd->vls_supported = sdma_engines; 15116 ppd->vls_operational = ppd->vls_supported; 15117 } 15118 15119 /* 15120 * Convert the ns parameter to the 64 * cclocks used in the CSR. 15121 * Limit the max if larger than the field holds. If timeout is 15122 * non-zero, then the calculated field will be at least 1. 15123 * 15124 * Must be after icode is set up - the cclock rate depends 15125 * on knowing the hardware being used. 15126 */ 15127 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; 15128 if (dd->rcv_intr_timeout_csr > 15129 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK) 15130 dd->rcv_intr_timeout_csr = 15131 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK; 15132 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) 15133 dd->rcv_intr_timeout_csr = 1; 15134 15135 /* needs to be done before we look for the peer device */ 15136 read_guid(dd); 15137 15138 /* set up shared ASIC data with peer device */ 15139 ret = init_asic_data(dd); 15140 if (ret) 15141 goto bail_cleanup; 15142 15143 /* obtain chip sizes, reset chip CSRs */ 15144 ret = init_chip(dd); 15145 if (ret) 15146 goto bail_cleanup; 15147 15148 /* read in the PCIe link speed information */ 15149 ret = pcie_speeds(dd); 15150 if (ret) 15151 goto bail_cleanup; 15152 15153 /* call before get_platform_config(), after init_chip_resources() */ 15154 ret = eprom_init(dd); 15155 if (ret) 15156 goto bail_free_rcverr; 15157 15158 /* Needs to be called before hfi1_firmware_init */ 15159 get_platform_config(dd); 15160 15161 /* read in firmware */ 15162 ret = hfi1_firmware_init(dd); 15163 if (ret) 15164 goto bail_cleanup; 15165 15166 /* 15167 * In general, the PCIe Gen3 transition must occur after the 15168 * chip has been idled (so it won't initiate any PCIe transactions 15169 * e.g. an interrupt) and before the driver changes any registers 15170 * (the transition will reset the registers). 
15171 * 15172 * In particular, place this call after: 15173 * - init_chip() - the chip will not initiate any PCIe transactions 15174 * - pcie_speeds() - reads the current link speed 15175 * - hfi1_firmware_init() - the needed firmware is ready to be 15176 * downloaded 15177 */ 15178 ret = do_pcie_gen3_transition(dd); 15179 if (ret) 15180 goto bail_cleanup; 15181 15182 /* 15183 * This should probably occur in hfi1_pcie_init(), but historically 15184 * occurs after the do_pcie_gen3_transition() code. 15185 */ 15186 tune_pcie_caps(dd); 15187 15188 /* start setting dd values and adjusting CSRs */ 15189 init_early_variables(dd); 15190 15191 parse_platform_config(dd); 15192 15193 ret = obtain_boardname(dd); 15194 if (ret) 15195 goto bail_cleanup; 15196 15197 snprintf(dd->boardversion, BOARD_VERS_MAX, 15198 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n", 15199 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN, 15200 (u32)dd->majrev, 15201 (u32)dd->minrev, 15202 (dd->revision >> CCE_REVISION_SW_SHIFT) 15203 & CCE_REVISION_SW_MASK); 15204 15205 /* alloc VNIC/AIP rx data */ 15206 ret = hfi1_alloc_rx(dd); 15207 if (ret) 15208 goto bail_cleanup; 15209 15210 ret = set_up_context_variables(dd); 15211 if (ret) 15212 goto bail_cleanup; 15213 15214 /* set initial RXE CSRs */ 15215 ret = init_rxe(dd); 15216 if (ret) 15217 goto bail_cleanup; 15218 15219 /* set initial TXE CSRs */ 15220 init_txe(dd); 15221 /* set initial non-RXE, non-TXE CSRs */ 15222 init_other(dd); 15223 /* set up KDETH QP prefix in both RX and TX CSRs */ 15224 init_kdeth_qp(dd); 15225 15226 ret = hfi1_dev_affinity_init(dd); 15227 if (ret) 15228 goto bail_cleanup; 15229 15230 /* send contexts must be set up before receive contexts */ 15231 ret = init_send_contexts(dd); 15232 if (ret) 15233 goto bail_cleanup; 15234 15235 ret = hfi1_create_kctxts(dd); 15236 if (ret) 15237 goto bail_cleanup; 15238 15239 /* 15240 * Initialize aspm, to be done after gen3 transition and setting up 15241 * contexts and before enabling interrupts 15242 */ 15243 aspm_init(dd); 15244 15245 ret = init_pervl_scs(dd); 15246 if (ret) 15247 goto bail_cleanup; 15248 15249 /* sdma init */ 15250 for (i = 0; i < dd->num_pports; ++i) { 15251 ret = sdma_init(dd, i); 15252 if (ret) 15253 goto bail_cleanup; 15254 } 15255 15256 /* use contexts created by hfi1_create_kctxts */ 15257 ret = set_up_interrupts(dd); 15258 if (ret) 15259 goto bail_cleanup; 15260 15261 ret = hfi1_comp_vectors_set_up(dd); 15262 if (ret) 15263 goto bail_clear_intr; 15264 15265 /* set up LCB access - must be after set_up_interrupts() */ 15266 init_lcb_access(dd); 15267 15268 /* 15269 * Serial number is created from the base guid: 15270 * [27:24] = base guid [38:35] 15271 * [23: 0] = base guid [23: 0] 15272 */ 15273 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", 15274 (dd->base_guid & 0xFFFFFF) | 15275 ((dd->base_guid >> 11) & 0xF000000)); 15276 15277 dd->oui1 = dd->base_guid >> 56 & 0xFF; 15278 dd->oui2 = dd->base_guid >> 48 & 0xFF; 15279 dd->oui3 = dd->base_guid >> 40 & 0xFF; 15280 15281 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */ 15282 if (ret) 15283 goto bail_clear_intr; 15284 15285 thermal_init(dd); 15286 15287 ret = init_cntrs(dd); 15288 if (ret) 15289 goto bail_clear_intr; 15290 15291 ret = init_rcverr(dd); 15292 if (ret) 15293 goto bail_free_cntrs; 15294 15295 init_completion(&dd->user_comp); 15296 15297 /* The user refcount starts with one to inidicate an active device */ 15298 refcount_set(&dd->user_refcount, 1); 15299 15300 goto bail; 15301 15302 bail_free_rcverr: 15303 
free_rcverr(dd); 15304 bail_free_cntrs: 15305 free_cntrs(dd); 15306 bail_clear_intr: 15307 hfi1_comp_vectors_clean_up(dd); 15308 msix_clean_up_interrupts(dd); 15309 bail_cleanup: 15310 hfi1_free_rx(dd); 15311 hfi1_pcie_ddcleanup(dd); 15312 bail_free: 15313 hfi1_free_devdata(dd); 15314 bail: 15315 return ret; 15316 } 15317 15318 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate, 15319 u32 dw_len) 15320 { 15321 u32 delta_cycles; 15322 u32 current_egress_rate = ppd->current_egress_rate; 15323 /* rates here are in units of 10^6 bits/sec */ 15324 15325 if (desired_egress_rate == -1) 15326 return 0; /* shouldn't happen */ 15327 15328 if (desired_egress_rate >= current_egress_rate) 15329 return 0; /* we can't help go faster, only slower */ 15330 15331 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) - 15332 egress_cycles(dw_len * 4, current_egress_rate); 15333 15334 return (u16)delta_cycles; 15335 } 15336 15337 /** 15338 * create_pbc - build a pbc for transmission 15339 * @ppd: info of physical Hfi port 15340 * @flags: special case flags or-ed in built pbc 15341 * @srate_mbs: static rate 15342 * @vl: vl 15343 * @dw_len: dword length (header words + data words + pbc words) 15344 * 15345 * Create a PBC with the given flags, rate, VL, and length. 15346 * 15347 * NOTE: The PBC created will not insert any HCRC - all callers but one are 15348 * for verbs, which does not use this PSM feature. The lone other caller 15349 * is for the diagnostic interface which calls this if the user does not 15350 * supply their own PBC. 15351 */ 15352 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, 15353 u32 dw_len) 15354 { 15355 u64 pbc, delay = 0; 15356 15357 if (unlikely(srate_mbs)) 15358 delay = delay_cycles(ppd, srate_mbs, dw_len); 15359 15360 pbc = flags 15361 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT) 15362 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) 15363 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT 15364 | (dw_len & PBC_LENGTH_DWS_MASK) 15365 << PBC_LENGTH_DWS_SHIFT; 15366 15367 return pbc; 15368 } 15369 15370 #define SBUS_THERMAL 0x4f 15371 #define SBUS_THERM_MONITOR_MODE 0x1 15372 15373 #define THERM_FAILURE(dev, ret, reason) \ 15374 dd_dev_err((dd), \ 15375 "Thermal sensor initialization failed: %s (%d)\n", \ 15376 (reason), (ret)) 15377 15378 /* 15379 * Initialize the thermal sensor. 15380 * 15381 * After initialization, enable polling of thermal sensor through 15382 * SBus interface. In order for this to work, the SBus Master 15383 * firmware has to be loaded due to the fact that the HW polling 15384 * logic uses SBus interrupts, which are not supported with 15385 * default firmware. Otherwise, no data will be returned through 15386 * the ASIC_STS_THERM CSR. 
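 *
 * The sequence below resets the thermal SBus receiver, programs a clock
 * divider of 0x32 (50 decimal, taking the 100MHz reference down to 2MHz),
 * selects temperature monitor mode, de-asserts the block reset to start
 * conversion, and waits 22ms for the first reading before re-enabling
 * polling.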
15387 */ 15388 static int thermal_init(struct hfi1_devdata *dd) 15389 { 15390 int ret = 0; 15391 15392 if (dd->icode != ICODE_RTL_SILICON || 15393 check_chip_resource(dd, CR_THERM_INIT, NULL)) 15394 return ret; 15395 15396 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); 15397 if (ret) { 15398 THERM_FAILURE(dd, ret, "Acquire SBus"); 15399 return ret; 15400 } 15401 15402 dd_dev_info(dd, "Initializing thermal sensor\n"); 15403 /* Disable polling of thermal readings */ 15404 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); 15405 msleep(100); 15406 /* Thermal Sensor Initialization */ 15407 /* Step 1: Reset the Thermal SBus Receiver */ 15408 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15409 RESET_SBUS_RECEIVER, 0); 15410 if (ret) { 15411 THERM_FAILURE(dd, ret, "Bus Reset"); 15412 goto done; 15413 } 15414 /* Step 2: Set Reset bit in Thermal block */ 15415 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15416 WRITE_SBUS_RECEIVER, 0x1); 15417 if (ret) { 15418 THERM_FAILURE(dd, ret, "Therm Block Reset"); 15419 goto done; 15420 } 15421 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ 15422 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, 15423 WRITE_SBUS_RECEIVER, 0x32); 15424 if (ret) { 15425 THERM_FAILURE(dd, ret, "Write Clock Div"); 15426 goto done; 15427 } 15428 /* Step 4: Select temperature mode */ 15429 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, 15430 WRITE_SBUS_RECEIVER, 15431 SBUS_THERM_MONITOR_MODE); 15432 if (ret) { 15433 THERM_FAILURE(dd, ret, "Write Mode Sel"); 15434 goto done; 15435 } 15436 /* Step 5: De-assert block reset and start conversion */ 15437 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15438 WRITE_SBUS_RECEIVER, 0x2); 15439 if (ret) { 15440 THERM_FAILURE(dd, ret, "Write Reset Deassert"); 15441 goto done; 15442 } 15443 /* Step 5.1: Wait for first conversion (21.5ms per spec) */ 15444 msleep(22); 15445 15446 /* Enable polling of thermal readings */ 15447 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); 15448 15449 /* Set initialized flag */ 15450 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); 15451 if (ret) 15452 THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); 15453 15454 done: 15455 release_chip_resource(dd, CR_SBUS); 15456 return ret; 15457 } 15458 15459 static void handle_temp_err(struct hfi1_devdata *dd) 15460 { 15461 struct hfi1_pportdata *ppd = &dd->pport[0]; 15462 /* 15463 * Thermal Critical Interrupt 15464 * Put the device into forced freeze mode, take link down to 15465 * offline, and put DC into reset. 15466 */ 15467 dd_dev_emerg(dd, 15468 "Critical temperature reached! Forcing device into freeze mode!\n"); 15469 dd->flags |= HFI1_FORCED_FREEZE; 15470 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); 15471 /* 15472 * Shut DC down as much and as quickly as possible. 15473 * 15474 * Step 1: Take the link down to OFFLINE. This will cause the 15475 * 8051 to put the Serdes in reset. However, we don't want to 15476 * go through the entire link state machine since we want to 15477 * shutdown ASAP. Furthermore, this is not a graceful shutdown 15478 * but rather an attempt to save the chip. 15479 * Code below is almost the same as quiet_serdes() but avoids 15480 * all the extra work and the sleeps. 15481 */ 15482 ppd->driver_link_ready = 0; 15483 ppd->link_enabled = 0; 15484 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | 15485 PLS_OFFLINE); 15486 /* 15487 * Step 2: Shutdown LCB and 8051 15488 * After shutdown, do not restore DC_CFG_RESET value. 15489 */ 15490 dc_shutdown(dd); 15491 } 15492
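/*
 * Illustrative sketch only, kept out of the build: how one 8-bit receive
 * context entry is placed into the 256-entry RSM map, mirroring the
 * idx / regidx / regoff arithmetic used by init_qos() and
 * init_fecn_handling() above (the 0xff mask stands in for
 * RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK).
 */
#if 0
static void example_set_rsm_map_entry(u64 map[NUM_MAP_REGS],
				       unsigned int idx, u8 ctxt)
{
	unsigned int regidx = idx / 8;		/* which 64-bit map register */
	unsigned int regoff = (idx % 8) * 8;	/* byte position within it */

	map[regidx] &= ~(0xffull << regoff);	/* clear the old entry */
	map[regidx] |= (u64)ctxt << regoff;	/* insert the new context */
}
#endif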