/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
#include "amdgpu.h"

#include "rsmu/rsmu_0_0_2_offset.h"
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
#include "umc/umc_6_1_2_offset.h"

#define smnMCA_UMC0_MCUMC_ADDRT0	0x50f10

/*
 * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
 * are the index of the 8KB block
 */
#define ADDR_OF_8KB_BLOCK(addr)			(((addr) & ~0xffULL) << 5)
/* channel index is the index of the 256B block */
#define ADDR_OF_256B_BLOCK(channel_index)	((channel_index) << 8)
/* offset in the 256B block */
#define OFFSET_IN_256B_BLOCK(addr)		((addr) & 0xffULL)
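/*
 * Worked example of the translation above (values are illustrative, not
 * taken from real hardware): memory is interleaved across the 32 channels
 * in 256B granules, so one 8KB block holds exactly one 256B granule per
 * channel (32 * 256B = 8KB). For err_addr = 0x1234 and channel_index = 3:
 *
 *	ADDR_OF_8KB_BLOCK(0x1234)    = (0x1200 << 5) = 0x24000
 *	ADDR_OF_256B_BLOCK(3)        = (3 << 8)      = 0x300
 *	OFFSET_IN_256B_BLOCK(0x1234)                 = 0x34
 *
 * giving a retired page address of 0x24000 | 0x300 | 0x34 = 0x24334.
 */
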
const uint32_t
	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
		{2, 18, 11, 27},	{4, 20, 13, 29},
		{1, 17, 8, 24},		{7, 23, 14, 30},
		{10, 26, 3, 19},	{12, 28, 5, 21},
		{9, 25, 0, 16},		{15, 31, 6, 22}
};

static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev,
					   uint32_t umc_instance)
{
	uint32_t rsmu_umc_index;

	rsmu_umc_index = RREG32_SOC15(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 1);
	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_INSTANCE, umc_instance);
	rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_WREN, 1 << umc_instance);
	WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			rsmu_umc_index);
}

static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
	WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_MODE_EN, 0);
}

static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
{
	uint32_t rsmu_umc_index;

	rsmu_umc_index = RREG32_SOC15(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	return REG_GET_FIELD(rsmu_umc_index,
			RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			RSMU_UMC_INDEX_INSTANCE);
}
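/*
 * Note on the counting scheme below (a summary of existing behaviour, not
 * new logic): EccErrCntCsSel switches the EccErrCnt register between the
 * two chip selects of a channel (the "lower" and "higher" chip in the
 * comments). The hardware counter is pre-loaded with UMC_V6_1_CE_CNT_INIT,
 * so the number of new correctable errors is the current reading minus
 * that baseline; the counter is then re-armed to UMC_V6_1_CE_CNT_INIT
 * after each read.
 */
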
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* select the lower chip and check the error count */
	ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
	ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);
	/* clear the lower chip error count */
	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip and check the error counter */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
	ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_1_CE_CNT_INIT);
	/* clear the higher chip error count */
	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);

	/*
	 * check for SRAM correctable error;
	 * MCUMC_STATUS is a 64 bit register
	 */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

static void umc_v6_1_query_uncorrectable_error_count(struct amdgpu_device *adev,
						     uint32_t umc_reg_offset,
						     unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* check the MCUMC_STATUS */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

static void umc_v6_1_query_error_count(struct amdgpu_device *adev,
				       struct ras_err_data *err_data,
				       uint32_t umc_reg_offset,
				       uint32_t channel_index)
{
	umc_v6_1_query_correctable_error_count(adev, umc_reg_offset,
					       &(err_data->ce_count));
	umc_v6_1_query_uncorrectable_error_count(adev, umc_reg_offset,
						 &(err_data->ue_count));
}

static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
}

static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t umc_reg_offset,
					 uint32_t channel_index)
{
	uint32_t lsb, mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr, retired_page;
	struct eeprom_table_record *err_rec;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		mc_umc_status_addr =
			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
	}

	/* skip the error address process if err_data->err_addr is NULL (-ENOMEM) */
	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return;
	}

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	/* calculate the error address if a ue/ce error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
		err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4);

		/* the LSB field gives the number of low address bits that
		 * are not valid; mask them off */
		lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
		err_addr &= ~((0x1ULL << lsb) - 1);

		/* translate the umc channel address to a soc physical address,
		 * assembled from 3 parts */
		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
				ADDR_OF_256B_BLOCK(channel_index) |
				OFFSET_IN_256B_BLOCK(err_addr);

		/* we only save ue error information currently, ce is skipped */
		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
				== 1) {
			err_rec->address = err_addr;
			/* page frame address is saved */
			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
			err_rec->ts = (uint64_t)ktime_get_real_seconds();
			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
			err_rec->cu = 0;
			err_rec->mem_channel = channel_index;
			err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);

			err_data->err_addr_cnt++;
		}
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}

static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
}
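/*
 * Initialise the correctable error counters for one channel. This is a
 * summary of the code below, not new behaviour: the counter for each of
 * the two chip selects is programmed to UMC_V6_1_CE_CNT_INIT (the baseline
 * that the query path subtracts), and EccErrInt is set so that correctable
 * errors are reported through an APIC based interrupt.
 */
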
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
					      struct ras_err_data *err_data,
					      uint32_t umc_reg_offset,
					      uint32_t channel_index)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt_addr;

	if (adev->asic_type == CHIP_ARCTURUS) {
		/* UMC 6_1_2 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
	} else {
		/* UMC 6_1_1 registers */
		ecc_err_cnt_sel_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
		ecc_err_cnt_addr =
			SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
	}

	/* select the lower chip and set the error count */
	ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	/* set ce error interrupt type to APIC based interrupt */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrInt, 0x1);
	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
	/* set error count to initial value */
	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);

	/* select the higher chip and set the error counter */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
	WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
}

static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
	/* not used here, but referenced by the for_each_channel macro */
	void *ras_error_status = NULL;

	amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
}

const struct amdgpu_umc_funcs umc_v6_1_funcs = {
	.err_cnt_init = umc_v6_1_err_cnt_init,
	.ras_late_init = amdgpu_umc_ras_late_init,
	.query_ras_error_count = umc_v6_1_query_ras_error_count,
	.query_ras_error_address = umc_v6_1_query_ras_error_address,
	.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
	.disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
};
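/*
 * A minimal sketch of how this table is expected to be consumed (the exact
 * wiring lives outside this file, in the GMC code for the ASICs that use
 * UMC 6.1, e.g. VEGA20/ARCTURUS):
 *
 *	adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
 *	adev->umc.funcs = &umc_v6_1_funcs;
 *
 * after which the common amdgpu RAS code invokes these callbacks, e.g.
 * adev->umc.funcs->query_ras_error_count(), when ECC state is queried.
 */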