/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

/*
 * These are nbio v7_4_1 register masks. Temporarily define them here, since
 * the nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L

#define mmBIF_MMSCH1_DOORBELL_RANGE		0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX	2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT		0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK		0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK		0x001F0000L

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status);

/*
 * Redirect the HDP MEM/REG flush control registers into the MMIO remap
 * aperture, so clients of the remapped page (e.g. KFD) can trigger HDP
 * flushes without privileged register access.
 */
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
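/*
 * Doorbell range programming, shared by the helpers below: the OFFSET field
 * selects the first doorbell index routed to the engine and SIZE the length
 * of the range, with SIZE = 0 cutting off routing entirely (as the
 * !use_doorbell paths assume).
 */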
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index,
					  int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2) {
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	} else {
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with SDMA0~1; they need an extra 4-dword offset.
		 *
		 * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
		 * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
		 * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
		 * BIF_SDMA4_DOORBELL_RANGE:
		 *     ARCTURUS:  0x3be0
		 *     ALDEBARAN: 0x3be4
		 */
		if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
			reg = instance + 0x4 + 0x1 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
		else
			reg = instance + 0x4 +
				SOC15_REG_OFFSET(NBIO, 0,
						 mmBIF_SDMA0_DOORBELL_RANGE);
	}

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance)
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		/* BIF_MMSCH0/1_DOORBELL_RANGE share the same field layout
		 * (see the MMSCH1 defines above), so the MMSCH0 field
		 * definitions work for both instances.
		 */
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN,
		       enable ? 1 : 0);
}
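/*
 * The self-ring aperture lets the GPU ring its own doorbells: when enabled,
 * writes hitting this GPA aperture are routed back into the doorbell BAR,
 * whose base is programmed from adev->doorbell.base below.
 */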
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
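/*
 * HDP flush handshake: ring code emits a write of its per-engine
 * ref_and_mask bit (see nbio_v7_4_hdp_flush_reg below) to GPU_HDP_FLUSH_REQ
 * and then polls for the same bit in GPU_HDP_FLUSH_DONE. These helpers only
 * export the register offsets; the emit/poll itself lives in the engine
 * ring code.
 */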
static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	/* SDMA2~7 report through the reserved engine bits on nbio v7_4_1 */
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{

}

static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* the driver has to clear the interrupt status when the bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_CNTLR_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (!ras->disable_ras_err_cnt_harvest) {
			/*
			 * clear error status after ras_controller_intr
			 * according to hw team and count ue number
			 * for query
			 */
			nbio_v7_4_query_ras_error_count(adev, &err_data);

			/* log the error counts for awareness */
			obj->err_data.ue_count += err_data.ue_count;
			obj->err_data.ce_count += err_data.ce_count;

			if (err_data.ce_count)
				dev_info(adev->dev, "%ld correctable hardware "
					 "errors detected in %s block, "
					 "no user action is needed.\n",
					 obj->err_data.ce_count,
					 adev->nbio.ras_if->name);

			if (err_data.ue_count)
				dev_info(adev->dev, "%ld uncorrectable hardware "
					 "errors detected in %s block\n",
					 obj->err_data.ue_count,
					 adev->nbio.ras_if->name);
		}

		dev_info(adev->dev, "RAS controller interrupt triggered "
			 "by NBIF error\n");

		/* ras_controller_int is dedicated for nbif ras error,
		 * not the global interrupt for sync flood
		 */
		amdgpu_ras_reset_gpu(adev);
	}
}
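/*
 * Unlike the RAS controller interrupt above, which is dedicated to NBIF
 * errors, err_event_athub signals the global fatal error (sync flood)
 * condition, so the handler defers to the global RAS ISR.
 */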
static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* the driver has to clear the interrupt status when the bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}

static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, the bif ring has to be disabled. There is no
	 * chance this process function will be invoked, so leave it as a
	 * dummy one.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_err_event_athub_irq enablement should be done in psp bl when
	 * it tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, the bif ring has to be disabled. There is no
	 * chance this process function will be invoked, so leave it as a
	 * dummy one.
	 */
	return 0;
}
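/*
 * Interrupt source callbacks consumed by the amdgpu IH core: .set runs on
 * interrupt state changes and .process on dispatch of a matching IV entry.
 * Both sources are registered against the BIF IH client via
 * amdgpu_irq_add_id() in the init functions below.
 */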
static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				  ParityErrNonFatal);
	parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
		/* clear error status register */
		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal)
			/* clear parity fatal error indication field */
			WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
				    parity_sts);

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				  BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
						IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}

static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	/* the hw field is an interrupt *disable* bit, hence the inversion */
	WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
		       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
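/*
 * NBIO callback table, hooked up as adev->nbio.funcs by SoC-level init code
 * for the nbio v7.4 parts handled in this file (e.g. Arcturus, Aldebaran).
 */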
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
	.ras_late_init = amdgpu_nbio_ras_late_init,
};