/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"

#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}
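/* Program the AGP aperture, the system aperture and the default/fault page
 * addresses that back unmapped accesses.
 */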
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is
		 * to raise the system aperture high address by 1 to get rid of
		 * the VM fault and hardware hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address. */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
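/* Set up the MC/VM L1 TLB control register for the MMHUB. */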
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

	/* Build VM_L2_CNTL3 from its default value, not the stale CNTL2 bits. */
	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

	tmp = mmVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}
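/* Program VM contexts 1..15 (one per user VMID): page table depth, block
 * size, fault behavior and the address range covered by each context.
 */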
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
				bool enable)
{
	if (amdgpu_sriov_vf(adev))
		return;

	if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
}
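/* Bring up the MMHUB GART: apertures, TLB and L2 cache, then the system
 * domain (context 0), the per-VMID contexts and the invalidation engine
 * address ranges.
 */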
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers and VBIOS post doesn't program them, so
		 * under SRIOV the driver needs to program them itself.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}

static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
	}
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
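/* Cache the MMHUB register offsets and per-instance register distances that
 * the common GMC code uses to address contexts and invalidation engines.
 */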
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
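/* Medium grain clock gating is controlled through ATC_L2_MISC_CG plus the
 * per-DAGB MISC2 disable bits; on Raven only DAGB0 is programmed, at an
 * ASIC-specific offset.
 */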
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
				bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
				bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		mmhub_v1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
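/* ECC error records for the MMEA blocks on Vega20. SEC/DED pairs count
 * corrected and uncorrected errors respectively; entries with only a SED
 * field leave the DED mask/shift zero.
 */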
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	  0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	  SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	  0, 0,
	}
};
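/* EDC count registers polled by the RAS error query below; reading them also
 * resets the hardware counters to 0.
 */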
static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};

static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
					  const struct soc15_reg_entry *reg,
					  uint32_t value, uint32_t *sec_count,
					  uint32_t *ded_count)
{
	uint32_t i;
	uint32_t sec_cnt, ded_cnt;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
			continue;

		sec_cnt = (value &
				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
				mmhub_v1_0_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, SEC %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 sec_cnt);
			*sec_count += sec_cnt;
		}

		ded_cnt = (value &
				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
				mmhub_v1_0_ras_fields[i].ded_count_shift;
		if (ded_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, DED %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 ded_cnt);
			*ded_count += ded_cnt;
		}
	}

	return 0;
}

static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i;
	uint32_t reg_value;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
		reg_value =
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
		if (reg_value)
			mmhub_v1_0_get_ras_error_count(adev,
				&mmhub_v1_0_edc_cnt_regs[i],
				reg_value, &sec_count, &ded_count);
	}

	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
}

static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	/* read back edc counter registers to reset the counters to 0 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
	}
}

const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
	.get_fb_location = mmhub_v1_0_get_fb_location,
	.init = mmhub_v1_0_init,
	.gart_enable = mmhub_v1_0_gart_enable,
	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
	.gart_disable = mmhub_v1_0_gart_disable,
	.set_clockgating = mmhub_v1_0_set_clockgating,
	.get_clockgating = mmhub_v1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
	.update_power_gating = mmhub_v1_0_update_power_gating,
};