/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v1_0.h"

#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV		0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX	0

u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);

	/* FB_BASE and FB_TOP are programmed in 16MB units, hence the << 24. */
	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	/* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    offset * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    offset * vmid, upper_32_bits(page_table_base));
}

static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
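	/*
	 * The system aperture registers below are programmed in units of
	 * 1 << 18 bytes (256KB), which is what the >> 18 shifts implement.
	 */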
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that makes it unable to use the
		 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
		 * So here is the workaround that increases the system
		 * aperture high address (by 1) to get rid of the VM fault
		 * and hardware hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address. */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
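	/*
	 * ATC_EN presumably allows eligible translations to be serviced via
	 * the ATC (IOMMU address translation cache) path used on APUs.
	 */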
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, Refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

	/* Start from the register default, not the stale VM_L2_CNTL2 value. */
	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

	tmp = mmVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
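		/* Enable all protection fault classes for this VMID. */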
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * 2,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * 2,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    2 * i, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    2 * i, 0x1f);
	}
}

void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
				bool enable)
{
	if (amdgpu_sriov_vf(adev))
		return;

	if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev,
					AMD_IP_BLOCK_TYPE_GMC, true);
	}
}

int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so the VBIOS post doesn't program them.
		 * For SRIOV the driver needs to program them itself.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
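	/*
	 * Set up apertures, TLB and L2 first, then program the per-VMID
	 * contexts and the invalidation engine address ranges.
	 */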
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}

void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
	}
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
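/*
 * Publish the MMHUB copies of the common VM registers so that generic GMC
 * code can drive this hub without knowing the MMHUB register offsets.
 */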
void mmhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
}
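/*
 * Raven is handled via DAGB0 only, at a Raven-specific offset (see the
 * mmDAGB0_CNTL_MISC2_RV define above); the other ASICs program both DAGB0
 * and DAGB1, hence the asic_type checks below.
 */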
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		mmhub_v1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
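/*
 * RAS sub-block table: SEC (single-error-corrected) and DED
 * (double-error-detected) counter fields for the ECC-protected MMEA
 * memories; the SED (single-error-detected) parity counters carry no
 * DED field, hence the 0, 0 entries.
 */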
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	}
};
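/*
 * EDC counter registers polled when querying RAS errors; reading them also
 * clears the counts (see mmhub_v1_0_reset_ras_error_count()).
 */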
static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};

static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
					  const struct soc15_reg_entry *reg,
					  uint32_t value, uint32_t *sec_count,
					  uint32_t *ded_count)
{
	uint32_t i;
	uint32_t sec_cnt, ded_cnt;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
		if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
			continue;

		sec_cnt = (value &
				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
				mmhub_v1_0_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, SEC %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 sec_cnt);
			*sec_count += sec_cnt;
		}

		ded_cnt = (value &
				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
				mmhub_v1_0_ras_fields[i].ded_count_shift;
		if (ded_cnt) {
			dev_info(adev->dev,
				 "MMHUB SubBlock %s, DED %d\n",
				 mmhub_v1_0_ras_fields[i].name,
				 ded_cnt);
			*ded_count += ded_cnt;
		}
	}

	return 0;
}

static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i;
	uint32_t reg_value;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
		reg_value =
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
		if (reg_value)
			mmhub_v1_0_get_ras_error_count(adev,
				&mmhub_v1_0_edc_cnt_regs[i],
				reg_value, &sec_count, &ded_count);
	}

	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
}

static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	/* read back edc counter registers to reset the counters to 0 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
	}
}

const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};