/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v2_0.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid		0x064d
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX	0
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid		0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX	0

void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				 uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Disable AGP. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * The new L1 policy blocks SRIOV guests from writing these
		 * registers; they are programmed by the host instead, so
		 * skip programming them here.
		 */
		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     adev->gmc.vram_start >> 18);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     adev->gmc.vram_end >> 18);
	}

	/* Set default page address. */
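	/*
	 * The default page is backed by the VRAM scratch buffer object:
	 * rebase its GPU address from the start of VRAM onto the VM
	 * manager's VRAM base offset. The registers below take a 4K-aligned
	 * address split in two, bits 43:12 in the LSB register and bits
	 * 47:44 in the MSB register (assuming 48-bit MC addresses, per the
	 * >> 44 split used throughout this file).
	 */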
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);

	tmp = mmMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
}

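/*
 * Context 0 is the system context the kernel uses for the GART:
 * mmhub_v2_0_init_gart_aperture_regs() above points it at the single GART
 * page table, so it runs with PAGE_TABLE_DEPTH 0 and retry faults disabled.
 */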
static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
					  i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
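		/*
		 * amdgpu_noretry is the driver's "noretry" module parameter:
		 * retry faults are enabled below only when it is unset.
		 */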
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v2_0_init_gart_aperture_regs(adev);
	mmhub_v2_0_init_system_aperture_regs(adev);
	mmhub_v2_0_init_tlb_regs(adev);
	mmhub_v2_0_init_cache_regs(adev);

	mmhub_v2_0_enable_system_domain(adev);
	mmhub_v2_0_disable_identity_aperture(adev);
	mmhub_v2_0_setup_vmid_config(adev);
	mmhub_v2_0_program_invalidation(adev);

	return 0;
}

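/**
 * mmhub_v2_0_gart_disable - disable MMHUB GART translation
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, then turns off the L1 TLB and the L2 cache so
 * that no further MMHUB address translation is performed.
 */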
void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables. */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

void mmhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}

static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1;

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	/* Only write back registers whose value actually changed. */
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		if (def != data)
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
		break;
	default:
		if (def != data)
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
		if (def1 != data1)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		break;
	}
}

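/*
 * Light sleep is controlled separately from MGCG: it only toggles the
 * MEM_LS_ENABLE bit in MM_ATC_L2_MISC_CG, i.e. memory light sleep for the
 * ATC L2, and is gated on the AMD_CG_SUPPORT_MC_LS flag.
 */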
static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		break;
	default:
		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		break;
	}

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data) {
		switch (adev->asic_type) {
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
			break;
		default:
			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
			break;
		}
	}
}

int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		mmhub_v2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
		break;
	default:
		data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		break;
	}

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}