/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
#include "soc15.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base;
	u32 inst_mask;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both vram and system memory (gart)
	 */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));

		} else {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}
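
/**
 * mmhub_v1_8_init_system_aperture_regs - program AGP and system apertures
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the AGP BAR and the system aperture window on every MMHUB
 * instance, then sets the default page and protection fault addresses.
 * Everything past the AGP BAR is skipped for SRIOV VFs.
 */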
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		if (amdgpu_sriov_vf(adev))
			return;

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* When vram is squeezed into the GART aperture, the FB
		 * aperture and AGP aperture are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	/* Setup TLB control */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}
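
/**
 * mmhub_v1_8_init_cache_regs - program the VM L2 cache controls
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up VM_L2_CNTL through VM_L2_CNTL4 on every MMHUB instance:
 * enables the L2 cache and fragment processing, invalidates the L1 TLBs
 * and L2 cache, and picks the bank select and big-K fragment size based
 * on whether translate_further is in use. Skipped entirely for SRIOV VFs.
 */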
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, Refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}

static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				    adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp,
				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
	}
}

static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}
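
/**
 * mmhub_v1_8_setup_vmid_config - program VM contexts 1..15
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the page table depth, block size and protection fault
 * defaults for each of the 15 non-system VMIDs on every MMHUB instance,
 * and sets each context's page table address range to span the whole
 * VM space up to max_pfn.
 */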
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}
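
/**
 * mmhub_v1_8_program_invalidation - initialize the VM invalidation engines
 *
 * @adev: amdgpu_device pointer
 *
 * Opens the address range of all 18 invalidation engines on every MMHUB
 * instance to the maximum, so invalidation requests are not range
 * limited.
 */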
static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 i, j, inst_mask;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				i * hub->eng_addr_distance, 0x1f);
		}
	}
}

static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for the VF because they
		 * are VF copy registers, so the VBIOS post doesn't program
		 * them. The SRIOV driver needs to program them itself.
		 */
		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}
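
/**
 * mmhub_v1_8_gart_disable - disable GART translation
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, turns off the L1 TLB and, on bare metal,
 * disables the VM L2 cache on every MMHUB instance.
 */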
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j, inst_mask;

	/* Disable all tables */
	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);

		if (!amdgpu_sriov_vf(adev)) {
			/* Setup L2 cache */
			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
					    0);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
		}
	}
}

/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}
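
/**
 * mmhub_v1_8_init - initialize the per-instance vmhub bookkeeping
 *
 * @adev: amdgpu_device pointer
 *
 * Caches the register offsets and inter-register distances used by the
 * common GMC code for each MMHUB instance.
 */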
static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
			regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	return 0;
}

static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{

}

const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
};