/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "gfxhub_v2_0.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "gc/gc_10_1_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);

	base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}

void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				  uint64_t page_table_base)
{
	/* two registers' distance between mmGCVM_CONTEXT0_* and mmGCVM_CONTEXT1_* */
	int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    offset * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    offset * vmid, upper_32_bits(page_table_base));
}

static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	/* Disable AGP. */
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     adev->gmc.vram_start >> 18);
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     adev->gmc.vram_end >> 18);

	/* Set default page address. */
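	/*
	 * The default page is backed by the VRAM scratch bo; rebase its GPU
	 * (MC) address onto vram_base_offset so the registers below hold the
	 * address as seen by the GC hub.
	 */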
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
		+ adev->vm_manager.vram_base_offset;
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}

static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);

	tmp = mmGCVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);

	tmp = mmGCVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);
}

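/*
 * VM context 0 is the "system domain" owned by the kernel driver: a
 * single-level (PAGE_TABLE_DEPTH = 0) mapping that covers the GART aperture
 * programmed in gfxhub_v2_0_init_gart_aperture_regs().
 */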
static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}

static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}

static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    2 * i, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    2 * i, 0x1f);
	}
}

int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
		 * are VF copy registers so VBIOS post doesn't program them;
		 * for SRIOV the driver needs to program them.
		 */
		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
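	/*
	 * Program the GART and system apertures and the TLB/L2 controls,
	 * then enable the kernel context (VMID 0), close the context1
	 * identity aperture, configure the user VMIDs and set up the
	 * invalidation engines.
	 */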
	gfxhub_v2_0_init_gart_aperture_regs(adev);
	gfxhub_v2_0_init_system_aperture_regs(adev);
	gfxhub_v2_0_init_tlb_regs(adev);
	gfxhub_v2_0_init_cache_regs(adev);

	gfxhub_v2_0_enable_system_domain(adev);
	gfxhub_v2_0_disable_identity_aperture(adev);
	gfxhub_v2_0_setup_vmid_config(adev);
	gfxhub_v2_0_program_invalidation(adev);

	return 0;
}

void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
}

/**
 * gfxhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

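/*
 * Cache the GC hub register offsets in the common amdgpu_vmhub structure so
 * the shared GMC/VM code (TLB invalidation, context 0 page-table updates and
 * fault decoding) can address this hub without gfxhub-specific knowledge.
 */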
void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
}