/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "gfxhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/GC/gc_9_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

/**
 * gfxhub_v1_0_gart_enable - program the GFX hub MC and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the system aperture, AGP disable, L1 TLB and L2 cache control
 * registers, then sets up VM context 0 (the kernel GART mapping) and VM
 * contexts 1-15 (per-process page tables) on the GFX hub.
 *
 * Returns 0 (there are no failure paths).
 */
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	u32 i;

	/* Program MC. */
	/* Update configuration */
	/* System aperture covers VRAM; registers take the address >> 18
	 * (256KB granularity). */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);

	/* Default (fault) address inside the aperture: the scratch page,
	 * expressed as an MC address. Split across LSB (bits 43:12) and
	 * MSB (bits above 44) registers. */
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
		(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
		(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
		 * are VF copy registers so vbios post doesn't program them,
		 * for SRIOV driver need to program them */
		/* FB location registers take the address >> 24
		 * (16MB granularity). */
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP by making the window empty (BOT > TOP). */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BOT), 0xFFFFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_ACCESS_MODE,
			    3);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    1);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ECO_BITS,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    MTYPE,
			    MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ATC_EN,
			    1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL,
			    ENABLE_L2_FRAGMENT_PROCESSING,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL,
			    CONTEXT1_IDENTITY_ACCESS_MODE,
			    1);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL,
			    IDENTITY_MODE_FRAGMENT_SIZE,
			    0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);

	/* Flush caches before the new configuration takes effect. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2), tmp);

	/* Restore the hardware default for CNTL3. */
	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL,
			    0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4), tmp);

	/* setup context0: the GART address range, in 4KB pages
	 * (address >> 12 low 32 bits, >> 44 high bits). */
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	/* The GART page table must be 4KB aligned and fit in 48 bits. */
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	/* Page table base is an MC address with the low bit used as the
	 * "valid" flag. */
	value = adev->gart.table_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /*valid bit*/

	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	/* Faulting accesses are redirected to the dummy page. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)((u64)adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
			    1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	/* Context 0 uses a flat (depth 0) page table. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Disable identity aperture by making its low address larger than
	 * its high address (empty range). */
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

	/* Program VM contexts 1..15; the per-context CNTL registers are
	 * contiguous (base + i), the START/END address pairs are two
	 * registers apart (base + i*2). */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				adev->vm_manager.num_level);
		/* Redirect all fault classes to the default page. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		/* NOTE(review): the -9 presumably converts the block size
		 * to the hardware's log2 encoding relative to the 4KB/2MB
		 * fragment base — confirm against the register spec. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
	}


	return 0;
}

/**
 * gfxhub_v1_0_gart_disable - disable GART on the GFX hub
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all 16 VM contexts, the L1 TLB and the L2 cache.
 */
void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables (contexts 0..15; CNTL registers are
	 * contiguous from CONTEXT0_CNTL). */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), 0);
}

/**
 * gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;

	/* Apply @value to every fault class in one read-modify-write. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

/* IP block callback: nothing to do at early init. */
static int gfxhub_v1_0_early_init(void *handle)
{
	return 0;
}

/* IP block callback: nothing to do at late init. */
static int gfxhub_v1_0_late_init(void *handle)
{
	return 0;
}

/* IP block callback: record the GFX hub's register offsets so common VM
 * code can drive this hub without knowing GC register names. */
static int gfxhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	return 0;
}

/* IP block callback: nothing to tear down. */
static int gfxhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

/* IP block callback: reset the address range of all 18 invalidation
 * engines to the full range (LO32=0xffffffff, HI32=0x1f); each engine's
 * LO32/HI32 pair is two registers apart. */
static int gfxhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0 ; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(GC, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(GC, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

/* IP block callback: GART teardown is handled by the GMC, not here. */
static int gfxhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

/* IP block callback: no suspend state of its own. */
static int gfxhub_v1_0_suspend(void *handle)
{
	return 0;
}

/* IP block callback: no resume work of its own. */
static int gfxhub_v1_0_resume(void *handle)
{
	return 0;
}

/* IP block callback: the hub has no busy state to report. */
static bool gfxhub_v1_0_is_idle(void *handle)
{
	return true;
}

/* IP block callback: always idle, so nothing to wait for. */
static int gfxhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

/* IP block callback: no soft reset support. */
static int gfxhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

/* IP block callback: no clockgating control on this hub. */
static int gfxhub_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

/* IP block callback: no powergating control on this hub. */
static int gfxhub_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

/* amd_ip_funcs vtable exposing the callbacks above to the IP framework. */
const struct amd_ip_funcs gfxhub_v1_0_ip_funcs = {
	.name = "gfxhub_v1_0",
	.early_init = gfxhub_v1_0_early_init,
	.late_init = gfxhub_v1_0_late_init,
	.sw_init = gfxhub_v1_0_sw_init,
	.sw_fini = gfxhub_v1_0_sw_fini,
	.hw_init = gfxhub_v1_0_hw_init,
	.hw_fini = gfxhub_v1_0_hw_fini,
	.suspend = gfxhub_v1_0_suspend,
	.resume = gfxhub_v1_0_resume,
	.is_idle = gfxhub_v1_0_is_idle,
	.wait_for_idle = gfxhub_v1_0_wait_for_idle,
	.soft_reset = gfxhub_v1_0_soft_reset,
	.set_clockgating_state = gfxhub_v1_0_set_clockgating_state,
	.set_powergating_state = gfxhub_v1_0_set_powergating_state,
};

/* IP block descriptor: gfxhub v1.0. */
const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFXHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &gfxhub_v1_0_ip_funcs,
};