1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/firmware.h> 24 #include <linux/pci.h> 25 #include "amdgpu.h" 26 #include "amdgpu_atomfirmware.h" 27 #include "gmc_v10_0.h" 28 29 #include "hdp/hdp_5_0_0_offset.h" 30 #include "hdp/hdp_5_0_0_sh_mask.h" 31 #include "gc/gc_10_1_0_sh_mask.h" 32 #include "mmhub/mmhub_2_0_0_sh_mask.h" 33 #include "dcn/dcn_2_0_0_offset.h" 34 #include "dcn/dcn_2_0_0_sh_mask.h" 35 #include "oss/osssys_5_0_0_offset.h" 36 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" 37 #include "navi10_enum.h" 38 39 #include "soc15.h" 40 #include "soc15_common.h" 41 42 #include "nbio_v2_3.h" 43 44 #include "gfxhub_v2_0.h" 45 #include "mmhub_v2_0.h" 46 #include "athub_v2_0.h" 47 /* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/ 48 #define AMDGPU_NUM_OF_VMIDS 8 49 50 #if 0 51 static const struct soc15_reg_golden golden_settings_navi10_hdp[] = 52 { 53 /* TODO add golden setting for hdp */ 54 }; 55 #endif 56 57 static int 58 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, 59 struct amdgpu_irq_src *src, unsigned type, 60 enum amdgpu_interrupt_state state) 61 { 62 struct amdgpu_vmhub *hub; 63 u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i; 64 65 bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 66 GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 67 GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 68 GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 69 GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 70 GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 71 GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; 72 73 bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 74 MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 75 MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 76 MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 77 MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 78 MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 79 MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; 80 81 switch (state) { 82 case AMDGPU_IRQ_STATE_DISABLE: 83 /* MM HUB */ 84 hub = &adev->vmhub[AMDGPU_MMHUB_0]; 85 for (i = 0; i < 16; i++) { 86 reg = hub->vm_context0_cntl + i; 87 tmp = RREG32(reg); 88 tmp &= ~bits[AMDGPU_MMHUB_0]; 89 WREG32(reg, tmp); 90 } 91 92 /* GFX HUB */ 93 hub = &adev->vmhub[AMDGPU_GFXHUB_0]; 94 for (i = 0; i < 16; i++) { 95 reg = hub->vm_context0_cntl + i; 96 tmp = RREG32(reg); 97 tmp &= ~bits[AMDGPU_GFXHUB_0]; 98 WREG32(reg, tmp); 99 } 100 break; 101 case AMDGPU_IRQ_STATE_ENABLE: 102 /* MM HUB */ 103 hub = &adev->vmhub[AMDGPU_MMHUB_0]; 104 for (i = 0; i < 16; i++) { 105 reg = hub->vm_context0_cntl + i; 106 tmp = RREG32(reg); 107 tmp |= bits[AMDGPU_MMHUB_0]; 108 WREG32(reg, tmp); 109 } 110 111 /* GFX HUB */ 112 hub = &adev->vmhub[AMDGPU_GFXHUB_0]; 113 for (i = 0; i < 16; i++) { 114 reg = hub->vm_context0_cntl + i; 115 tmp = RREG32(reg); 116 tmp |= bits[AMDGPU_GFXHUB_0]; 117 WREG32(reg, tmp); 118 } 119 break; 120 default: 121 break; 122 } 123 124 return 0; 125 } 126 127 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, 128 struct amdgpu_irq_src *source, 129 struct amdgpu_iv_entry *entry) 130 { 131 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; 132 uint32_t status = 0; 133 u64 addr; 134 135 addr = (u64)entry->src_data[0] << 12; 136 addr |= ((u64)entry->src_data[1] & 0xf) << 44; 137 138 if (!amdgpu_sriov_vf(adev)) { 139 /* 140 * Issue a dummy read to wait for the status register to 141 * be updated to avoid reading an incorrect value due to 142 * the new fast GRBM interface. 143 */ 144 if (entry->vmid_src == AMDGPU_GFXHUB_0) 145 RREG32(hub->vm_l2_pro_fault_status); 146 147 status = RREG32(hub->vm_l2_pro_fault_status); 148 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); 149 } 150 151 if (printk_ratelimit()) { 152 struct amdgpu_task_info task_info; 153 154 memset(&task_info, 0, sizeof(struct amdgpu_task_info)); 155 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); 156 157 dev_err(adev->dev, 158 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " 159 "for process %s pid %d thread %s pid %d)\n", 160 entry->vmid_src ? "mmhub" : "gfxhub", 161 entry->src_id, entry->ring_id, entry->vmid, 162 entry->pasid, task_info.process_name, task_info.tgid, 163 task_info.task_name, task_info.pid); 164 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", 165 addr, entry->client_id); 166 if (!amdgpu_sriov_vf(adev)) { 167 dev_err(adev->dev, 168 "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 169 status); 170 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", 171 REG_GET_FIELD(status, 172 GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); 173 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", 174 REG_GET_FIELD(status, 175 GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); 176 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", 177 REG_GET_FIELD(status, 178 GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); 179 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", 180 REG_GET_FIELD(status, 181 GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); 182 dev_err(adev->dev, "\t RW: 0x%lx\n", 183 REG_GET_FIELD(status, 184 GCVM_L2_PROTECTION_FAULT_STATUS, RW)); 185 } 186 } 187 188 return 0; 189 } 190 191 static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = { 192 .set = gmc_v10_0_vm_fault_interrupt_state, 193 .process = gmc_v10_0_process_interrupt, 194 }; 195 196 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) 197 { 198 adev->gmc.vm_fault.num_types = 1; 199 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs; 200 } 201 202 static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, 203 uint32_t flush_type) 204 { 205 u32 req = 0; 206 207 /* invalidate using legacy mode on vmid*/ 208 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, 209 PER_VMID_INVALIDATE_REQ, 1 << vmid); 210 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 211 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 212 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 213 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); 214 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); 215 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); 216 req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, 217 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); 218 219 return req; 220 } 221 222 /* 223 * GART 224 * VMID 0 is the physical GPU addresses as used by the kernel. 225 * VMIDs 1-15 are used for userspace clients and are handled 226 * by the amdgpu vm/hsa code. 227 */ 228 229 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, 230 unsigned int vmhub, uint32_t flush_type) 231 { 232 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; 233 u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); 234 /* Use register 17 for GART */ 235 const unsigned eng = 17; 236 unsigned int i; 237 238 spin_lock(&adev->gmc.invalidate_lock); 239 /* 240 * It may lose gpuvm invalidate acknowldege state across power-gating 241 * off cycle, add semaphore acquire before invalidation and semaphore 242 * release after invalidation to avoid entering power gated state 243 * to WA the Issue 244 */ 245 246 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 247 if (vmhub == AMDGPU_MMHUB_0 || 248 vmhub == AMDGPU_MMHUB_1) { 249 for (i = 0; i < adev->usec_timeout; i++) { 250 /* a read return value of 1 means semaphore acuqire */ 251 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); 252 if (tmp & 0x1) 253 break; 254 udelay(1); 255 } 256 257 if (i >= adev->usec_timeout) 258 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); 259 } 260 261 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); 262 263 /* 264 * Issue a dummy read to wait for the ACK register to be cleared 265 * to avoid a false ACK due to the new fast GRBM interface. 266 */ 267 if (vmhub == AMDGPU_GFXHUB_0) 268 RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); 269 270 /* Wait for ACK with a delay.*/ 271 for (i = 0; i < adev->usec_timeout; i++) { 272 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); 273 tmp &= 1 << vmid; 274 if (tmp) 275 break; 276 277 udelay(1); 278 } 279 280 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 281 if (vmhub == AMDGPU_MMHUB_0 || 282 vmhub == AMDGPU_MMHUB_1) 283 /* 284 * add semaphore release after invalidation, 285 * write with 0 means semaphore release 286 */ 287 WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); 288 289 spin_unlock(&adev->gmc.invalidate_lock); 290 291 if (i < adev->usec_timeout) 292 return; 293 294 DRM_ERROR("Timeout waiting for VM flush ACK!\n"); 295 } 296 297 /** 298 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback 299 * 300 * @adev: amdgpu_device pointer 301 * @vmid: vm instance to flush 302 * 303 * Flush the TLB for the requested page table. 304 */ 305 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, 306 uint32_t vmhub, uint32_t flush_type) 307 { 308 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 309 struct dma_fence *fence; 310 struct amdgpu_job *job; 311 312 int r; 313 314 /* flush hdp cache */ 315 adev->nbio.funcs->hdp_flush(adev, NULL); 316 317 mutex_lock(&adev->mman.gtt_window_lock); 318 319 if (vmhub == AMDGPU_MMHUB_0) { 320 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); 321 mutex_unlock(&adev->mman.gtt_window_lock); 322 return; 323 } 324 325 BUG_ON(vmhub != AMDGPU_GFXHUB_0); 326 327 if (!adev->mman.buffer_funcs_enabled || 328 !adev->ib_pool_ready || 329 adev->in_gpu_reset) { 330 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); 331 mutex_unlock(&adev->mman.gtt_window_lock); 332 return; 333 } 334 335 /* The SDMA on Navi has a bug which can theoretically result in memory 336 * corruption if an invalidation happens at the same time as an VA 337 * translation. Avoid this by doing the invalidation from the SDMA 338 * itself. 339 */ 340 r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job); 341 if (r) 342 goto error_alloc; 343 344 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); 345 job->vm_needs_flush = true; 346 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; 347 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 348 r = amdgpu_job_submit(job, &adev->mman.entity, 349 AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 350 if (r) 351 goto error_submit; 352 353 mutex_unlock(&adev->mman.gtt_window_lock); 354 355 dma_fence_wait(fence, false); 356 dma_fence_put(fence); 357 358 return; 359 360 error_submit: 361 amdgpu_job_free(job); 362 363 error_alloc: 364 mutex_unlock(&adev->mman.gtt_window_lock); 365 DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); 366 } 367 368 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, 369 unsigned vmid, uint64_t pd_addr) 370 { 371 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 372 uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); 373 unsigned eng = ring->vm_inv_eng; 374 375 /* 376 * It may lose gpuvm invalidate acknowldege state across power-gating 377 * off cycle, add semaphore acquire before invalidation and semaphore 378 * release after invalidation to avoid entering power gated state 379 * to WA the Issue 380 */ 381 382 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 383 if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || 384 ring->funcs->vmhub == AMDGPU_MMHUB_1) 385 /* a read return value of 1 means semaphore acuqire */ 386 amdgpu_ring_emit_reg_wait(ring, 387 hub->vm_inv_eng0_sem + eng, 0x1, 0x1); 388 389 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), 390 lower_32_bits(pd_addr)); 391 392 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), 393 upper_32_bits(pd_addr)); 394 395 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, 396 hub->vm_inv_eng0_ack + eng, 397 req, 1 << vmid); 398 399 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 400 if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || 401 ring->funcs->vmhub == AMDGPU_MMHUB_1) 402 /* 403 * add semaphore release after invalidation, 404 * write with 0 means semaphore release 405 */ 406 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); 407 408 return pd_addr; 409 } 410 411 static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, 412 unsigned pasid) 413 { 414 struct amdgpu_device *adev = ring->adev; 415 uint32_t reg; 416 417 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) 418 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; 419 else 420 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; 421 422 amdgpu_ring_emit_wreg(ring, reg, pasid); 423 } 424 425 /* 426 * PTE format on NAVI 10: 427 * 63:59 reserved 428 * 58:57 reserved 429 * 56 F 430 * 55 L 431 * 54 reserved 432 * 53:52 SW 433 * 51 T 434 * 50:48 mtype 435 * 47:12 4k physical page base address 436 * 11:7 fragment 437 * 6 write 438 * 5 read 439 * 4 exe 440 * 3 Z 441 * 2 snooped 442 * 1 system 443 * 0 valid 444 * 445 * PDE format on NAVI 10: 446 * 63:59 block fragment size 447 * 58:55 reserved 448 * 54 P 449 * 53:48 reserved 450 * 47:6 physical base address of PD or PTE 451 * 5:3 reserved 452 * 2 C 453 * 1 system 454 * 0 valid 455 */ 456 457 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) 458 { 459 switch (flags) { 460 case AMDGPU_VM_MTYPE_DEFAULT: 461 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 462 case AMDGPU_VM_MTYPE_NC: 463 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 464 case AMDGPU_VM_MTYPE_WC: 465 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); 466 case AMDGPU_VM_MTYPE_CC: 467 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); 468 case AMDGPU_VM_MTYPE_UC: 469 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); 470 default: 471 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); 472 } 473 } 474 475 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, 476 uint64_t *addr, uint64_t *flags) 477 { 478 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 479 *addr = adev->vm_manager.vram_base_offset + *addr - 480 adev->gmc.vram_start; 481 BUG_ON(*addr & 0xFFFF00000000003FULL); 482 483 if (!adev->gmc.translate_further) 484 return; 485 486 if (level == AMDGPU_VM_PDB1) { 487 /* Set the block fragment size */ 488 if (!(*flags & AMDGPU_PDE_PTE)) 489 *flags |= AMDGPU_PDE_BFS(0x9); 490 491 } else if (level == AMDGPU_VM_PDB0) { 492 if (*flags & AMDGPU_PDE_PTE) 493 *flags &= ~AMDGPU_PDE_PTE; 494 else 495 *flags |= AMDGPU_PTE_TF; 496 } 497 } 498 499 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, 500 struct amdgpu_bo_va_mapping *mapping, 501 uint64_t *flags) 502 { 503 *flags &= ~AMDGPU_PTE_EXECUTABLE; 504 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 505 506 *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; 507 *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); 508 509 if (mapping->flags & AMDGPU_PTE_PRT) { 510 *flags |= AMDGPU_PTE_PRT; 511 *flags |= AMDGPU_PTE_SNOOPED; 512 *flags |= AMDGPU_PTE_LOG; 513 *flags |= AMDGPU_PTE_SYSTEM; 514 *flags &= ~AMDGPU_PTE_VALID; 515 } 516 } 517 518 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { 519 .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, 520 .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, 521 .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, 522 .map_mtype = gmc_v10_0_map_mtype, 523 .get_vm_pde = gmc_v10_0_get_vm_pde, 524 .get_vm_pte = gmc_v10_0_get_vm_pte 525 }; 526 527 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) 528 { 529 if (adev->gmc.gmc_funcs == NULL) 530 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs; 531 } 532 533 static int gmc_v10_0_early_init(void *handle) 534 { 535 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 536 537 gmc_v10_0_set_gmc_funcs(adev); 538 gmc_v10_0_set_irq_funcs(adev); 539 540 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; 541 adev->gmc.shared_aperture_end = 542 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; 543 adev->gmc.private_aperture_start = 0x1000000000000000ULL; 544 adev->gmc.private_aperture_end = 545 adev->gmc.private_aperture_start + (4ULL << 30) - 1; 546 547 return 0; 548 } 549 550 static int gmc_v10_0_late_init(void *handle) 551 { 552 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 553 unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; 554 unsigned i; 555 556 for(i = 0; i < adev->num_rings; ++i) { 557 struct amdgpu_ring *ring = adev->rings[i]; 558 unsigned vmhub = ring->funcs->vmhub; 559 560 ring->vm_inv_eng = vm_inv_eng[vmhub]++; 561 dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", 562 ring->idx, ring->name, ring->vm_inv_eng, 563 ring->funcs->vmhub); 564 } 565 566 /* Engine 17 is used for GART flushes */ 567 for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) 568 BUG_ON(vm_inv_eng[i] > 17); 569 570 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); 571 } 572 573 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, 574 struct amdgpu_gmc *mc) 575 { 576 u64 base = 0; 577 578 base = gfxhub_v2_0_get_fb_location(adev); 579 580 amdgpu_gmc_vram_location(adev, &adev->gmc, base); 581 amdgpu_gmc_gart_location(adev, mc); 582 583 /* base offset of vram pages */ 584 adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev); 585 } 586 587 /** 588 * gmc_v10_0_mc_init - initialize the memory controller driver params 589 * 590 * @adev: amdgpu_device pointer 591 * 592 * Look up the amount of vram, vram width, and decide how to place 593 * vram and gart within the GPU's physical address space. 594 * Returns 0 for success. 595 */ 596 static int gmc_v10_0_mc_init(struct amdgpu_device *adev) 597 { 598 /* Could aper size report 0 ? */ 599 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); 600 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); 601 602 /* size in MB on si */ 603 adev->gmc.mc_vram_size = 604 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; 605 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 606 adev->gmc.visible_vram_size = adev->gmc.aper_size; 607 608 /* In case the PCI BAR is larger than the actual amount of vram */ 609 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 610 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 611 612 /* set the gart size */ 613 if (amdgpu_gart_size == -1) { 614 switch (adev->asic_type) { 615 case CHIP_NAVI10: 616 case CHIP_NAVI14: 617 case CHIP_NAVI12: 618 default: 619 adev->gmc.gart_size = 512ULL << 20; 620 break; 621 } 622 } else 623 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; 624 625 gmc_v10_0_vram_gtt_location(adev, &adev->gmc); 626 627 return 0; 628 } 629 630 static int gmc_v10_0_gart_init(struct amdgpu_device *adev) 631 { 632 int r; 633 634 if (adev->gart.bo) { 635 WARN(1, "NAVI10 PCIE GART already initialized\n"); 636 return 0; 637 } 638 639 /* Initialize common gart structure */ 640 r = amdgpu_gart_init(adev); 641 if (r) 642 return r; 643 644 adev->gart.table_size = adev->gart.num_gpu_pages * 8; 645 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) | 646 AMDGPU_PTE_EXECUTABLE; 647 648 return amdgpu_gart_table_vram_alloc(adev); 649 } 650 651 static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) 652 { 653 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); 654 unsigned size; 655 656 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { 657 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ 658 } else { 659 u32 viewport; 660 u32 pitch; 661 662 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); 663 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH); 664 size = (REG_GET_FIELD(viewport, 665 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * 666 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * 667 4); 668 } 669 /* return 0 if the pre-OS buffer uses up most of vram */ 670 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) { 671 DRM_ERROR("Warning: pre-OS buffer uses most of vram, \ 672 be aware of gart table overwrite\n"); 673 return 0; 674 } 675 676 return size; 677 } 678 679 680 681 static int gmc_v10_0_sw_init(void *handle) 682 { 683 int r, vram_width = 0, vram_type = 0, vram_vendor = 0; 684 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 685 686 gfxhub_v2_0_init(adev); 687 mmhub_v2_0_init(adev); 688 689 spin_lock_init(&adev->gmc.invalidate_lock); 690 691 r = amdgpu_atomfirmware_get_vram_info(adev, 692 &vram_width, &vram_type, &vram_vendor); 693 if (!amdgpu_emu_mode) 694 adev->gmc.vram_width = vram_width; 695 else 696 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ 697 698 adev->gmc.vram_type = vram_type; 699 adev->gmc.vram_vendor = vram_vendor; 700 switch (adev->asic_type) { 701 case CHIP_NAVI10: 702 case CHIP_NAVI14: 703 case CHIP_NAVI12: 704 adev->num_vmhubs = 2; 705 /* 706 * To fulfill 4-level page support, 707 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12, 708 * block size 512 (9bit) 709 */ 710 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 711 break; 712 default: 713 break; 714 } 715 716 /* This interrupt is VMC page fault.*/ 717 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 718 VMC_1_0__SRCID__VM_FAULT, 719 &adev->gmc.vm_fault); 720 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 721 UTCL2_1_0__SRCID__FAULT, 722 &adev->gmc.vm_fault); 723 if (r) 724 return r; 725 726 /* 727 * Set the internal MC address mask This is the max address of the GPU's 728 * internal address space. 729 */ 730 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ 731 732 /* 733 * Reserve 8M stolen memory for navi10 like vega10 734 * TODO: will check if it's really needed on asic. 735 */ 736 if (amdgpu_emu_mode == 1) 737 adev->gmc.stolen_size = 0; 738 else 739 adev->gmc.stolen_size = 9 * 1024 *1024; 740 741 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); 742 if (r) { 743 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); 744 return r; 745 } 746 747 r = gmc_v10_0_mc_init(adev); 748 if (r) 749 return r; 750 751 adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev); 752 753 /* Memory manager */ 754 r = amdgpu_bo_init(adev); 755 if (r) 756 return r; 757 758 r = gmc_v10_0_gart_init(adev); 759 if (r) 760 return r; 761 762 /* 763 * number of VMs 764 * VMID 0 is reserved for System 765 * amdgpu graphics/compute will use VMIDs 1-7 766 * amdkfd will use VMIDs 8-15 767 */ 768 adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; 769 adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; 770 771 amdgpu_vm_manager_init(adev); 772 773 return 0; 774 } 775 776 /** 777 * gmc_v8_0_gart_fini - vm fini callback 778 * 779 * @adev: amdgpu_device pointer 780 * 781 * Tears down the driver GART/VM setup (CIK). 782 */ 783 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) 784 { 785 amdgpu_gart_table_vram_free(adev); 786 amdgpu_gart_fini(adev); 787 } 788 789 static int gmc_v10_0_sw_fini(void *handle) 790 { 791 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 792 793 amdgpu_vm_manager_fini(adev); 794 gmc_v10_0_gart_fini(adev); 795 amdgpu_gem_force_release(adev); 796 amdgpu_bo_fini(adev); 797 798 return 0; 799 } 800 801 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) 802 { 803 switch (adev->asic_type) { 804 case CHIP_NAVI10: 805 case CHIP_NAVI14: 806 case CHIP_NAVI12: 807 break; 808 default: 809 break; 810 } 811 } 812 813 /** 814 * gmc_v10_0_gart_enable - gart enable 815 * 816 * @adev: amdgpu_device pointer 817 */ 818 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) 819 { 820 int r; 821 bool value; 822 u32 tmp; 823 824 if (adev->gart.bo == NULL) { 825 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); 826 return -EINVAL; 827 } 828 829 r = amdgpu_gart_table_vram_pin(adev); 830 if (r) 831 return r; 832 833 r = gfxhub_v2_0_gart_enable(adev); 834 if (r) 835 return r; 836 837 r = mmhub_v2_0_gart_enable(adev); 838 if (r) 839 return r; 840 841 tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); 842 tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; 843 WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); 844 845 tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); 846 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); 847 848 /* Flush HDP after it is initialized */ 849 adev->nbio.funcs->hdp_flush(adev, NULL); 850 851 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 852 false : true; 853 854 gfxhub_v2_0_set_fault_enable_default(adev, value); 855 mmhub_v2_0_set_fault_enable_default(adev, value); 856 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); 857 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 858 859 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 860 (unsigned)(adev->gmc.gart_size >> 20), 861 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 862 863 adev->gart.ready = true; 864 865 return 0; 866 } 867 868 static int gmc_v10_0_hw_init(void *handle) 869 { 870 int r; 871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 872 873 /* The sequence of these two function calls matters.*/ 874 gmc_v10_0_init_golden_registers(adev); 875 876 r = gmc_v10_0_gart_enable(adev); 877 if (r) 878 return r; 879 880 return 0; 881 } 882 883 /** 884 * gmc_v10_0_gart_disable - gart disable 885 * 886 * @adev: amdgpu_device pointer 887 * 888 * This disables all VM page table. 889 */ 890 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) 891 { 892 gfxhub_v2_0_gart_disable(adev); 893 mmhub_v2_0_gart_disable(adev); 894 amdgpu_gart_table_vram_unpin(adev); 895 } 896 897 static int gmc_v10_0_hw_fini(void *handle) 898 { 899 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 900 901 if (amdgpu_sriov_vf(adev)) { 902 /* full access mode, so don't touch any GMC register */ 903 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); 904 return 0; 905 } 906 907 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 908 gmc_v10_0_gart_disable(adev); 909 910 return 0; 911 } 912 913 static int gmc_v10_0_suspend(void *handle) 914 { 915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 916 917 gmc_v10_0_hw_fini(adev); 918 919 return 0; 920 } 921 922 static int gmc_v10_0_resume(void *handle) 923 { 924 int r; 925 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 926 927 r = gmc_v10_0_hw_init(adev); 928 if (r) 929 return r; 930 931 amdgpu_vmid_reset_all(adev); 932 933 return 0; 934 } 935 936 static bool gmc_v10_0_is_idle(void *handle) 937 { 938 /* MC is always ready in GMC v10.*/ 939 return true; 940 } 941 942 static int gmc_v10_0_wait_for_idle(void *handle) 943 { 944 /* There is no need to wait for MC idle in GMC v10.*/ 945 return 0; 946 } 947 948 static int gmc_v10_0_soft_reset(void *handle) 949 { 950 return 0; 951 } 952 953 static int gmc_v10_0_set_clockgating_state(void *handle, 954 enum amd_clockgating_state state) 955 { 956 int r; 957 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 958 959 r = mmhub_v2_0_set_clockgating(adev, state); 960 if (r) 961 return r; 962 963 return athub_v2_0_set_clockgating(adev, state); 964 } 965 966 static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) 967 { 968 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 969 970 mmhub_v2_0_get_clockgating(adev, flags); 971 972 athub_v2_0_get_clockgating(adev, flags); 973 } 974 975 static int gmc_v10_0_set_powergating_state(void *handle, 976 enum amd_powergating_state state) 977 { 978 return 0; 979 } 980 981 const struct amd_ip_funcs gmc_v10_0_ip_funcs = { 982 .name = "gmc_v10_0", 983 .early_init = gmc_v10_0_early_init, 984 .late_init = gmc_v10_0_late_init, 985 .sw_init = gmc_v10_0_sw_init, 986 .sw_fini = gmc_v10_0_sw_fini, 987 .hw_init = gmc_v10_0_hw_init, 988 .hw_fini = gmc_v10_0_hw_fini, 989 .suspend = gmc_v10_0_suspend, 990 .resume = gmc_v10_0_resume, 991 .is_idle = gmc_v10_0_is_idle, 992 .wait_for_idle = gmc_v10_0_wait_for_idle, 993 .soft_reset = gmc_v10_0_soft_reset, 994 .set_clockgating_state = gmc_v10_0_set_clockgating_state, 995 .set_powergating_state = gmc_v10_0_set_powergating_state, 996 .get_clockgating_state = gmc_v10_0_get_clockgating_state, 997 }; 998 999 const struct amdgpu_ip_block_version gmc_v10_0_ip_block = 1000 { 1001 .type = AMD_IP_BLOCK_TYPE_GMC, 1002 .major = 10, 1003 .minor = 0, 1004 .rev = 0, 1005 .funcs = &gmc_v10_0_ip_funcs, 1006 }; 1007