/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_7.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	/* Reassemble the 48-bit faulting page address from the IV entry */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
			"for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}
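/*
 * Illustrative sketch only; this helper name is hypothetical and not
 * part of the driver. gmc_v11_0_process_interrupt() above rebuilds the
 * faulting page address from two IV-entry dwords: bits 43:12 come from
 * src_data[0] and bits 47:44 from the low nibble of src_data[1]. For
 * example, lo = 0x12345 and hi = 0xc decode to 0xc0012345000.
 */
static __maybe_unused u64 gmc_v11_0_decode_fault_addr(uint32_t lo, uint32_t hi)
{
	return ((u64)lo << 12) | (((u64)hi & 0xf) << 44);
}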
static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the invalidate semaphore should be used for @vmhub.
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use invalidation engine 17 for GART flushes */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across
	 * a power-gating cycle. As a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquired */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

	/* Wait for ACK, polling with a 1 us delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	/* Issue an additional private vm invalidation to MMHUB */
	if ((vmhub != AMDGPU_GFXHUB_0) &&
	    (hub->vm_l2_bank_select_reserved_cid2)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation has completed */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO.
	 * Directly use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
	return;
}
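/*
 * Illustrative sketch only; this helper name is hypothetical and not
 * part of the driver. The per-engine invalidation registers sit at a
 * fixed stride, so engine N's request register is
 * vm_inv_eng0_req + eng_distance * N; the ACK and SEM registers follow
 * the same pattern. The flush paths above hard-code engine 17, which
 * this driver reserves for GART flushes issued over MMIO.
 */
static __maybe_unused u32 gmc_v11_0_inv_eng_req_reg(struct amdgpu_vmhub *hub,
						    unsigned int eng)
{
	return hub->vm_inv_eng0_req + hub->eng_distance * eng;
}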
/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	/* Fall back to scanning the VMID->PASID LUT for a match */
	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							    &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v11_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across
	 * a power-gating cycle. As a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}
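/*
 * Illustrative sketch only; this helper name is hypothetical and not
 * part of the driver. It composes a PTE following the bit layout
 * documented above from the generic AMDGPU_PTE_* flags: the low flag
 * bits here sum to 0x67 (valid | system | snooped | read | write) and
 * the mtype field occupies bits 50:48.
 */
static __maybe_unused uint64_t gmc_v11_0_example_pte(uint64_t sys_addr)
{
	/* bits 47:12 hold the 4K-aligned physical page base address */
	return (sys_addr & 0x0000FFFFFFFFF000ULL) |
	       AMDGPU_PTE_MTYPE_NV10(MTYPE_NC) |
	       AMDGPU_PTE_SNOOPED | AMDGPU_PTE_SYSTEM |
	       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
	       AMDGPU_PTE_VALID;
}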
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
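/*
 * Worked example for the aperture setup in gmc_v11_0_early_init()
 * above (illustrative note only): each aperture spans 4 GB
 * (4ULL << 30 = 0x100000000), so the private aperture covers
 * 0x1000000000000000 through 0x10000000FFFFFFFF inclusive, and the
 * shared aperture 0x2000000000000000 through 0x20000000FFFFFFFF.
 */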
static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size, defaulting to 512 MB */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	/* one 8-byte PTE per GPU page */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
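/*
 * Worked example for the table sizing in gmc_v11_0_gart_init() above
 * (illustrative note only): with the default 512 MB GART and 4 KB GPU
 * pages, num_gpu_pages = 512 MB / 4 KB = 131072, so the page table
 * occupies 131072 * 8 bytes = 1 MB of VRAM.
 */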
static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}
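/*
 * Worked breakdown of the VM size chosen in gmc_v11_0_sw_init() above
 * (illustrative note only, derived from the arguments passed to
 * amdgpu_vm_adjust_size()): the 48-bit virtual address space
 * decomposes into a 12-bit page offset plus four 9-bit page-table
 * levels, 12 + 4 * 9 = 48 bits, i.e. 256 * 1024 GB = 256 TB.
 */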
/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};