/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include "mmhub_v1_0.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"

/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In
 * that case, we use the sysvm aperture (vmid0 page tables) for both vram
 * and gart (aka system memory) access.
 *
 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
 * aperture to be placed at an address aligned to 8 times the native
 * page size. For example, if vm_context0_cntl.page_table_block_size
 * is 12, then the native page size is 8G (2M * 2^12), so sysvm should
 * start at a 64G-aligned address. For simplicity, we just put sysvm at
 * address 0, so vram starts at address 0 and gart is placed right after vram.
 */
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 * If the GART size is bigger than the space left, then we adjust the GART
 * size. Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
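	/* Space available before the FB (counting up from address 0) and after
	 * it (up to the capped max MC address), used to pick the GART placement.
	 */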
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are set up.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	/* initialize mmhub ras funcs */
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		amdgpu_mmhub_ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);
}

/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 * subject to change when ring number changes
 * Engine 17: Gart flushes
 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
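		/* ffs() is 1-based; a return of 0 means no free engine is left in the bitmap */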
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_RENOIR:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
		/* Don't enable it by default yet.
		 */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 *
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests to fail
		 * for some ASICs, so set the default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry;
		 * regardless of what we decide for other
		 * ASICs, we should leave Raven with
		 * noretry = 0 until we root-cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		WREG32(reg, tmp);
	}
}

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first 8M,
	 * and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}

/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTEs by setting the
 * P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 *
 */
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
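	/* Start from the default GART PTE flags; the PDE0-as-PTE bits are
	 * OR'ed in below.
	 */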
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
	 */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
		adev->vm_manager.vram_base_offset - adev->gmc.vram_start;

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTEs,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB which has more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}