/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
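
/*
 * Illustrative sketch only (not part of the driver): a caller that keeps a
 * CPU-visible page table, e.g. the GART code, could use
 * amdgpu_gmc_set_pte_pde() to map one system page.  "gart_cpu_ptr",
 * "page_idx" and "dma_addr" are hypothetical locals used here just to show
 * the calling convention:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *
 *	amdgpu_gmc_set_pte_pde(adev, gart_cpu_ptr, page_idx, dma_addr, flags);
 */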

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 *
 * If the GART size is bigger than the space left, the GART size is
 * adjusted, so this function never fails.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
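
/*
 * Worked example for the placement logic above (illustration only, the
 * numbers are made up): assume fb_start = 16GB, fb_end just below 24GB and
 * a 512MB GART.  size_bf is then the 16GB below VRAM and size_af everything
 * from the 4GB-aligned end of VRAM up to the hole start.  The GART fits
 * below VRAM and that region is the smaller one, so it is used:
 *
 *	size_bf >= gart_size && size_bf < size_af   ->   gart_start = 0
 *
 * i.e. the GART ends up in the first 512MB of the MC address space.
 */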

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
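
/*
 * Illustrative sketch only: an interrupt handler processing VM faults would
 * typically call amdgpu_gmc_filter_faults() first and bail out for
 * duplicates it has seen recently.  "entry" stands for a hypothetical
 * decoded interrupt vector entry carrying the faulting address, PASID and
 * timestamp:
 *
 *	if (amdgpu_gmc_filter_faults(adev, fault_addr, entry->pasid,
 *				     entry->timestamp))
 *		return 1;	// duplicate fault, nothing more to do
 */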

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
		r = adev->umc.funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
		r = adev->mmhub.funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return amdgpu_xgmi_ras_late_init(adev);
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_umc_ras_fini(adev);
	amdgpu_mmhub_ras_fini(adev);
	amdgpu_xgmi_ras_fini(adev);
}

/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 * subject to change when ring number changes
 * Engine 17: Gart flushes
 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_RENOIR:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
		/* Don't enable it by default yet.
		 */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}
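
/*
 * Worked example for the engine allocation above (illustration only): the
 * bitmap simply hands out the lowest free engine per hub.  Starting from
 * GFXHUB_FREE_VM_INV_ENGS_BITMAP = 0x1FFF3 (engines 2, 3 and 17 reserved),
 * three rings on the same hub would get engines 0, 1 and 4:
 *
 *	ffs(0x1FFF3) == 1  ->  engine 0, bitmap becomes 0x1FFF2
 *	ffs(0x1FFF2) == 2  ->  engine 1, bitmap becomes 0x1FFF0
 *	ffs(0x1FFF0) == 5  ->  engine 4, bitmap becomes 0x1FFE0
 */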

/**
 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 *
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		/*
		 * noretry = 0 will cause kfd page fault tests fail
		 * for some ASICs, so set default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry;
		 * regardless of what we decide for other
		 * asics, we should leave Raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		WREG32(reg, tmp);
	}
}
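
/*
 * Illustrative sketch only: each of the 16 per-VMID contexts of a hub is
 * controlled through the register at vm_context0_cntl + i * ctx_distance,
 * and vm_cntx_cntl_vm_fault is the hub-specific mask of fault-control bits
 * toggled above.  A caller wanting to suppress fault reporting on the GFX
 * hub could do (AMDGPU_GFXHUB_0 being the existing hub index):
 *
 *	amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
 */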

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overwrites the GART, which by default gets placed in the
	 * first 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}
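
/*
 * Worked example for the split above (illustration only): assuming
 * AMDGPU_VBIOS_VGA_ALLOCATION is 9MB and the VBIOS reports a 12MB pre-OS
 * framebuffer, the reservation is split into a 9MB VGA part and a 3MB
 * extended part:
 *
 *	stolen_vga_size      = AMDGPU_VBIOS_VGA_ALLOCATION;		// 9MB
 *	stolen_extended_size = size - adev->mman.stolen_vga_size;	// 3MB
 */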