/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
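
/* For illustration: with 64 GiB of usable system memory, the limits
 * above work out to 64 - 64/16 = 60 GiB of system memory and
 * 32 - 8 = 24 GiB of TTM memory. Both counters are checked on every
 * KFD allocation in amdgpu_amdkfd_reserve_mem_limit() below.
 */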

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
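
/* In brief, what the reserve/unreserve pair above charges per
 * allocation type, where acc_size is the bookkeeping overhead
 * estimated by amdgpu_amdkfd_acc_size():
 *
 *   GTT BO:     system += size + acc_size, TTM += size + acc_size
 *   Userptr BO: system += size + acc_size, TTM += acc_size
 *   SG BO:      system += acc_size,        TTM += acc_size
 *   VRAM BO:    system += acc_size,        TTM += acc_size,
 *               VRAM += size
 */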

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_shared_list(resv);
	if (!old)
		return 0;

	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and
	 * sort the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);

	/* Drop the references to the removed fences */
	for (i = j; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
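
/* For illustration of the partitioning in
 * amdgpu_amdkfd_remove_eviction_fence(): given a shared fence list
 * [A, E, B, E'] where E and E' belong to the eviction fence context
 * and A, B do not, the loop produces [A, B, E', E] with shared_count
 * truncated to 2, so A and B stay visible while the references to E
 * and E' are dropped.
 */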

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* We can always get vm_bo from the root PD BO. */
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
	uint32_t mapping_flags;
	uint64_t pte_flags;
	bool snoop = false;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case CHIP_ALDEBARAN:
		if (coherent && uncached) {
			if (adev->gmc.xgmi.connected_to_cpu ||
			    !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
				snoop = true;
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			snoop = true;
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	return pte_flags;
}
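
/* Example of how the MTYPE selection above resolves for a cached,
 * non-coherent VRAM allocation on Aldebaran: mapped on the GPU that
 * owns the memory it gets AMDGPU_VM_MTYPE_RW, with snooping only if
 * that GPU is XGMI-connected to the CPU; mapped from a peer GPU it
 * gets AMDGPU_VM_MTYPE_NC, with snooping if both GPUs share an XGMI
 * hive.
 */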

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	/* Check this before allocating ttm->sg so nothing leaks on error */
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
				       ttm->num_pages);

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}

static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}
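
/* kfd_mem_dmamap_attachment() runs with the BO and VM reserved, from
 * update_gpuvm_pte() below. KFD_MEM_ATT_SHARED attachments reuse the
 * original BO's DMA mapping, so there is nothing to do for them here;
 * USERPTR and DMABUF attachments carry their own SG BO that must be
 * DMA-mapped for the attachment's GPU before its page tables can be
 * updated.
 */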

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int
kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
		       struct amdgpu_bo **bo)
{
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct drm_gem_object *gobj;
	int ret;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	ret = amdgpu_gem_object_create(adev, bo_size, 1,
				       AMDGPU_GEM_DOMAIN_CPU,
				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
				       &gobj);
	amdgpu_bo_unreserve(mem->bo);
	if (ret)
		return ret;

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	if (!mem->dmabuf) {
		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DRM_RDWR : 0);
		if (IS_ERR(mem->dmabuf)) {
			ret = PTR_ERR(mem->dmabuf);
			mem->dmabuf = NULL;
			return ret;
		}
	}

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	/* Import takes an extra reference on the dmabuf. Drop it now to
	 * avoid leaking it. We only need the one reference in
	 * kgd_mem->dmabuf.
	 */
	dma_buf_put(mem->dmabuf);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
					amdgpu_xgmi_same_hive(adev, bo_adev))) {
			/* Mappings on the local GPU and VRAM mappings in the
			 * local hive share the original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
			   mem->bo->tbo.type != ttm_bo_type_sg) {
			/* GTT BOs use DMA-mapping ability of dynamic-attach
			 * DMA bufs. TODO: The same should work for VRAM on
			 * large-BAR GPUs.
			 */
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else {
			/* FIXME: Need to DMA-map other BO types:
			 * large-BAR VRAM, doorbells, MMIO remap
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		}

		/* Add BO to VM internal data structures */
		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}

		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}
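
/* In brief, the attachment-type decision above: the original BO is
 * shared for local mappings, for VRAM mappings within an XGMI hive
 * and for second (AQL) mappings on the same GPU; userptr BOs on other
 * GPUs get a per-GPU SG BO; GTT BOs on other GPUs are attached
 * through dynamic-attach DMA-bufs; everything else currently falls
 * back to sharing the original BO.
 */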

static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
		 attachment->va, attachment);
	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}
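
/* The lists managed by the two helpers above drive eviction and
 * restore: BOs on kfd_bo_list are revalidated when a process is
 * restored after an eviction, while userptr BOs move between
 * userptr_valid_list and userptr_inval_list as MMU notifiers
 * invalidate them and amdgpu_amdkfd_restore_userptr_worker()
 * revalidates them.
 */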

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket */
	struct list_head list, duplicates;  /* BO lists */
	struct amdgpu_sync *sync;	    /* Pointer to sync object */
	bool reserved;			    /* Whether BOs are reserved */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};
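
/* Typical usage of the reservation context, for illustration (this is
 * the pattern used by the map and unmap paths below):
 *
 *	struct bo_vm_reservation_context ctx;
 *	int ret;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (ret)
 *		return ret;
 *	// validate BOs and update page tables under reservation,
 *	// adding VM update fences to ctx.sync
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 *
 * Passing wait = true to unreserve_bo_and_vms() additionally blocks
 * on the fences accumulated in ctx.sync.
 */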

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 *	reserved. Otherwise, only the given VM is.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_mem_attachment *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				    &ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	kfd_mem_dmaunmap_attachment(mem, entry);
}

static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync,
			    bool *table_freed)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}
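
/* Note the pairing above: update_gpuvm_pte() DMA-maps the attachment
 * before updating the page tables, and unmap_bo_from_gpuvm() DMA-unmaps
 * it after clearing them, so a USERPTR or DMABUF attachment only holds
 * a DMA mapping while it is actually mapped in its VM.
 */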

static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte,
			   bool *table_freed)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
		       entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
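
/* create_doorbell_sg() builds a one-entry SG table whose dma_address
 * is the doorbell or MMIO bus address passed in by the caller. The
 * resulting SG BO can therefore be mapped into GPUVM without any
 * backing system pages.
 */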

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, u32 pasid,
					   void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	int ret;

	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm;

	if (WARN_ON(!kgd || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm has already been
	 * released during the conversion to a compute vm. The current
	 * pasid is managed by kfd and will be released on kfd process
	 * destroy. Set the amdgpu pasid to 0 to avoid a duplicate
	 * release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
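
/* In brief, the flag handling in
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() below: VRAM allocations
 * are placed in VRAM (CPU-accessible if FLAGS_PUBLIC is set), GTT
 * allocations in GTT, userptr allocations are created in the CPU
 * domain and validated into GTT later by init_user_pages(), and
 * doorbell/MMIO allocations become small SG BOs built by
 * create_doorbell_sg() and mapped in GTT.
 */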

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					    process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
		kfd_mem_detach(entry);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}
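
/* Freeing is only allowed once mapped_to_gpu_memory has dropped to
 * zero, i.e. after the BO has been unmapped from every VM it was
 * mapped into; otherwise the function above returns -EBUSY.
 */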

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem,
		void *drm_priv, bool *table_freed)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			avm, domain_string(domain));

	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto out_unreserve;

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto out_unreserve;
		}
	}

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || entry->is_mapped)
			continue;

		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
				      is_invalid_userptr, table_freed);
		if (ret) {
			pr_err("Failed to map bo to gpuvm\n");
			goto out_unreserve;
		}

		ret = vm_update_pds(avm, ctx.sync);
		if (ret) {
			pr_err("Failed to update page directories\n");
			goto out_unreserve;
		}

		entry->is_mapped = true;
		mem->mapped_to_gpu_memory++;
		pr_debug("\t INC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

out_unreserve:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
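
/* Each attachment that becomes mapped in the loop above increments
 * mem->mapped_to_gpu_memory, and each unmap below decrements it. The
 * count thus tracks in how many VMs the BO is currently mapped and
 * gates both eviction-fence removal and freeing of the BO.
 */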

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdkfd_process_info *process_info = avm->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		avm);

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
			continue;

		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
		entry->is_mapped = false;

		mem->mapped_to_gpu_memory--;
		pr_debug("\t DEC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						    process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* Delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO when restoring BOs after an eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	int ret;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (drm_to_adev(obj->dev) != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret) {
		kfree(*mem);
		return ret;
	}

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
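
/* Restore pipeline for evicted userptr BOs: mem->invalid marks
 * individual BOs and process_info->evicted_bos counts them per
 * process. The first eviction quiesces the process's queues and
 * schedules restore_userptr_work with a short delay so that several
 * invalidations can be batched into one restore. The worker then uses
 * update_invalid_user_pages() and validate_invalid_user_pages() below
 * to pick up the new page addresses and update the GPUVM mappings.
 */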
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				 __func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

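/*
 * Note: the restore path is deliberately split in two phases. The
 * first phase above only collects invalidated BOs and refreshes their
 * user pages, without holding all BO reservations across the page
 * lookups. The second phase below reserves the page directories and
 * all invalidated BOs up front, so validation and the page table
 * updates can be fenced as one unit.
 */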
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:
	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after a successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

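/*
 * Illustrative caller sketch (an assumption about the KFD side, not
 * part of this file): the KFD restore worker is expected to invoke the
 * function below with its process info and adopt the new eviction
 * fence that is returned, roughly:
 *
 *	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 *						      &p->ef);
 *	if (ret)
 *		reschedule the restore attempt;
 */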
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. The function
 * should be called when the process is still valid. BO restore involves:
 *
 * 1. Release the old eviction fence and create a new one
 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7. Add fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			/* Fall back to GTT if validation in the preferred
			 * domain failed
			 */
			ret = amdgpu_amdkfd_bo_validate(bo,
						AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release the old eviction fence and create a new one, because a
	 * fence only goes from unsignaled to signaled once and cannot be
	 * reused. Use the context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm,
				NULL);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach the new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

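/*
 * Illustrative pairing (sketch only; the caller and the BO passed in
 * are assumptions, not defined in this file): GWS is attached to a
 * process once and detached when the process is torn down, e.g.:
 *
 *	struct kgd_mem *gws_mem;
 *
 *	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo,
 *					       &gws_mem);
 *	...
 *	ret = amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
 */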
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the BO so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: should the BO be added back to the validate list? */
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
					    process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* These values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}