/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
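
/* Worked example of the estimate above (illustrative numbers): for
 * 64 GB of managed memory, ESTIMATE_PT_SIZE(64ULL << 30) =
 * (64ULL << 30) >> 14 = 4 MB reserved for page tables. That sits
 * between the pure 4KB-page cost (64 GB >> 9 = 128 MB) and the pure
 * 2MB-page cost (64 GB >> 18 = 256 KB).
 */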

static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
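
/* Summary of what reserve/unreserve charge per BO type above
 * (acc = amdgpu_amdkfd_acc_size() metadata overhead, size = BO size):
 *
 *   GTT BO:		system_mem += acc + size, ttm_mem += acc + size
 *   Userptr BO:	system_mem += acc + size, ttm_mem += acc
 *   VRAM BO:		system_mem += acc, ttm_mem += acc, vram += size
 *   SG BO (doorbell):	system_mem += acc, ttm_mem += acc
 */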

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved, i.e. bo->tbo.base.resv held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_shared_list(resv);
	if (!old)
		return 0;

	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);

	/* Drop the references to the removed fences */
	for (i = j; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
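
/* Illustration of the partitioning loop above: given a shared list
 * [A, EF, B, EF'] where EF and EF' share the eviction fence context,
 * the new list becomes [A, B, EF', EF] with shared_count = 2, so the
 * two eviction fence references past shared_count are dropped while A
 * and B remain visible to waiters.
 */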

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* We can always get vm_bo from the root PD bo. */
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
	uint32_t mapping_flags;
	uint64_t pte_flags;
	bool snoop = false;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case CHIP_ALDEBARAN:
		if (coherent && uncached) {
			if (adev->gmc.xgmi.connected_to_cpu ||
			    !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
				snoop = true;
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			snoop = true;
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	return pte_flags;
}
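
/* Example of the mapping flags chosen above: a writable, coherent
 * (not uncached) VRAM BO mapped on the Aldebaran GPU that owns it
 * (bo_adev == adev) gets AMDGPU_VM_MTYPE_CC, plus SNOOPED when that
 * GPU is XGMI-connected to the CPU. The same BO mapped from a peer
 * GPU instead gets AMDGPU_VM_MTYPE_UC, with SNOOPED only inside the
 * same XGMI hive.
 */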

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Don't leak the freshly allocated sg table on a size mismatch */
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) {
		ret = -EINVAL;
		goto free_sg;
	}

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
				       ttm->num_pages);

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}

static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int
kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
		       struct amdgpu_bo **bo)
{
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct drm_gem_object *gobj;
	int ret;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	ret = amdgpu_gem_object_create(adev, bo_size, 1,
				       AMDGPU_GEM_DOMAIN_CPU,
				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
				       &gobj);
	amdgpu_bo_unreserve(mem->bo);
	if (ret)
		return ret;

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	if (!mem->dmabuf) {
		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DRM_RDWR : 0);
		if (IS_ERR(mem->dmabuf)) {
			ret = PTR_ERR(mem->dmabuf);
			mem->dmabuf = NULL;
			return ret;
		}
	}

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	/* Import takes an extra reference on the dmabuf. Drop it now to
	 * avoid leaking it. We only need the one reference in
	 * kgd_mem->dmabuf.
	 */
	dma_buf_put(mem->dmabuf);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
	(*bo)->parent = amdgpu_bo_ref(mem->bo);

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
					amdgpu_xgmi_same_hive(adev, bo_adev))) {
			/* Mappings on the local GPU and VRAM mappings in the
			 * local hive share the original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
			   mem->bo->tbo.type != ttm_bo_type_sg) {
			/* GTT BOs use DMA-mapping ability of dynamic-attach
			 * DMA bufs. TODO: The same should work for VRAM on
			 * large-BAR GPUs.
			 */
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		} else {
			/* FIXME: Need to DMA-map other BO types:
			 * large-BAR VRAM, doorbells, MMIO remap
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		}

		/* Add BO to VM internal data structures */
		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}

		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}
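
/* Illustration: for an AQL BO of size S at VA V, the loop above runs
 * twice and leaves two attachments on mem->attachments, one for
 * [V, V + S) and one for [V + S, V + 2S). On the GPU that owns the BO
 * both attachments are KFD_MEM_ATT_SHARED and reference the same
 * amdgpu_bo.
 */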

static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
		 attachment->va, attachment);
	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 *	are used. Otherwise, only the given VM is reserved.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_mem_attachment *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->attachments, list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				    &ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
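
/* Typical usage of the reservation context (sketch, matching the
 * callers later in this file):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
 *	if (ret)
 *		return ret;
 *	... update mappings, add PT update fences to ctx.sync ...
 *	unreserve_bo_and_vms(&ctx, true, false);
 *
 * unreserve_bo_and_vms() below also frees ctx->vm_pd, so a context
 * must not be reused without reserving again.
 */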

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	kfd_mem_dmaunmap_attachment(mem, entry);
}

static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables  */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
		       entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
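
/* create_doorbell_sg() builds a single-entry sg table whose
 * dma_address is the doorbell or MMIO physical address itself; no DMA
 * mapping call is involved. amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
 * below uses it for KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL/MMIO_REMAP
 * allocations, and the free path releases the table again.
 */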

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, u32 pasid,
					   void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	int ret;

	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Free the original amdgpu-allocated pasid; it will be
	 * replaced with a kfd-allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;
	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm;

	if (WARN_ON(!kgd || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm was already released
	 * when the vm was converted to a compute vm. The current
	 * pasid is managed by kfd and will be released on kfd process
	 * destruction. Set the amdgpu pasid to 0 to avoid a duplicate
	 * release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;
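	/*
	 * Example: for an 8 MB AQL allocation only a 4 MB BO is
	 * created; kfd_mem_attach() then maps it at both va and
	 * va + 4 MB, so the queue sees 8 MB of VA backed by the same
	 * 4 MB of memory across the wraparound point.
	 */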
	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					    process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
		kfd_mem_detach(entry);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			avm, domain_string(domain));

	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto out_unreserve;

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto out_unreserve;
		}
	}

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || entry->is_mapped)
			continue;

		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
				      is_invalid_userptr);
		if (ret) {
			pr_err("Failed to map bo to gpuvm\n");
			goto out_unreserve;
		}

		ret = vm_update_pds(avm, ctx.sync);
		if (ret) {
			pr_err("Failed to update page directories\n");
			goto out_unreserve;
		}

		entry->is_mapped = true;
		mem->mapped_to_gpu_memory++;
		pr_debug("\t INC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

out_unreserve:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdkfd_process_info *process_info = avm->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		avm);

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
			continue;

		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
		entry->is_mapped = false;

		mem->mapped_to_gpu_memory--;
		pr_debug("\t DEC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}
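
/* amdgpu_amdkfd_gpuvm_sync_memory() is typically called after
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu(): cloning mem->sync under
 * the lock takes a snapshot of the pending PTE-update fences, which
 * can then be waited on (interruptibly, if intr is set) without
 * holding the lock.
 */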

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	int ret;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (drm_to_adev(obj->dev) != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;
	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret) {
		kfree(*mem);
		return ret;
	}

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
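
/* Userptr eviction/restore life cycle (sketch of the code above and
 * below):
 *
 *   MMU notifier -> amdgpu_amdkfd_evict_userptr():
 *	mem->invalid++ and process_info->evicted_bos++; the first
 *	eviction quiesces the process queues and schedules the
 *	restore work after AMDGPU_USERPTR_RESTORE_DELAY_MS.
 *   restore worker -> amdgpu_amdkfd_restore_userptr_worker():
 *	update_invalid_user_pages() and validate_invalid_user_pages()
 *	re-acquire and re-map the pages; the queues are restarted
 *	only if no concurrent evictions happened in the meantime.
 */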
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				 __func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

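/* update_invalid_user_pages() above and validate_invalid_user_pages()
 * below are called only from the restore worker, with
 * process_info->lock held. A non-zero return keeps the queues
 * quiesced and causes the worker to retry on its next run.
 */
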
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:
	return ret;
}

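/* The worker below effectively uses process_info->evicted_bos as a
 * generation counter: it snapshots the value on entry, and only if the
 * count is still unchanged at the end does it atomically reset it to
 * zero and resume the queues. If an eviction raced in between, the
 * cmpxchg fails and the work is rescheduled, so the queues stay
 * quiesced until a restore attempt completes without a concurrent
 * eviction.
 */
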
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_bo_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
						AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release the old eviction fence and create a new one, because a
	 * fence only goes from unsignaled to signaled and cannot be reused.
	 * Use the context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm,
				NULL);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

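/* amdgpu_amdkfd_add_gws_to_process - Wrap the device GWS BO in a kgd_mem
 *
 * The GWS (global wave sync) resource is a single BO owned by the
 * amdgpu device and shared with amdkfd. To let a KFD process use it,
 * wrap it in a kgd_mem, add it to the process BO list, validate it
 * once in the GWS domain and attach the process eviction fence to it.
 */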
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the BO so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
		ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
		adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
		ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}