/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t domain;
	bool wait;
};

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}
/* Set memory usage limits. Currently, limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
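/* Worked example of the limit arithmetic above (illustrative only, not part
 * of the original code): assuming 16 GiB of system RAM and, for simplicity,
 * amdgpu_amdkfd_total_mem_size also being 16 GiB,
 *   max_system_mem_limit = 16 GiB - (16 GiB >> 4) = 15 GiB
 *   max_ttm_mem_limit    = (16 GiB >> 1) - (16 GiB >> 3) = 6 GiB
 *   ESTIMATE_PT_SIZE(16 GiB) = 16 GiB >> 14 = 1 MiB
 * so 1 MiB of VRAM is held back for page tables in the check above.
 */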
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo.*/
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
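/* Illustrative example (not part of the original code): for a writable,
 * non-coherent VRAM allocation mapped on the same Arcturus device that owns
 * the BO, get_pte_flags() above would combine
 *   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_MTYPE_RW
 * before amdgpu_gem_va_map_flags() translates them into hardware PTE bits.
 */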
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;                 /* Number of VMs reserved */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
	struct ww_acquire_ctx ticket;       /* Reservation ticket */
	struct list_head list, duplicates;  /* BO lists */
	struct amdgpu_sync *sync;           /* Pointer to sync object */
	bool reserved;                      /* Whether BOs are reserved */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 *	reserved. Otherwise, only the single given VM is reserved.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				    &ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}
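/* Typical calling pattern (sketch added for readability, mirroring the
 * map/unmap paths later in this file):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);    // or reserve_bo_and_cond_vms()
 *	if (ret)
 *		return ret;
 *	// ... update mappings, accumulating fences in ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 */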
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set amdgpu pasid
	 * to 0 to avoid duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	if (!down_read_trylock(&adev->reset_sem))
		return -EIO;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	up_read(&adev->reset_sem);
	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}

	up_read(&adev->reset_sem);

	return ret;
}
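/* Sketch of the expected life cycle of a KFD allocation handled by the
 * exported functions in this file (illustrative summary, not a compiled
 * example):
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()   -> create kgd_mem and BO
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu()     -> map into one or more VMs
 *	amdgpu_amdkfd_gpuvm_sync_memory()           -> wait for PT updates
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() -> unmap from VMs
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu()    -> release BO and accounting
 */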
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = 0;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
		 mem->va,
		 mem->va + bo_size * (1 + mem->aql_queue),
		 vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				   &bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					   true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
				 entry->va, entry->va + bo_size,
				 entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
				 mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		 mem->va,
		 mem->va + bo_size * (1 + mem->aql_queue),
		 vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
				 entry->va,
				 entry->va + bo_size,
				 entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
				       mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
				 mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
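/* Overview of the userptr eviction/restore flow implemented below (summary
 * added for readability): amdgpu_amdkfd_evict_userptr() above bumps the
 * per-BO "invalid" and per-process "evicted_bos" counters and stops the
 * queues; the delayed restore worker then runs update_invalid_user_pages()
 * and validate_invalid_user_pages() and, if no new eviction raced with it,
 * resumes the queues.
 */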
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				 __func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
				      msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

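/* A minimal, hypothetical usage sketch for the restore entry point below
 * (the real caller lives in KFD's restore path, outside this file). Assuming
 * a process structure "p" that caches kgd_process_info and the current
 * eviction fence, the caller would swap in the fence returned through *ef on
 * success:
 *
 *	struct dma_fence *new_ef;
 *	int r = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 *							&new_ef);
 *	if (!r) {
 *		dma_fence_put(p->ef);
 *		p->ef = new_ef;
 *	}
 */
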
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the process is still valid. BO restore involves:
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry,
					       &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

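	/* All KFD BOs and page tables are resident and mapped again at this
	 * point. Any failure from here on still unwinds through
	 * validate_map_fail below, which backs off the reservations.
	 */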
	/* Release the old eviction fence and create a new one. A fence only
	 * goes from unsignaled to signaled once, so it cannot be reused.
	 * Use the context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

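/* amdgpu_amdkfd_add_gws_to_process - Wrap the GWS BO in a kgd_mem handle for
 * a KFD process
 *
 * Allocates a kgd_mem wrapper for the amdgpu GWS BO, adds it to the process's
 * KFD BO list, validates it in the GWS domain and attaches the process
 * eviction fence, so that amdgpu and amdkfd can evict each other. On success
 * *mem holds the new handle, which the caller later passes to
 * amdgpu_amdkfd_remove_gws_from_process().
 */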
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the BO so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}