/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t domain;
	bool wait;
};

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}
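/* The limits below are computed with shifts in
 * amdgpu_amdkfd_gpuvm_init_mem_limits():
 *   (mem >> 1) + (mem >> 2) = mem/2 + mem/4 = 3/4 * mem
 *   (mem >> 1) - (mem >> 3) = mem/2 - mem/8 = 3/8 * mem
 * e.g. with 16 GiB of usable RAM that is a 12 GiB system memory
 * limit and a 6 GiB TTM limit.
 */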
/* Set memory usage limits. Currently, limits are
 * System (TTM + userptr) memory - 3/4th System RAM
 * TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
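/* Counterpart of amdgpu_amdkfd_reserve_mem_limit() above, called from
 * the BO release path. The domain/sg classification below must mirror
 * the one used at reservation time so the accounting stays balanced.
 */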
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
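/* Note on the loops in amdgpu_amdkfd_remove_eviction_fence() above:
 * the first pass partitions the shared fences in a single sweep,
 * compacting fences to keep into new->shared[0..k) and parking fences
 * that match ef's context at new->shared[j..shared_count). After the
 * new list is published with shared_count = k, the second pass drops
 * the references of the parked fences.
 */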
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
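/* Worked example for get_pte_flags() above: a writable, executable,
 * non-coherent VRAM allocation mapped on the GPU that owns it
 * (bo_adev == adev) on Arcturus resolves to READABLE | WRITEABLE |
 * EXECUTABLE | MTYPE_RW; mapped from a peer GPU it gets MTYPE_UC
 * instead.
 */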
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}
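/* The helpers above keep every KFD BO on exactly one process-wide
 * list: userptr BOs start on userptr_valid_list (and move to
 * userptr_inval_list while evicted, see the restore worker below),
 * all other BOs live on kfd_bo_list used during process restore.
 */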
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};
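/* Typical lifecycle of a reservation context (a sketch; the map and
 * unmap paths below follow this pattern with wait == false):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (ret)
 *		return ret;
 *	... update mappings, collecting fences in ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, true, false);
 */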
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}
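/* reserve_bo_and_cond_vms() above deliberately walks bo_va_list
 * twice: the first pass only counts matching VMs so that vm_pd can be
 * sized with a single allocation, the second pass fills in the PD BO
 * list entries using the same filter.
 */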
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
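/* The single-entry sg_table built above carries a fixed DMA address
 * (a doorbell or MMIO page) instead of system pages, which is why BOs
 * using it are created with ttm_bo_type_sg and bypass the usual page
 * allocation.
 */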
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL,
					sync, pd->tbo.base.resv,
					AMDGPU_FENCE_OWNER_KFD, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}
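/* Note the unwind order in init_kfd_vm() above: the error labels undo
 * the setup steps strictly in reverse, and create_evict_fence_fail
 * sits inside the if (info) block because the eviction fence is the
 * only allocation made after info itself.
 */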
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}
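/* amdgpu_amdkfd_gpuvm_release_process_vm() below is the counterpart
 * of acquire_process_vm() above: the VM came from an existing DRM
 * file descriptor rather than being allocated by KFD, so it is only
 * converted back from a compute VM here and freed later through the
 * normal DRM teardown path.
 */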
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released during the conversion to a compute vm. The
	 * current pasid is managed by KFD and will be released
	 * on KFD process destruction. Set the amdgpu pasid to 0
	 * to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
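/* The shift above reflects how the page directory base is programmed:
 * on ASICs before CHIP_VEGA10 it is written as a GPU page frame
 * number, while newer ASICs take the full address returned by
 * amdgpu_gmc_pd_addr().
 */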
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}
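/* Example of the AQL workaround in the allocator above: a 4 MiB
 * ALLOC_MEM_FLAGS_AQL_QUEUE_MEM request creates a 2 MiB BO, which
 * add_bo_to_vm() later maps at both va and va + bo_size, so the queue
 * still sees the requested range of GPU virtual addresses.
 */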
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}
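/* Freeing is only allowed once mapped_to_gpu_memory has dropped to
 * zero (the -EBUSY check above); the counter is incremented per VM in
 * the map path below and decremented again on unmap.
 */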
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
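/* The eviction fence is attached on the first map above and removed
 * on the last unmap below, so as long as a KFD BO is mapped anywhere,
 * an attempted TTM eviction signals the fence and triggers the KFD
 * eviction/restore machinery instead of silently moving the BO.
 */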
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}
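/* The mb() above orders copying *adev->gmc.vm_fault_info into *mem
 * before the updated flag is cleared, so fault data published by the
 * interrupt path after the clear cannot be lost behind a stale flag.
 */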
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;

	(*mem)->bo = amdgpu_bo_ref(bo);
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
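/* Example of the counting above: three userptr invalidations in quick
 * succession raise evicted_bos to 1, 2, 3, but only the first one
 * quiesces the queues; the restore worker later tries to swap the
 * whole count back to 0 with a single atomic_cmpxchg (see below).
 */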
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}
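/* Moving an invalidated userptr BO to the CPU domain above is what
 * releases its stale user pages; the later
 * amdgpu_ttm_tt_get_user_pages() call repopulates the ttm with the
 * current page addresses before the BO is validated again.
 */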
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:
	return ret;
}
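/* Restore can fail part-way through; note that
 * validate_invalid_user_pages() above re-increments mem->invalid on a
 * failed PTE update, so the BO is picked up again by the next restore
 * attempt.
 */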
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
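/* Both amdgpu_amdkfd_evict_userptr() and the failure path above
 * reschedule restore_userptr_work with the same short delay
 * (AMDGPU_USERPTR_RESTORE_DELAY_MS), so bursts of MMU notifier
 * invalidations coalesce into a single restore attempt.
 */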

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. The
 * function must be called while the process is still valid. BO restore
 * involves:
 *
 * 1. Release the old eviction fence and create a new one
 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as
 *    pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7. Add the fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {
		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry,
					       &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);
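
	/* At this point every validate and PT update collected in
	 * sync_obj has completed, so it is safe to retire the old
	 * eviction fence and attach a fresh one below.
	 */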

	/* Release the old eviction fence and create a new one. Because a
	 * fence only goes from unsignaled to signaled once, it cannot be
	 * reused. Use the context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}
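
/* A sketch of the expected caller of the function above: KFD's restore
 * worker passes its per-process info and keeps the new eviction fence.
 * Field and variable names here are illustrative, not taken from this
 * file:
 *
 *	struct dma_fence *ef;
 *	int ret;
 *
 *	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 *						      &ef);
 *	if (!ret) {
 *		dma_fence_put(p->ef);
 *		p->ef = ef;
 *	}
 */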

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so the restore worker
	 * won't touch it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
					    process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}
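
/* Expected pairing of the two GWS helpers above (illustrative sketch;
 * the real caller is KFD's queue-management code, and gws_bo is the
 * device's GWS buffer object):
 *
 *	struct kgd_mem *gws_mem;
 *	int ret;
 *
 *	ret = amdgpu_amdkfd_add_gws_to_process(pinfo, gws_bo, &gws_mem);
 *	if (ret)
 *		return ret;
 *	...use GWS...
 *	ret = amdgpu_amdkfd_remove_gws_from_process(pinfo, gws_mem);
 */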