1 /* 2 * Copyright 2014-2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 #include <linux/dma-buf.h> 23 #include <linux/list.h> 24 #include <linux/pagemap.h> 25 #include <linux/sched/mm.h> 26 #include <linux/sched/task.h> 27 28 #include "amdgpu_object.h" 29 #include "amdgpu_gem.h" 30 #include "amdgpu_vm.h" 31 #include "amdgpu_amdkfd.h" 32 #include "amdgpu_dma_buf.h" 33 #include <uapi/linux/kfd_ioctl.h> 34 35 /* BO flag to indicate a KFD userptr BO */ 36 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) 37 38 /* Userptr restore delay, just long enough to allow consecutive VM 39 * changes to accumulate 40 */ 41 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1 42 43 /* Impose limit on how much memory KFD can use */ 44 static struct { 45 uint64_t max_system_mem_limit; 46 uint64_t max_ttm_mem_limit; 47 int64_t system_mem_used; 48 int64_t ttm_mem_used; 49 spinlock_t mem_limit_lock; 50 } kfd_mem_limit; 51 52 /* Struct used for amdgpu_amdkfd_bo_validate */ 53 struct amdgpu_vm_parser { 54 uint32_t domain; 55 bool wait; 56 }; 57 58 static const char * const domain_bit_to_string[] = { 59 "CPU", 60 "GTT", 61 "VRAM", 62 "GDS", 63 "GWS", 64 "OA" 65 }; 66 67 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1] 68 69 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); 70 71 72 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) 73 { 74 return (struct amdgpu_device *)kgd; 75 } 76 77 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, 78 struct kgd_mem *mem) 79 { 80 struct kfd_bo_va_list *entry; 81 82 list_for_each_entry(entry, &mem->bo_va_list, bo_list) 83 if (entry->bo_va->base.vm == avm) 84 return false; 85 86 return true; 87 } 88 89 /* Set memory usage limits. 
Current, limits are 90 * System (TTM + userptr) memory - 15/16th System RAM 91 * TTM memory - 3/8th System RAM 92 */ 93 void amdgpu_amdkfd_gpuvm_init_mem_limits(void) 94 { 95 struct sysinfo si; 96 uint64_t mem; 97 98 si_meminfo(&si); 99 mem = si.totalram - si.totalhigh; 100 mem *= si.mem_unit; 101 102 spin_lock_init(&kfd_mem_limit.mem_limit_lock); 103 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); 104 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); 105 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", 106 (kfd_mem_limit.max_system_mem_limit >> 20), 107 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 108 } 109 110 /* Estimate page table size needed to represent a given memory size 111 * 112 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory 113 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB 114 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize 115 * for 2MB pages for TLB efficiency. However, small allocations and 116 * fragmented system memory still need some 4KB pages. We choose a 117 * compromise that should work in most cases without reserving too 118 * much memory for page tables unnecessarily (factor 16K, >> 14). 119 */ 120 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14) 121 122 static size_t amdgpu_amdkfd_acc_size(uint64_t size) 123 { 124 size >>= PAGE_SHIFT; 125 size *= sizeof(dma_addr_t) + sizeof(void *); 126 127 return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) + 128 __roundup_pow_of_two(sizeof(struct ttm_tt)) + 129 PAGE_ALIGN(size); 130 } 131 132 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, 133 uint64_t size, u32 domain, bool sg) 134 { 135 uint64_t reserved_for_pt = 136 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); 137 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; 138 int ret = 0; 139 140 acc_size = amdgpu_amdkfd_acc_size(size); 141 142 vram_needed = 0; 143 if (domain == AMDGPU_GEM_DOMAIN_GTT) { 144 /* TTM GTT memory */ 145 system_mem_needed = acc_size + size; 146 ttm_mem_needed = acc_size + size; 147 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { 148 /* Userptr */ 149 system_mem_needed = acc_size + size; 150 ttm_mem_needed = acc_size; 151 } else { 152 /* VRAM and SG */ 153 system_mem_needed = acc_size; 154 ttm_mem_needed = acc_size; 155 if (domain == AMDGPU_GEM_DOMAIN_VRAM) 156 vram_needed = size; 157 } 158 159 spin_lock(&kfd_mem_limit.mem_limit_lock); 160 161 if (kfd_mem_limit.system_mem_used + system_mem_needed > 162 kfd_mem_limit.max_system_mem_limit) 163 pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); 164 165 if ((kfd_mem_limit.system_mem_used + system_mem_needed > 166 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || 167 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > 168 kfd_mem_limit.max_ttm_mem_limit) || 169 (adev->kfd.vram_used + vram_needed > 170 adev->gmc.real_vram_size - reserved_for_pt)) { 171 ret = -ENOMEM; 172 } else { 173 kfd_mem_limit.system_mem_used += system_mem_needed; 174 kfd_mem_limit.ttm_mem_used += ttm_mem_needed; 175 adev->kfd.vram_used += vram_needed; 176 } 177 178 spin_unlock(&kfd_mem_limit.mem_limit_lock); 179 return ret; 180 } 181 182 static void unreserve_mem_limit(struct amdgpu_device *adev, 183 uint64_t size, u32 domain, bool sg) 184 { 185 size_t acc_size; 186 187 acc_size = amdgpu_amdkfd_acc_size(size); 188 189 spin_lock(&kfd_mem_limit.mem_limit_lock); 190 if (domain == AMDGPU_GEM_DOMAIN_GTT) { 191 kfd_mem_limit.system_mem_used -= (acc_size + size); 192 kfd_mem_limit.ttm_mem_used -= 
(acc_size + size); 193 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { 194 kfd_mem_limit.system_mem_used -= (acc_size + size); 195 kfd_mem_limit.ttm_mem_used -= acc_size; 196 } else { 197 kfd_mem_limit.system_mem_used -= acc_size; 198 kfd_mem_limit.ttm_mem_used -= acc_size; 199 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 200 adev->kfd.vram_used -= size; 201 WARN_ONCE(adev->kfd.vram_used < 0, 202 "kfd VRAM memory accounting unbalanced"); 203 } 204 } 205 WARN_ONCE(kfd_mem_limit.system_mem_used < 0, 206 "kfd system memory accounting unbalanced"); 207 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, 208 "kfd TTM memory accounting unbalanced"); 209 210 spin_unlock(&kfd_mem_limit.mem_limit_lock); 211 } 212 213 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) 214 { 215 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 216 u32 domain = bo->preferred_domains; 217 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); 218 219 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { 220 domain = AMDGPU_GEM_DOMAIN_CPU; 221 sg = false; 222 } 223 224 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); 225 } 226 227 228 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's 229 * reservation object. 230 * 231 * @bo: [IN] Remove eviction fence(s) from this BO 232 * @ef: [IN] This eviction fence is removed if it 233 * is present in the shared list. 234 * 235 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held. 236 */ 237 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 238 struct amdgpu_amdkfd_fence *ef) 239 { 240 struct dma_resv *resv = bo->tbo.base.resv; 241 struct dma_resv_list *old, *new; 242 unsigned int i, j, k; 243 244 if (!ef) 245 return -EINVAL; 246 247 old = dma_resv_get_list(resv); 248 if (!old) 249 return 0; 250 251 new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL); 252 if (!new) 253 return -ENOMEM; 254 255 /* Go through all the shared fences in the reservation object and sort 256 * the interesting ones to the end of the list. 
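 *
 * Illustrative example (derived from the loop below): with the shared
 * list {A, B, E1, C, E2}, where E1 and E2 belong to the eviction fence
 * context of @ef, the new list becomes {A, B, C, E2, E1} with
 * shared_count = 3, so the two eviction fences are no longer visible
 * and are released afterwards.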
257 */ 258 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) { 259 struct dma_fence *f; 260 261 f = rcu_dereference_protected(old->shared[i], 262 dma_resv_held(resv)); 263 264 if (f->context == ef->base.context) 265 RCU_INIT_POINTER(new->shared[--j], f); 266 else 267 RCU_INIT_POINTER(new->shared[k++], f); 268 } 269 new->shared_max = old->shared_max; 270 new->shared_count = k; 271 272 /* Install the new fence list, seqcount provides the barriers */ 273 write_seqcount_begin(&resv->seq); 274 RCU_INIT_POINTER(resv->fence, new); 275 write_seqcount_end(&resv->seq); 276 277 /* Drop the references to the removed fences or move them to ef_list */ 278 for (i = j, k = 0; i < old->shared_count; ++i) { 279 struct dma_fence *f; 280 281 f = rcu_dereference_protected(new->shared[i], 282 dma_resv_held(resv)); 283 dma_fence_put(f); 284 } 285 kfree_rcu(old, rcu); 286 287 return 0; 288 } 289 290 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) 291 { 292 struct amdgpu_bo *root = bo; 293 struct amdgpu_vm_bo_base *vm_bo; 294 struct amdgpu_vm *vm; 295 struct amdkfd_process_info *info; 296 struct amdgpu_amdkfd_fence *ef; 297 int ret; 298 299 /* we can always get vm_bo from root PD bo.*/ 300 while (root->parent) 301 root = root->parent; 302 303 vm_bo = root->vm_bo; 304 if (!vm_bo) 305 return 0; 306 307 vm = vm_bo->vm; 308 if (!vm) 309 return 0; 310 311 info = vm->process_info; 312 if (!info || !info->eviction_fence) 313 return 0; 314 315 ef = container_of(dma_fence_get(&info->eviction_fence->base), 316 struct amdgpu_amdkfd_fence, base); 317 318 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); 319 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); 320 dma_resv_unlock(bo->tbo.base.resv); 321 322 dma_fence_put(&ef->base); 323 return ret; 324 } 325 326 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, 327 bool wait) 328 { 329 struct ttm_operation_ctx ctx = { false, false }; 330 int ret; 331 332 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), 333 "Called with userptr BO")) 334 return -EINVAL; 335 336 amdgpu_bo_placement_from_domain(bo, domain); 337 338 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 339 if (ret) 340 goto validate_fail; 341 if (wait) 342 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 343 344 validate_fail: 345 return ret; 346 } 347 348 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) 349 { 350 struct amdgpu_vm_parser *p = param; 351 352 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait); 353 } 354 355 /* vm_validate_pt_pd_bos - Validate page table and directory BOs 356 * 357 * Page directories are not updated here because huge page handling 358 * during page table updates can invalidate page directory entries 359 * again. Page directories are only updated after updating page 360 * tables. 
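 *
 * The resulting order, as used by the callers in this file, is roughly:
 *   vm_validate_pt_pd_bos()  - validate the PD and PT BOs
 *   update_gpuvm_pte()       - write the PTEs
 *   vm_update_pds()          - finally write the page directories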
361 */ 362 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) 363 { 364 struct amdgpu_bo *pd = vm->root.base.bo; 365 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 366 struct amdgpu_vm_parser param; 367 int ret; 368 369 param.domain = AMDGPU_GEM_DOMAIN_VRAM; 370 param.wait = false; 371 372 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, 373 ¶m); 374 if (ret) { 375 pr_err("failed to validate PT BOs\n"); 376 return ret; 377 } 378 379 ret = amdgpu_amdkfd_validate(¶m, pd); 380 if (ret) { 381 pr_err("failed to validate PD\n"); 382 return ret; 383 } 384 385 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 386 387 if (vm->use_cpu_for_update) { 388 ret = amdgpu_bo_kmap(pd, NULL); 389 if (ret) { 390 pr_err("failed to kmap PD, ret=%d\n", ret); 391 return ret; 392 } 393 } 394 395 return 0; 396 } 397 398 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 399 { 400 struct amdgpu_bo *pd = vm->root.base.bo; 401 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 402 int ret; 403 404 ret = amdgpu_vm_update_pdes(adev, vm, false); 405 if (ret) 406 return ret; 407 408 return amdgpu_sync_fence(sync, vm->last_update); 409 } 410 411 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) 412 { 413 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 414 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; 415 uint32_t mapping_flags; 416 417 mapping_flags = AMDGPU_VM_PAGE_READABLE; 418 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) 419 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; 420 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) 421 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 422 423 switch (adev->asic_type) { 424 case CHIP_ARCTURUS: 425 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 426 if (bo_adev == adev) 427 mapping_flags |= coherent ? 428 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 429 else 430 mapping_flags |= AMDGPU_VM_MTYPE_UC; 431 } else { 432 mapping_flags |= coherent ? 433 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 434 } 435 break; 436 default: 437 mapping_flags |= coherent ? 438 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 439 } 440 441 return amdgpu_gem_va_map_flags(adev, mapping_flags); 442 } 443 444 /* add_bo_to_vm - Add a BO to a VM 445 * 446 * Everything that needs to bo done only once when a BO is first added 447 * to a VM. It can later be mapped and unmapped many times without 448 * repeating these steps. 449 * 450 * 1. Allocate and initialize BO VA entry data structure 451 * 2. Add BO to the VM 452 * 3. Determine ASIC-specific PTE flags 453 * 4. Alloc page tables and directories if needed 454 * 4a. 
Validate new page tables and directories 455 */ 456 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, 457 struct amdgpu_vm *vm, bool is_aql, 458 struct kfd_bo_va_list **p_bo_va_entry) 459 { 460 int ret; 461 struct kfd_bo_va_list *bo_va_entry; 462 struct amdgpu_bo *bo = mem->bo; 463 uint64_t va = mem->va; 464 struct list_head *list_bo_va = &mem->bo_va_list; 465 unsigned long bo_size = bo->tbo.base.size; 466 467 if (!va) { 468 pr_err("Invalid VA when adding BO to VM\n"); 469 return -EINVAL; 470 } 471 472 if (is_aql) 473 va += bo_size; 474 475 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL); 476 if (!bo_va_entry) 477 return -ENOMEM; 478 479 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, 480 va + bo_size, vm); 481 482 /* Add BO to VM internal data structures*/ 483 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo); 484 if (!bo_va_entry->bo_va) { 485 ret = -EINVAL; 486 pr_err("Failed to add BO object to VM. ret == %d\n", 487 ret); 488 goto err_vmadd; 489 } 490 491 bo_va_entry->va = va; 492 bo_va_entry->pte_flags = get_pte_flags(adev, mem); 493 bo_va_entry->kgd_dev = (void *)adev; 494 list_add(&bo_va_entry->bo_list, list_bo_va); 495 496 if (p_bo_va_entry) 497 *p_bo_va_entry = bo_va_entry; 498 499 /* Allocate validate page tables if needed */ 500 ret = vm_validate_pt_pd_bos(vm); 501 if (ret) { 502 pr_err("validate_pt_pd_bos() failed\n"); 503 goto err_alloc_pts; 504 } 505 506 return 0; 507 508 err_alloc_pts: 509 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); 510 list_del(&bo_va_entry->bo_list); 511 err_vmadd: 512 kfree(bo_va_entry); 513 return ret; 514 } 515 516 static void remove_bo_from_vm(struct amdgpu_device *adev, 517 struct kfd_bo_va_list *entry, unsigned long size) 518 { 519 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n", 520 entry->va, 521 entry->va + size, entry); 522 amdgpu_vm_bo_rmv(adev, entry->bo_va); 523 list_del(&entry->bo_list); 524 kfree(entry); 525 } 526 527 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, 528 struct amdkfd_process_info *process_info, 529 bool userptr) 530 { 531 struct ttm_validate_buffer *entry = &mem->validate_list; 532 struct amdgpu_bo *bo = mem->bo; 533 534 INIT_LIST_HEAD(&entry->head); 535 entry->num_shared = 1; 536 entry->bo = &bo->tbo; 537 mutex_lock(&process_info->lock); 538 if (userptr) 539 list_add_tail(&entry->head, &process_info->userptr_valid_list); 540 else 541 list_add_tail(&entry->head, &process_info->kfd_bo_list); 542 mutex_unlock(&process_info->lock); 543 } 544 545 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, 546 struct amdkfd_process_info *process_info) 547 { 548 struct ttm_validate_buffer *bo_list_entry; 549 550 bo_list_entry = &mem->validate_list; 551 mutex_lock(&process_info->lock); 552 list_del(&bo_list_entry->head); 553 mutex_unlock(&process_info->lock); 554 } 555 556 /* Initializes user pages. It registers the MMU notifier and validates 557 * the userptr BO in the GTT domain. 558 * 559 * The BO must already be on the userptr_valid_list. Otherwise an 560 * eviction and restore may happen that leaves the new BO unmapped 561 * with the user mode queues running. 562 * 563 * Takes the process_info->lock to protect against concurrent restore 564 * workers. 565 * 566 * Returns 0 for success, negative errno for errors. 
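 *
 * The steps below, in order: amdgpu_ttm_tt_set_userptr() to attach the
 * user address, amdgpu_mn_register() for the MMU notifier,
 * amdgpu_ttm_tt_get_user_pages() to fetch the pages, and a
 * ttm_bo_validate() in the requested domain.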
567 */ 568 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr) 569 { 570 struct amdkfd_process_info *process_info = mem->process_info; 571 struct amdgpu_bo *bo = mem->bo; 572 struct ttm_operation_ctx ctx = { true, false }; 573 int ret = 0; 574 575 mutex_lock(&process_info->lock); 576 577 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); 578 if (ret) { 579 pr_err("%s: Failed to set userptr: %d\n", __func__, ret); 580 goto out; 581 } 582 583 ret = amdgpu_mn_register(bo, user_addr); 584 if (ret) { 585 pr_err("%s: Failed to register MMU notifier: %d\n", 586 __func__, ret); 587 goto out; 588 } 589 590 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); 591 if (ret) { 592 pr_err("%s: Failed to get user pages: %d\n", __func__, ret); 593 goto unregister_out; 594 } 595 596 ret = amdgpu_bo_reserve(bo, true); 597 if (ret) { 598 pr_err("%s: Failed to reserve BO\n", __func__); 599 goto release_out; 600 } 601 amdgpu_bo_placement_from_domain(bo, mem->domain); 602 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 603 if (ret) 604 pr_err("%s: failed to validate BO\n", __func__); 605 amdgpu_bo_unreserve(bo); 606 607 release_out: 608 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 609 unregister_out: 610 if (ret) 611 amdgpu_mn_unregister(bo); 612 out: 613 mutex_unlock(&process_info->lock); 614 return ret; 615 } 616 617 /* Reserving a BO and its page table BOs must happen atomically to 618 * avoid deadlocks. Some operations update multiple VMs at once. Track 619 * all the reservation info in a context structure. Optionally a sync 620 * object can track VM updates. 621 */ 622 struct bo_vm_reservation_context { 623 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ 624 unsigned int n_vms; /* Number of VMs reserved */ 625 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ 626 struct ww_acquire_ctx ticket; /* Reservation ticket */ 627 struct list_head list, duplicates; /* BO lists */ 628 struct amdgpu_sync *sync; /* Pointer to sync object */ 629 bool reserved; /* Whether BOs are reserved */ 630 }; 631 632 enum bo_vm_match { 633 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */ 634 BO_VM_MAPPED, /* Match VMs where a BO is mapped */ 635 BO_VM_ALL, /* Match all VMs a BO was added to */ 636 }; 637 638 /** 639 * reserve_bo_and_vm - reserve a BO and a VM unconditionally. 640 * @mem: KFD BO structure. 641 * @vm: the VM to reserve. 642 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 
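 *
 * A minimal usage sketch (error handling elided):
 *
 *   struct bo_vm_reservation_context ctx;
 *
 *   ret = reserve_bo_and_vm(mem, vm, &ctx);
 *   if (!ret) {
 *           ... update mappings or page tables ...
 *           unreserve_bo_and_vms(&ctx, false, false);
 *   }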
643 */ 644 static int reserve_bo_and_vm(struct kgd_mem *mem, 645 struct amdgpu_vm *vm, 646 struct bo_vm_reservation_context *ctx) 647 { 648 struct amdgpu_bo *bo = mem->bo; 649 int ret; 650 651 WARN_ON(!vm); 652 653 ctx->reserved = false; 654 ctx->n_vms = 1; 655 ctx->sync = &mem->sync; 656 657 INIT_LIST_HEAD(&ctx->list); 658 INIT_LIST_HEAD(&ctx->duplicates); 659 660 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); 661 if (!ctx->vm_pd) 662 return -ENOMEM; 663 664 ctx->kfd_bo.priority = 0; 665 ctx->kfd_bo.tv.bo = &bo->tbo; 666 ctx->kfd_bo.tv.num_shared = 1; 667 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 668 669 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); 670 671 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 672 false, &ctx->duplicates); 673 if (ret) { 674 pr_err("Failed to reserve buffers in ttm.\n"); 675 kfree(ctx->vm_pd); 676 ctx->vm_pd = NULL; 677 return ret; 678 } 679 680 ctx->reserved = true; 681 return 0; 682 } 683 684 /** 685 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally 686 * @mem: KFD BO structure. 687 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO 688 * is used. Otherwise, a single VM associated with the BO. 689 * @map_type: the mapping status that will be used to filter the VMs. 690 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 691 * 692 * Returns 0 for success, negative for failure. 693 */ 694 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, 695 struct amdgpu_vm *vm, enum bo_vm_match map_type, 696 struct bo_vm_reservation_context *ctx) 697 { 698 struct amdgpu_bo *bo = mem->bo; 699 struct kfd_bo_va_list *entry; 700 unsigned int i; 701 int ret; 702 703 ctx->reserved = false; 704 ctx->n_vms = 0; 705 ctx->vm_pd = NULL; 706 ctx->sync = &mem->sync; 707 708 INIT_LIST_HEAD(&ctx->list); 709 INIT_LIST_HEAD(&ctx->duplicates); 710 711 list_for_each_entry(entry, &mem->bo_va_list, bo_list) { 712 if ((vm && vm != entry->bo_va->base.vm) || 713 (entry->is_mapped != map_type 714 && map_type != BO_VM_ALL)) 715 continue; 716 717 ctx->n_vms++; 718 } 719 720 if (ctx->n_vms != 0) { 721 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), 722 GFP_KERNEL); 723 if (!ctx->vm_pd) 724 return -ENOMEM; 725 } 726 727 ctx->kfd_bo.priority = 0; 728 ctx->kfd_bo.tv.bo = &bo->tbo; 729 ctx->kfd_bo.tv.num_shared = 1; 730 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 731 732 i = 0; 733 list_for_each_entry(entry, &mem->bo_va_list, bo_list) { 734 if ((vm && vm != entry->bo_va->base.vm) || 735 (entry->is_mapped != map_type 736 && map_type != BO_VM_ALL)) 737 continue; 738 739 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, 740 &ctx->vm_pd[i]); 741 i++; 742 } 743 744 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 745 false, &ctx->duplicates); 746 if (ret) { 747 pr_err("Failed to reserve buffers in ttm.\n"); 748 kfree(ctx->vm_pd); 749 ctx->vm_pd = NULL; 750 return ret; 751 } 752 753 ctx->reserved = true; 754 return 0; 755 } 756 757 /** 758 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context 759 * @ctx: Reservation context to unreserve 760 * @wait: Optionally wait for a sync object representing pending VM updates 761 * @intr: Whether the wait is interruptible 762 * 763 * Also frees any resources allocated in 764 * reserve_bo_and_(cond_)vm(s). Returns the status from 765 * amdgpu_sync_wait. 
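 *
 * It is safe to call this even when the reservation itself failed: with
 * ctx->reserved false only the optional sync wait and the kfree() of
 * ctx->vm_pd are performed.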
766 */ 767 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, 768 bool wait, bool intr) 769 { 770 int ret = 0; 771 772 if (wait) 773 ret = amdgpu_sync_wait(ctx->sync, intr); 774 775 if (ctx->reserved) 776 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); 777 kfree(ctx->vm_pd); 778 779 ctx->sync = NULL; 780 781 ctx->reserved = false; 782 ctx->vm_pd = NULL; 783 784 return ret; 785 } 786 787 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, 788 struct kfd_bo_va_list *entry, 789 struct amdgpu_sync *sync) 790 { 791 struct amdgpu_bo_va *bo_va = entry->bo_va; 792 struct amdgpu_vm *vm = bo_va->base.vm; 793 794 amdgpu_vm_bo_unmap(adev, bo_va, entry->va); 795 796 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); 797 798 amdgpu_sync_fence(sync, bo_va->last_pt_update); 799 800 return 0; 801 } 802 803 static int update_gpuvm_pte(struct amdgpu_device *adev, 804 struct kfd_bo_va_list *entry, 805 struct amdgpu_sync *sync) 806 { 807 int ret; 808 struct amdgpu_bo_va *bo_va = entry->bo_va; 809 810 /* Update the page tables */ 811 ret = amdgpu_vm_bo_update(adev, bo_va, false); 812 if (ret) { 813 pr_err("amdgpu_vm_bo_update failed\n"); 814 return ret; 815 } 816 817 return amdgpu_sync_fence(sync, bo_va->last_pt_update); 818 } 819 820 static int map_bo_to_gpuvm(struct amdgpu_device *adev, 821 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, 822 bool no_update_pte) 823 { 824 int ret; 825 826 /* Set virtual address for the allocation */ 827 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0, 828 amdgpu_bo_size(entry->bo_va->base.bo), 829 entry->pte_flags); 830 if (ret) { 831 pr_err("Failed to map VA 0x%llx in vm. ret %d\n", 832 entry->va, ret); 833 return ret; 834 } 835 836 if (no_update_pte) 837 return 0; 838 839 ret = update_gpuvm_pte(adev, entry, sync); 840 if (ret) { 841 pr_err("update_gpuvm_pte() failed\n"); 842 goto update_gpuvm_pte_failed; 843 } 844 845 return 0; 846 847 update_gpuvm_pte_failed: 848 unmap_bo_from_gpuvm(adev, entry, sync); 849 return ret; 850 } 851 852 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size) 853 { 854 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); 855 856 if (!sg) 857 return NULL; 858 if (sg_alloc_table(sg, 1, GFP_KERNEL)) { 859 kfree(sg); 860 return NULL; 861 } 862 sg->sgl->dma_address = addr; 863 sg->sgl->length = size; 864 #ifdef CONFIG_NEED_SG_DMA_LENGTH 865 sg->sgl->dma_length = size; 866 #endif 867 return sg; 868 } 869 870 static int process_validate_vms(struct amdkfd_process_info *process_info) 871 { 872 struct amdgpu_vm *peer_vm; 873 int ret; 874 875 list_for_each_entry(peer_vm, &process_info->vm_list_head, 876 vm_list_node) { 877 ret = vm_validate_pt_pd_bos(peer_vm); 878 if (ret) 879 return ret; 880 } 881 882 return 0; 883 } 884 885 static int process_sync_pds_resv(struct amdkfd_process_info *process_info, 886 struct amdgpu_sync *sync) 887 { 888 struct amdgpu_vm *peer_vm; 889 int ret; 890 891 list_for_each_entry(peer_vm, &process_info->vm_list_head, 892 vm_list_node) { 893 struct amdgpu_bo *pd = peer_vm->root.base.bo; 894 895 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 896 AMDGPU_SYNC_NE_OWNER, 897 AMDGPU_FENCE_OWNER_KFD); 898 if (ret) 899 return ret; 900 } 901 902 return 0; 903 } 904 905 static int process_update_pds(struct amdkfd_process_info *process_info, 906 struct amdgpu_sync *sync) 907 { 908 struct amdgpu_vm *peer_vm; 909 int ret; 910 911 list_for_each_entry(peer_vm, &process_info->vm_list_head, 912 vm_list_node) { 913 ret = vm_update_pds(peer_vm, sync); 914 if (ret) 915 
return ret; 916 } 917 918 return 0; 919 } 920 921 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, 922 struct dma_fence **ef) 923 { 924 struct amdkfd_process_info *info = NULL; 925 int ret; 926 927 if (!*process_info) { 928 info = kzalloc(sizeof(*info), GFP_KERNEL); 929 if (!info) 930 return -ENOMEM; 931 932 mutex_init(&info->lock); 933 INIT_LIST_HEAD(&info->vm_list_head); 934 INIT_LIST_HEAD(&info->kfd_bo_list); 935 INIT_LIST_HEAD(&info->userptr_valid_list); 936 INIT_LIST_HEAD(&info->userptr_inval_list); 937 938 info->eviction_fence = 939 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 940 current->mm); 941 if (!info->eviction_fence) { 942 pr_err("Failed to create eviction fence\n"); 943 ret = -ENOMEM; 944 goto create_evict_fence_fail; 945 } 946 947 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); 948 atomic_set(&info->evicted_bos, 0); 949 INIT_DELAYED_WORK(&info->restore_userptr_work, 950 amdgpu_amdkfd_restore_userptr_worker); 951 952 *process_info = info; 953 *ef = dma_fence_get(&info->eviction_fence->base); 954 } 955 956 vm->process_info = *process_info; 957 958 /* Validate page directory and attach eviction fence */ 959 ret = amdgpu_bo_reserve(vm->root.base.bo, true); 960 if (ret) 961 goto reserve_pd_fail; 962 ret = vm_validate_pt_pd_bos(vm); 963 if (ret) { 964 pr_err("validate_pt_pd_bos() failed\n"); 965 goto validate_pd_fail; 966 } 967 ret = amdgpu_bo_sync_wait(vm->root.base.bo, 968 AMDGPU_FENCE_OWNER_KFD, false); 969 if (ret) 970 goto wait_pd_fail; 971 ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 972 if (ret) 973 goto reserve_shared_fail; 974 amdgpu_bo_fence(vm->root.base.bo, 975 &vm->process_info->eviction_fence->base, true); 976 amdgpu_bo_unreserve(vm->root.base.bo); 977 978 /* Update process info */ 979 mutex_lock(&vm->process_info->lock); 980 list_add_tail(&vm->vm_list_node, 981 &(vm->process_info->vm_list_head)); 982 vm->process_info->n_vms++; 983 mutex_unlock(&vm->process_info->lock); 984 985 return 0; 986 987 reserve_shared_fail: 988 wait_pd_fail: 989 validate_pd_fail: 990 amdgpu_bo_unreserve(vm->root.base.bo); 991 reserve_pd_fail: 992 vm->process_info = NULL; 993 if (info) { 994 /* Two fence references: one in info and one in *ef */ 995 dma_fence_put(&info->eviction_fence->base); 996 dma_fence_put(*ef); 997 *ef = NULL; 998 *process_info = NULL; 999 put_pid(info->pid); 1000 create_evict_fence_fail: 1001 mutex_destroy(&info->lock); 1002 kfree(info); 1003 } 1004 return ret; 1005 } 1006 1007 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid, 1008 void **vm, void **process_info, 1009 struct dma_fence **ef) 1010 { 1011 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1012 struct amdgpu_vm *new_vm; 1013 int ret; 1014 1015 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL); 1016 if (!new_vm) 1017 return -ENOMEM; 1018 1019 /* Initialize AMDGPU part of the VM */ 1020 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid); 1021 if (ret) { 1022 pr_err("Failed init vm ret %d\n", ret); 1023 goto amdgpu_vm_init_fail; 1024 } 1025 1026 /* Initialize KFD part of the VM and process info */ 1027 ret = init_kfd_vm(new_vm, process_info, ef); 1028 if (ret) 1029 goto init_kfd_vm_fail; 1030 1031 *vm = (void *) new_vm; 1032 1033 return 0; 1034 1035 init_kfd_vm_fail: 1036 amdgpu_vm_fini(adev, new_vm); 1037 amdgpu_vm_init_fail: 1038 kfree(new_vm); 1039 return ret; 1040 } 1041 1042 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, 1043 struct file *filp, u32 pasid, 1044 void **vm, void 
**process_info, 1045 struct dma_fence **ef) 1046 { 1047 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1048 struct drm_file *drm_priv = filp->private_data; 1049 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; 1050 struct amdgpu_vm *avm = &drv_priv->vm; 1051 int ret; 1052 1053 /* Already a compute VM? */ 1054 if (avm->process_info) 1055 return -EINVAL; 1056 1057 /* Convert VM into a compute VM */ 1058 ret = amdgpu_vm_make_compute(adev, avm, pasid); 1059 if (ret) 1060 return ret; 1061 1062 /* Initialize KFD part of the VM and process info */ 1063 ret = init_kfd_vm(avm, process_info, ef); 1064 if (ret) 1065 return ret; 1066 1067 *vm = (void *)avm; 1068 1069 return 0; 1070 } 1071 1072 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, 1073 struct amdgpu_vm *vm) 1074 { 1075 struct amdkfd_process_info *process_info = vm->process_info; 1076 struct amdgpu_bo *pd = vm->root.base.bo; 1077 1078 if (!process_info) 1079 return; 1080 1081 /* Release eviction fence from PD */ 1082 amdgpu_bo_reserve(pd, false); 1083 amdgpu_bo_fence(pd, NULL, false); 1084 amdgpu_bo_unreserve(pd); 1085 1086 /* Update process info */ 1087 mutex_lock(&process_info->lock); 1088 process_info->n_vms--; 1089 list_del(&vm->vm_list_node); 1090 mutex_unlock(&process_info->lock); 1091 1092 vm->process_info = NULL; 1093 1094 /* Release per-process resources when last compute VM is destroyed */ 1095 if (!process_info->n_vms) { 1096 WARN_ON(!list_empty(&process_info->kfd_bo_list)); 1097 WARN_ON(!list_empty(&process_info->userptr_valid_list)); 1098 WARN_ON(!list_empty(&process_info->userptr_inval_list)); 1099 1100 dma_fence_put(&process_info->eviction_fence->base); 1101 cancel_delayed_work_sync(&process_info->restore_userptr_work); 1102 put_pid(process_info->pid); 1103 mutex_destroy(&process_info->lock); 1104 kfree(process_info); 1105 } 1106 } 1107 1108 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm) 1109 { 1110 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1111 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1112 1113 if (WARN_ON(!kgd || !vm)) 1114 return; 1115 1116 pr_debug("Destroying process vm %p\n", vm); 1117 1118 /* Release the VM context */ 1119 amdgpu_vm_fini(adev, avm); 1120 kfree(vm); 1121 } 1122 1123 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) 1124 { 1125 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1126 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1127 1128 if (WARN_ON(!kgd || !vm)) 1129 return; 1130 1131 pr_debug("Releasing process vm %p\n", vm); 1132 1133 /* The original pasid of amdgpu vm has already been 1134 * released during making a amdgpu vm to a compute vm 1135 * The current pasid is managed by kfd and will be 1136 * released on kfd process destroy. Set amdgpu pasid 1137 * to 0 to avoid duplicate release. 
1138 */ 1139 amdgpu_vm_release_compute(adev, avm); 1140 } 1141 1142 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) 1143 { 1144 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1145 struct amdgpu_bo *pd = avm->root.base.bo; 1146 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1147 1148 if (adev->asic_type < CHIP_VEGA10) 1149 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; 1150 return avm->pd_phys_addr; 1151 } 1152 1153 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( 1154 struct kgd_dev *kgd, uint64_t va, uint64_t size, 1155 void *vm, struct kgd_mem **mem, 1156 uint64_t *offset, uint32_t flags) 1157 { 1158 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1159 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1160 enum ttm_bo_type bo_type = ttm_bo_type_device; 1161 struct sg_table *sg = NULL; 1162 uint64_t user_addr = 0; 1163 struct amdgpu_bo *bo; 1164 struct drm_gem_object *gobj; 1165 u32 domain, alloc_domain; 1166 u64 alloc_flags; 1167 int ret; 1168 1169 /* 1170 * Check on which domain to allocate BO 1171 */ 1172 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 1173 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; 1174 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; 1175 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 1176 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 1177 AMDGPU_GEM_CREATE_NO_CPU_ACCESS; 1178 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 1179 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1180 alloc_flags = 0; 1181 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 1182 domain = AMDGPU_GEM_DOMAIN_GTT; 1183 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1184 alloc_flags = 0; 1185 if (!offset || !*offset) 1186 return -EINVAL; 1187 user_addr = untagged_addr(*offset); 1188 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1189 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1190 domain = AMDGPU_GEM_DOMAIN_GTT; 1191 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1192 bo_type = ttm_bo_type_sg; 1193 alloc_flags = 0; 1194 if (size > UINT_MAX) 1195 return -EINVAL; 1196 sg = create_doorbell_sg(*offset, size); 1197 if (!sg) 1198 return -ENOMEM; 1199 } else { 1200 return -EINVAL; 1201 } 1202 1203 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 1204 if (!*mem) { 1205 ret = -ENOMEM; 1206 goto err; 1207 } 1208 INIT_LIST_HEAD(&(*mem)->bo_va_list); 1209 mutex_init(&(*mem)->lock); 1210 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); 1211 1212 /* Workaround for AQL queue wraparound bug. Map the same 1213 * memory twice. That means we only actually allocate half 1214 * the memory. 1215 */ 1216 if ((*mem)->aql_queue) 1217 size = size >> 1; 1218 1219 (*mem)->alloc_flags = flags; 1220 1221 amdgpu_sync_create(&(*mem)->sync); 1222 1223 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); 1224 if (ret) { 1225 pr_debug("Insufficient memory\n"); 1226 goto err_reserve_limit; 1227 } 1228 1229 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", 1230 va, size, domain_string(alloc_domain)); 1231 1232 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, 1233 bo_type, NULL, &gobj); 1234 if (ret) { 1235 pr_debug("Failed to create BO on domain %s. 
ret %d\n", 1236 domain_string(alloc_domain), ret); 1237 goto err_bo_create; 1238 } 1239 bo = gem_to_amdgpu_bo(gobj); 1240 if (bo_type == ttm_bo_type_sg) { 1241 bo->tbo.sg = sg; 1242 bo->tbo.ttm->sg = sg; 1243 } 1244 bo->kfd_bo = *mem; 1245 (*mem)->bo = bo; 1246 if (user_addr) 1247 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; 1248 1249 (*mem)->va = va; 1250 (*mem)->domain = domain; 1251 (*mem)->mapped_to_gpu_memory = 0; 1252 (*mem)->process_info = avm->process_info; 1253 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); 1254 1255 if (user_addr) { 1256 ret = init_user_pages(*mem, user_addr); 1257 if (ret) 1258 goto allocate_init_user_pages_failed; 1259 } 1260 1261 if (offset) 1262 *offset = amdgpu_bo_mmap_offset(bo); 1263 1264 return 0; 1265 1266 allocate_init_user_pages_failed: 1267 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 1268 amdgpu_bo_unref(&bo); 1269 /* Don't unreserve system mem limit twice */ 1270 goto err_reserve_limit; 1271 err_bo_create: 1272 unreserve_mem_limit(adev, size, alloc_domain, !!sg); 1273 err_reserve_limit: 1274 mutex_destroy(&(*mem)->lock); 1275 kfree(*mem); 1276 err: 1277 if (sg) { 1278 sg_free_table(sg); 1279 kfree(sg); 1280 } 1281 return ret; 1282 } 1283 1284 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( 1285 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size) 1286 { 1287 struct amdkfd_process_info *process_info = mem->process_info; 1288 unsigned long bo_size = mem->bo->tbo.base.size; 1289 struct kfd_bo_va_list *entry, *tmp; 1290 struct bo_vm_reservation_context ctx; 1291 struct ttm_validate_buffer *bo_list_entry; 1292 unsigned int mapped_to_gpu_memory; 1293 int ret; 1294 bool is_imported = false; 1295 1296 mutex_lock(&mem->lock); 1297 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; 1298 is_imported = mem->is_imported; 1299 mutex_unlock(&mem->lock); 1300 /* lock is not needed after this, since mem is unused and will 1301 * be freed anyway 1302 */ 1303 1304 if (mapped_to_gpu_memory > 0) { 1305 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", 1306 mem->va, bo_size); 1307 return -EBUSY; 1308 } 1309 1310 /* Make sure restore workers don't access the BO any more */ 1311 bo_list_entry = &mem->validate_list; 1312 mutex_lock(&process_info->lock); 1313 list_del(&bo_list_entry->head); 1314 mutex_unlock(&process_info->lock); 1315 1316 /* No more MMU notifiers */ 1317 amdgpu_mn_unregister(mem->bo); 1318 1319 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1320 if (unlikely(ret)) 1321 return ret; 1322 1323 /* The eviction fence should be removed by the last unmap. 1324 * TODO: Log an error condition if the bo still has the eviction fence 1325 * attached 1326 */ 1327 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1328 process_info->eviction_fence); 1329 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, 1330 mem->va + bo_size * (1 + mem->aql_queue)); 1331 1332 /* Remove from VM internal data structures */ 1333 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list) 1334 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev, 1335 entry, bo_size); 1336 1337 ret = unreserve_bo_and_vms(&ctx, false, false); 1338 1339 /* Free the sync object */ 1340 amdgpu_sync_free(&mem->sync); 1341 1342 /* If the SG is not NULL, it's one we created for a doorbell or mmio 1343 * remap BO. We need to free it. 1344 */ 1345 if (mem->bo->tbo.sg) { 1346 sg_free_table(mem->bo->tbo.sg); 1347 kfree(mem->bo->tbo.sg); 1348 } 1349 1350 /* Update the size of the BO being freed if it was allocated from 1351 * VRAM and is not imported. 
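 * Callers that do not care about the size can pass a NULL pointer.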
1352 */ 1353 if (size) { 1354 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && 1355 (!is_imported)) 1356 *size = bo_size; 1357 else 1358 *size = 0; 1359 } 1360 1361 /* Free the BO*/ 1362 drm_gem_object_put(&mem->bo->tbo.base); 1363 mutex_destroy(&mem->lock); 1364 kfree(mem); 1365 1366 return ret; 1367 } 1368 1369 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1370 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) 1371 { 1372 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1373 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1374 int ret; 1375 struct amdgpu_bo *bo; 1376 uint32_t domain; 1377 struct kfd_bo_va_list *entry; 1378 struct bo_vm_reservation_context ctx; 1379 struct kfd_bo_va_list *bo_va_entry = NULL; 1380 struct kfd_bo_va_list *bo_va_entry_aql = NULL; 1381 unsigned long bo_size; 1382 bool is_invalid_userptr = false; 1383 1384 bo = mem->bo; 1385 if (!bo) { 1386 pr_err("Invalid BO when mapping memory to GPU\n"); 1387 return -EINVAL; 1388 } 1389 1390 /* Make sure restore is not running concurrently. Since we 1391 * don't map invalid userptr BOs, we rely on the next restore 1392 * worker to do the mapping 1393 */ 1394 mutex_lock(&mem->process_info->lock); 1395 1396 /* Lock mmap-sem. If we find an invalid userptr BO, we can be 1397 * sure that the MMU notifier is no longer running 1398 * concurrently and the queues are actually stopped 1399 */ 1400 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1401 mmap_write_lock(current->mm); 1402 is_invalid_userptr = atomic_read(&mem->invalid); 1403 mmap_write_unlock(current->mm); 1404 } 1405 1406 mutex_lock(&mem->lock); 1407 1408 domain = mem->domain; 1409 bo_size = bo->tbo.base.size; 1410 1411 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", 1412 mem->va, 1413 mem->va + bo_size * (1 + mem->aql_queue), 1414 vm, domain_string(domain)); 1415 1416 ret = reserve_bo_and_vm(mem, vm, &ctx); 1417 if (unlikely(ret)) 1418 goto out; 1419 1420 /* Userptr can be marked as "not invalid", but not actually be 1421 * validated yet (still in the system domain). In that case 1422 * the queues are still stopped and we can leave mapping for 1423 * the next restore worker 1424 */ 1425 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 1426 bo->tbo.mem.mem_type == TTM_PL_SYSTEM) 1427 is_invalid_userptr = true; 1428 1429 if (check_if_add_bo_to_vm(avm, mem)) { 1430 ret = add_bo_to_vm(adev, mem, avm, false, 1431 &bo_va_entry); 1432 if (ret) 1433 goto add_bo_to_vm_failed; 1434 if (mem->aql_queue) { 1435 ret = add_bo_to_vm(adev, mem, avm, 1436 true, &bo_va_entry_aql); 1437 if (ret) 1438 goto add_bo_to_vm_failed_aql; 1439 } 1440 } else { 1441 ret = vm_validate_pt_pd_bos(avm); 1442 if (unlikely(ret)) 1443 goto add_bo_to_vm_failed; 1444 } 1445 1446 if (mem->mapped_to_gpu_memory == 0 && 1447 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1448 /* Validate BO only once. The eviction fence gets added to BO 1449 * the first time it is mapped. Validate will wait for all 1450 * background evictions to complete. 
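 * Later mappings of the same BO into additional VMs skip this step
 * because mapped_to_gpu_memory is already non-zero by then.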
1451 */ 1452 ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 1453 if (ret) { 1454 pr_debug("Validate failed\n"); 1455 goto map_bo_to_gpuvm_failed; 1456 } 1457 } 1458 1459 list_for_each_entry(entry, &mem->bo_va_list, bo_list) { 1460 if (entry->bo_va->base.vm == vm && !entry->is_mapped) { 1461 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", 1462 entry->va, entry->va + bo_size, 1463 entry); 1464 1465 ret = map_bo_to_gpuvm(adev, entry, ctx.sync, 1466 is_invalid_userptr); 1467 if (ret) { 1468 pr_err("Failed to map bo to gpuvm\n"); 1469 goto map_bo_to_gpuvm_failed; 1470 } 1471 1472 ret = vm_update_pds(vm, ctx.sync); 1473 if (ret) { 1474 pr_err("Failed to update page directories\n"); 1475 goto map_bo_to_gpuvm_failed; 1476 } 1477 1478 entry->is_mapped = true; 1479 mem->mapped_to_gpu_memory++; 1480 pr_debug("\t INC mapping count %d\n", 1481 mem->mapped_to_gpu_memory); 1482 } 1483 } 1484 1485 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) 1486 amdgpu_bo_fence(bo, 1487 &avm->process_info->eviction_fence->base, 1488 true); 1489 ret = unreserve_bo_and_vms(&ctx, false, false); 1490 1491 goto out; 1492 1493 map_bo_to_gpuvm_failed: 1494 if (bo_va_entry_aql) 1495 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size); 1496 add_bo_to_vm_failed_aql: 1497 if (bo_va_entry) 1498 remove_bo_from_vm(adev, bo_va_entry, bo_size); 1499 add_bo_to_vm_failed: 1500 unreserve_bo_and_vms(&ctx, false, false); 1501 out: 1502 mutex_unlock(&mem->process_info->lock); 1503 mutex_unlock(&mem->lock); 1504 return ret; 1505 } 1506 1507 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 1508 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) 1509 { 1510 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1511 struct amdkfd_process_info *process_info = 1512 ((struct amdgpu_vm *)vm)->process_info; 1513 unsigned long bo_size = mem->bo->tbo.base.size; 1514 struct kfd_bo_va_list *entry; 1515 struct bo_vm_reservation_context ctx; 1516 int ret; 1517 1518 mutex_lock(&mem->lock); 1519 1520 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx); 1521 if (unlikely(ret)) 1522 goto out; 1523 /* If no VMs were reserved, it means the BO wasn't actually mapped */ 1524 if (ctx.n_vms == 0) { 1525 ret = -EINVAL; 1526 goto unreserve_out; 1527 } 1528 1529 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm); 1530 if (unlikely(ret)) 1531 goto unreserve_out; 1532 1533 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", 1534 mem->va, 1535 mem->va + bo_size * (1 + mem->aql_queue), 1536 vm); 1537 1538 list_for_each_entry(entry, &mem->bo_va_list, bo_list) { 1539 if (entry->bo_va->base.vm == vm && entry->is_mapped) { 1540 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", 1541 entry->va, 1542 entry->va + bo_size, 1543 entry); 1544 1545 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); 1546 if (ret == 0) { 1547 entry->is_mapped = false; 1548 } else { 1549 pr_err("failed to unmap VA 0x%llx\n", 1550 mem->va); 1551 goto unreserve_out; 1552 } 1553 1554 mem->mapped_to_gpu_memory--; 1555 pr_debug("\t DEC mapping count %d\n", 1556 mem->mapped_to_gpu_memory); 1557 } 1558 } 1559 1560 /* If BO is unmapped from all VMs, unfence it. It can be evicted if 1561 * required. 
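 * Userptr and pinned BOs are skipped here because the eviction fence
 * was never attached to them at map time (see the matching check in
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu).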
1562 */ 1563 if (mem->mapped_to_gpu_memory == 0 && 1564 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && 1565 !mem->bo->tbo.pin_count) 1566 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1567 process_info->eviction_fence); 1568 1569 unreserve_out: 1570 unreserve_bo_and_vms(&ctx, false, false); 1571 out: 1572 mutex_unlock(&mem->lock); 1573 return ret; 1574 } 1575 1576 int amdgpu_amdkfd_gpuvm_sync_memory( 1577 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr) 1578 { 1579 struct amdgpu_sync sync; 1580 int ret; 1581 1582 amdgpu_sync_create(&sync); 1583 1584 mutex_lock(&mem->lock); 1585 amdgpu_sync_clone(&mem->sync, &sync); 1586 mutex_unlock(&mem->lock); 1587 1588 ret = amdgpu_sync_wait(&sync, intr); 1589 amdgpu_sync_free(&sync); 1590 return ret; 1591 } 1592 1593 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, 1594 struct kgd_mem *mem, void **kptr, uint64_t *size) 1595 { 1596 int ret; 1597 struct amdgpu_bo *bo = mem->bo; 1598 1599 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1600 pr_err("userptr can't be mapped to kernel\n"); 1601 return -EINVAL; 1602 } 1603 1604 /* delete kgd_mem from kfd_bo_list to avoid re-validating 1605 * this BO in BO's restoring after eviction. 1606 */ 1607 mutex_lock(&mem->process_info->lock); 1608 1609 ret = amdgpu_bo_reserve(bo, true); 1610 if (ret) { 1611 pr_err("Failed to reserve bo. ret %d\n", ret); 1612 goto bo_reserve_failed; 1613 } 1614 1615 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 1616 if (ret) { 1617 pr_err("Failed to pin bo. ret %d\n", ret); 1618 goto pin_failed; 1619 } 1620 1621 ret = amdgpu_bo_kmap(bo, kptr); 1622 if (ret) { 1623 pr_err("Failed to map bo to kernel. ret %d\n", ret); 1624 goto kmap_failed; 1625 } 1626 1627 amdgpu_amdkfd_remove_eviction_fence( 1628 bo, mem->process_info->eviction_fence); 1629 list_del_init(&mem->validate_list.head); 1630 1631 if (size) 1632 *size = amdgpu_bo_size(bo); 1633 1634 amdgpu_bo_unreserve(bo); 1635 1636 mutex_unlock(&mem->process_info->lock); 1637 return 0; 1638 1639 kmap_failed: 1640 amdgpu_bo_unpin(bo); 1641 pin_failed: 1642 amdgpu_bo_unreserve(bo); 1643 bo_reserve_failed: 1644 mutex_unlock(&mem->process_info->lock); 1645 1646 return ret; 1647 } 1648 1649 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, 1650 struct kfd_vm_fault_info *mem) 1651 { 1652 struct amdgpu_device *adev; 1653 1654 adev = (struct amdgpu_device *)kgd; 1655 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { 1656 *mem = *adev->gmc.vm_fault_info; 1657 mb(); 1658 atomic_set(&adev->gmc.vm_fault_info_updated, 0); 1659 } 1660 return 0; 1661 } 1662 1663 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, 1664 struct dma_buf *dma_buf, 1665 uint64_t va, void *vm, 1666 struct kgd_mem **mem, uint64_t *size, 1667 uint64_t *mmap_offset) 1668 { 1669 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 1670 struct drm_gem_object *obj; 1671 struct amdgpu_bo *bo; 1672 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1673 1674 if (dma_buf->ops != &amdgpu_dmabuf_ops) 1675 /* Can't handle non-graphics buffers */ 1676 return -EINVAL; 1677 1678 obj = dma_buf->priv; 1679 if (drm_to_adev(obj->dev) != adev) 1680 /* Can't handle buffers from other devices */ 1681 return -EINVAL; 1682 1683 bo = gem_to_amdgpu_bo(obj); 1684 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | 1685 AMDGPU_GEM_DOMAIN_GTT))) 1686 /* Only VRAM and GTT BOs are supported */ 1687 return -EINVAL; 1688 1689 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 1690 if (!*mem) 1691 return -ENOMEM; 1692 1693 if (size) 1694 *size = 
amdgpu_bo_size(bo); 1695 1696 if (mmap_offset) 1697 *mmap_offset = amdgpu_bo_mmap_offset(bo); 1698 1699 INIT_LIST_HEAD(&(*mem)->bo_va_list); 1700 mutex_init(&(*mem)->lock); 1701 1702 (*mem)->alloc_flags = 1703 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 1704 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) 1705 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE 1706 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 1707 1708 drm_gem_object_get(&bo->tbo.base); 1709 (*mem)->bo = bo; 1710 (*mem)->va = va; 1711 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 1712 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; 1713 (*mem)->mapped_to_gpu_memory = 0; 1714 (*mem)->process_info = avm->process_info; 1715 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); 1716 amdgpu_sync_create(&(*mem)->sync); 1717 (*mem)->is_imported = true; 1718 1719 return 0; 1720 } 1721 1722 /* Evict a userptr BO by stopping the queues if necessary 1723 * 1724 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it 1725 * cannot do any memory allocations, and cannot take any locks that 1726 * are held elsewhere while allocating memory. Therefore this is as 1727 * simple as possible, using atomic counters. 1728 * 1729 * It doesn't do anything to the BO itself. The real work happens in 1730 * restore, where we get updated page addresses. This function only 1731 * ensures that GPU access to the BO is stopped. 1732 */ 1733 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, 1734 struct mm_struct *mm) 1735 { 1736 struct amdkfd_process_info *process_info = mem->process_info; 1737 int evicted_bos; 1738 int r = 0; 1739 1740 atomic_inc(&mem->invalid); 1741 evicted_bos = atomic_inc_return(&process_info->evicted_bos); 1742 if (evicted_bos == 1) { 1743 /* First eviction, stop the queues */ 1744 r = kgd2kfd_quiesce_mm(mm); 1745 if (r) 1746 pr_err("Failed to quiesce KFD\n"); 1747 schedule_delayed_work(&process_info->restore_userptr_work, 1748 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 1749 } 1750 1751 return r; 1752 } 1753 1754 /* Update invalid userptr BOs 1755 * 1756 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to 1757 * userptr_inval_list and updates user pages for all BOs that have 1758 * been invalidated since their last update. 
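 *
 * This happens in two passes over the lists below:
 *   1. Move each BO with a non-zero invalid count to userptr_inval_list
 *      and validate it in the CPU domain so its stale pages are dropped.
 *   2. For every BO now on userptr_inval_list, call
 *      amdgpu_ttm_tt_get_user_pages() to pick up the new page addresses.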
1759 */ 1760 static int update_invalid_user_pages(struct amdkfd_process_info *process_info, 1761 struct mm_struct *mm) 1762 { 1763 struct kgd_mem *mem, *tmp_mem; 1764 struct amdgpu_bo *bo; 1765 struct ttm_operation_ctx ctx = { false, false }; 1766 int invalid, ret; 1767 1768 /* Move all invalidated BOs to the userptr_inval_list and 1769 * release their user pages by migration to the CPU domain 1770 */ 1771 list_for_each_entry_safe(mem, tmp_mem, 1772 &process_info->userptr_valid_list, 1773 validate_list.head) { 1774 if (!atomic_read(&mem->invalid)) 1775 continue; /* BO is still valid */ 1776 1777 bo = mem->bo; 1778 1779 if (amdgpu_bo_reserve(bo, true)) 1780 return -EAGAIN; 1781 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 1782 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1783 amdgpu_bo_unreserve(bo); 1784 if (ret) { 1785 pr_err("%s: Failed to invalidate userptr BO\n", 1786 __func__); 1787 return -EAGAIN; 1788 } 1789 1790 list_move_tail(&mem->validate_list.head, 1791 &process_info->userptr_inval_list); 1792 } 1793 1794 if (list_empty(&process_info->userptr_inval_list)) 1795 return 0; /* All evicted userptr BOs were freed */ 1796 1797 /* Go through userptr_inval_list and update any invalid user_pages */ 1798 list_for_each_entry(mem, &process_info->userptr_inval_list, 1799 validate_list.head) { 1800 invalid = atomic_read(&mem->invalid); 1801 if (!invalid) 1802 /* BO hasn't been invalidated since the last 1803 * revalidation attempt. Keep its BO list. 1804 */ 1805 continue; 1806 1807 bo = mem->bo; 1808 1809 /* Get updated user pages */ 1810 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); 1811 if (ret) { 1812 pr_debug("%s: Failed to get user pages: %d\n", 1813 __func__, ret); 1814 1815 /* Return error -EBUSY or -ENOMEM, retry restore */ 1816 return ret; 1817 } 1818 1819 /* 1820 * FIXME: Cannot ignore the return code, must hold 1821 * notifier_lock 1822 */ 1823 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 1824 1825 /* Mark the BO as valid unless it was invalidated 1826 * again concurrently. 1827 */ 1828 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) 1829 return -EAGAIN; 1830 } 1831 1832 return 0; 1833 } 1834 1835 /* Validate invalid userptr BOs 1836 * 1837 * Validates BOs on the userptr_inval_list, and moves them back to the 1838 * userptr_valid_list. Also updates GPUVM page tables with new page 1839 * addresses and waits for the page table updates to complete. 
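 *
 * All page directory BOs of every VM in the process and all BOs on
 * userptr_inval_list are reserved together under one ww_acquire ticket
 * before any validation or page table update runs.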
1840 */ 1841 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) 1842 { 1843 struct amdgpu_bo_list_entry *pd_bo_list_entries; 1844 struct list_head resv_list, duplicates; 1845 struct ww_acquire_ctx ticket; 1846 struct amdgpu_sync sync; 1847 1848 struct amdgpu_vm *peer_vm; 1849 struct kgd_mem *mem, *tmp_mem; 1850 struct amdgpu_bo *bo; 1851 struct ttm_operation_ctx ctx = { false, false }; 1852 int i, ret; 1853 1854 pd_bo_list_entries = kcalloc(process_info->n_vms, 1855 sizeof(struct amdgpu_bo_list_entry), 1856 GFP_KERNEL); 1857 if (!pd_bo_list_entries) { 1858 pr_err("%s: Failed to allocate PD BO list entries\n", __func__); 1859 ret = -ENOMEM; 1860 goto out_no_mem; 1861 } 1862 1863 INIT_LIST_HEAD(&resv_list); 1864 INIT_LIST_HEAD(&duplicates); 1865 1866 /* Get all the page directory BOs that need to be reserved */ 1867 i = 0; 1868 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1869 vm_list_node) 1870 amdgpu_vm_get_pd_bo(peer_vm, &resv_list, 1871 &pd_bo_list_entries[i++]); 1872 /* Add the userptr_inval_list entries to resv_list */ 1873 list_for_each_entry(mem, &process_info->userptr_inval_list, 1874 validate_list.head) { 1875 list_add_tail(&mem->resv_list.head, &resv_list); 1876 mem->resv_list.bo = mem->validate_list.bo; 1877 mem->resv_list.num_shared = mem->validate_list.num_shared; 1878 } 1879 1880 /* Reserve all BOs and page tables for validation */ 1881 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); 1882 WARN(!list_empty(&duplicates), "Duplicates should be empty"); 1883 if (ret) 1884 goto out_free; 1885 1886 amdgpu_sync_create(&sync); 1887 1888 ret = process_validate_vms(process_info); 1889 if (ret) 1890 goto unreserve_out; 1891 1892 /* Validate BOs and update GPUVM page tables */ 1893 list_for_each_entry_safe(mem, tmp_mem, 1894 &process_info->userptr_inval_list, 1895 validate_list.head) { 1896 struct kfd_bo_va_list *bo_va_entry; 1897 1898 bo = mem->bo; 1899 1900 /* Validate the BO if we got user pages */ 1901 if (bo->tbo.ttm->pages[0]) { 1902 amdgpu_bo_placement_from_domain(bo, mem->domain); 1903 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1904 if (ret) { 1905 pr_err("%s: failed to validate BO\n", __func__); 1906 goto unreserve_out; 1907 } 1908 } 1909 1910 list_move_tail(&mem->validate_list.head, 1911 &process_info->userptr_valid_list); 1912 1913 /* Update mapping. If the BO was not validated 1914 * (because we couldn't get user pages), this will 1915 * clear the page table entries, which will result in 1916 * VM faults if the GPU tries to access the invalid 1917 * memory. 1918 */ 1919 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { 1920 if (!bo_va_entry->is_mapped) 1921 continue; 1922 1923 ret = update_gpuvm_pte((struct amdgpu_device *) 1924 bo_va_entry->kgd_dev, 1925 bo_va_entry, &sync); 1926 if (ret) { 1927 pr_err("%s: update PTE failed\n", __func__); 1928 /* make sure this gets validated again */ 1929 atomic_inc(&mem->invalid); 1930 goto unreserve_out; 1931 } 1932 } 1933 } 1934 1935 /* Update page directories */ 1936 ret = process_update_pds(process_info, &sync); 1937 1938 unreserve_out: 1939 ttm_eu_backoff_reservation(&ticket, &resv_list); 1940 amdgpu_sync_wait(&sync, false); 1941 amdgpu_sync_free(&sync); 1942 out_free: 1943 kfree(pd_bo_list_entries); 1944 out_no_mem: 1945 1946 return ret; 1947 } 1948 1949 /* Worker callback to restore evicted userptr BOs 1950 * 1951 * Tries to update and validate all userptr BOs. If successful and no 1952 * concurrent evictions happened, the queues are restarted. 

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
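
/* Illustrative sketch (hypothetical call site, not taken from this file):
 * an eviction path that invalidates userptr BOs is expected to bump the
 * counter checked above and then kick this worker after a short delay,
 * roughly:
 *
 *	atomic_inc(&process_info->evicted_bos);
 *	schedule_delayed_work(&process_info->restore_userptr_work,
 *		msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 */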

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. It must be
 * called while the process is still valid. BO restore involves:
 *
 * 1. Release the old eviction fence and create a new one
 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7. Add the fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
					AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry,
					       &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}
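
/* Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the restore path hands in its process_info and receives a reference to
 * the newly created eviction fence, which it must eventually drop:
 *
 *	struct dma_fence *ef;
 *	int ret;
 *
 *	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
 *	if (!ret)
 *		... install ef, then dma_fence_put(ef) when done ...
 */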

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd. Add the
	 * process eviction fence to the BO so they can evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}
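
/* Illustrative pairing (hypothetical caller, not taken from this file):
 * a process that acquires the GWS resource with the function above is
 * expected to release it with the matching remove call below:
 *
 *	struct kgd_mem *gws_mem;
 *
 *	if (!amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem)) {
 *		... use GWS ...
 *		amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
 *	}
 */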

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}
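
/* Illustrative usage sketch (hypothetical caller, not taken from this file):
 *
 *	struct tile_config cfg;
 *
 *	if (!amdgpu_amdkfd_get_tile_config(kgd, &cfg))
 *		pr_debug("%u tile configs, gb_addr_config 0x%x\n",
 *			 cfg.num_tile_configs, cfg.gb_addr_config);
 */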