/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18).
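 * For example, by this estimate mapping 64 GiB of memory takes 128 MiB
 * of page table entries with 4KB pages, but only 256 KiB with 2MB pages.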
 * ROCm user mode tries to optimize for 2MB pages for TLB efficiency.
 * However, small allocations and fragmented system memory still need
 * some 4KB pages. We choose a compromise that should work in most
 * cases without reserving too much memory for page tables
 * unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size
 * of a buffer, including any memory reserved for control structures
 *
 * @adev: Device to which the allocated BO belongs
 * @size: Size of the buffer, in bytes, encapsulated by the BO. This should be
 *	equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flags (KFD_IOC_ALLOC_MEM_FLAGS_*) used when allocating the BO
 *
 * Return: 0 on success, -ENOMEM on failure
 */
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		vram_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else if (alloc_flag &
		   (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
	} else {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
		goto release;
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	adev->kfd.vram_used += vram_needed;
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
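		/* For VRAM BOs only the control structures were charged
		 * against system/TTM memory; the buffer itself was tracked
		 * in adev->kfd.vram_used by amdgpu_amdkfd_reserve_mem_limit.
		 */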
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		adev->kfd.vram_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else if (alloc_flag &
		   (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}

	WARN_ONCE(adev->kfd.vram_used < 0,
		  "KFD VRAM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	unreserve_mem_limit(adev, size, alloc_flags);

	kfree(bo->kfd_bo);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_shared_list(resv);
	if (!old)
		return 0;

	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
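	 * "Interesting" here means fences that share @ef's fence context:
	 * they are gathered at the tail of the new list (indices j and up)
	 * so that their references can be dropped below, while all other
	 * fences are compacted to the front.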
273 */ 274 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) { 275 struct dma_fence *f; 276 277 f = rcu_dereference_protected(old->shared[i], 278 dma_resv_held(resv)); 279 280 if (f->context == ef->base.context) 281 RCU_INIT_POINTER(new->shared[--j], f); 282 else 283 RCU_INIT_POINTER(new->shared[k++], f); 284 } 285 new->shared_max = old->shared_max; 286 new->shared_count = k; 287 288 /* Install the new fence list, seqcount provides the barriers */ 289 write_seqcount_begin(&resv->seq); 290 RCU_INIT_POINTER(resv->fence, new); 291 write_seqcount_end(&resv->seq); 292 293 /* Drop the references to the removed fences or move them to ef_list */ 294 for (i = j; i < old->shared_count; ++i) { 295 struct dma_fence *f; 296 297 f = rcu_dereference_protected(new->shared[i], 298 dma_resv_held(resv)); 299 dma_fence_put(f); 300 } 301 kfree_rcu(old, rcu); 302 303 return 0; 304 } 305 306 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) 307 { 308 struct amdgpu_bo *root = bo; 309 struct amdgpu_vm_bo_base *vm_bo; 310 struct amdgpu_vm *vm; 311 struct amdkfd_process_info *info; 312 struct amdgpu_amdkfd_fence *ef; 313 int ret; 314 315 /* we can always get vm_bo from root PD bo.*/ 316 while (root->parent) 317 root = root->parent; 318 319 vm_bo = root->vm_bo; 320 if (!vm_bo) 321 return 0; 322 323 vm = vm_bo->vm; 324 if (!vm) 325 return 0; 326 327 info = vm->process_info; 328 if (!info || !info->eviction_fence) 329 return 0; 330 331 ef = container_of(dma_fence_get(&info->eviction_fence->base), 332 struct amdgpu_amdkfd_fence, base); 333 334 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); 335 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); 336 dma_resv_unlock(bo->tbo.base.resv); 337 338 dma_fence_put(&ef->base); 339 return ret; 340 } 341 342 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, 343 bool wait) 344 { 345 struct ttm_operation_ctx ctx = { false, false }; 346 int ret; 347 348 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), 349 "Called with userptr BO")) 350 return -EINVAL; 351 352 amdgpu_bo_placement_from_domain(bo, domain); 353 354 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 355 if (ret) 356 goto validate_fail; 357 if (wait) 358 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 359 360 validate_fail: 361 return ret; 362 } 363 364 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 365 { 366 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); 367 } 368 369 /* vm_validate_pt_pd_bos - Validate page table and directory BOs 370 * 371 * Page directories are not updated here because huge page handling 372 * during page table updates can invalidate page directory entries 373 * again. Page directories are only updated after updating page 374 * tables. 
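 *
 * Validation moves any evicted page table BOs back into their allowed
 * domains and refreshes the cached page directory address
 * (vm->pd_phys_addr).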
375 */ 376 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) 377 { 378 struct amdgpu_bo *pd = vm->root.bo; 379 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 380 int ret; 381 382 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); 383 if (ret) { 384 pr_err("failed to validate PT BOs\n"); 385 return ret; 386 } 387 388 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd); 389 if (ret) { 390 pr_err("failed to validate PD\n"); 391 return ret; 392 } 393 394 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); 395 396 if (vm->use_cpu_for_update) { 397 ret = amdgpu_bo_kmap(pd, NULL); 398 if (ret) { 399 pr_err("failed to kmap PD, ret=%d\n", ret); 400 return ret; 401 } 402 } 403 404 return 0; 405 } 406 407 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 408 { 409 struct amdgpu_bo *pd = vm->root.bo; 410 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 411 int ret; 412 413 ret = amdgpu_vm_update_pdes(adev, vm, false); 414 if (ret) 415 return ret; 416 417 return amdgpu_sync_fence(sync, vm->last_update); 418 } 419 420 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) 421 { 422 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 423 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; 424 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED; 425 uint32_t mapping_flags; 426 uint64_t pte_flags; 427 bool snoop = false; 428 429 mapping_flags = AMDGPU_VM_PAGE_READABLE; 430 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) 431 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; 432 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) 433 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 434 435 switch (adev->asic_type) { 436 case CHIP_ARCTURUS: 437 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 438 if (bo_adev == adev) 439 mapping_flags |= coherent ? 440 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 441 else 442 mapping_flags |= coherent ? 443 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 444 } else { 445 mapping_flags |= coherent ? 446 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 447 } 448 break; 449 case CHIP_ALDEBARAN: 450 if (coherent && uncached) { 451 if (adev->gmc.xgmi.connected_to_cpu || 452 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) 453 snoop = true; 454 mapping_flags |= AMDGPU_VM_MTYPE_UC; 455 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 456 if (bo_adev == adev) { 457 mapping_flags |= coherent ? 458 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 459 if (adev->gmc.xgmi.connected_to_cpu) 460 snoop = true; 461 } else { 462 mapping_flags |= coherent ? 463 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 464 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 465 snoop = true; 466 } 467 } else { 468 snoop = true; 469 mapping_flags |= coherent ? 470 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 471 } 472 break; 473 default: 474 mapping_flags |= coherent ? 475 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 476 } 477 478 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags); 479 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; 480 481 return pte_flags; 482 } 483 484 static int 485 kfd_mem_dmamap_userptr(struct kgd_mem *mem, 486 struct kfd_mem_attachment *attachment) 487 { 488 enum dma_data_direction direction = 489 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
490 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 491 struct ttm_operation_ctx ctx = {.interruptible = true}; 492 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 493 struct amdgpu_device *adev = attachment->adev; 494 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; 495 struct ttm_tt *ttm = bo->tbo.ttm; 496 int ret; 497 498 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); 499 if (unlikely(!ttm->sg)) 500 return -ENOMEM; 501 502 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) 503 return -EINVAL; 504 505 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ 506 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, 507 ttm->num_pages, 0, 508 (u64)ttm->num_pages << PAGE_SHIFT, 509 GFP_KERNEL); 510 if (unlikely(ret)) 511 goto free_sg; 512 513 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); 514 if (unlikely(ret)) 515 goto release_sg; 516 517 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, 518 ttm->num_pages); 519 520 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 521 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 522 if (ret) 523 goto unmap_sg; 524 525 return 0; 526 527 unmap_sg: 528 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 529 release_sg: 530 pr_err("DMA map userptr failed: %d\n", ret); 531 sg_free_table(ttm->sg); 532 free_sg: 533 kfree(ttm->sg); 534 ttm->sg = NULL; 535 return ret; 536 } 537 538 static int 539 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) 540 { 541 struct ttm_operation_ctx ctx = {.interruptible = true}; 542 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 543 544 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 545 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 546 } 547 548 static int 549 kfd_mem_dmamap_attachment(struct kgd_mem *mem, 550 struct kfd_mem_attachment *attachment) 551 { 552 switch (attachment->type) { 553 case KFD_MEM_ATT_SHARED: 554 return 0; 555 case KFD_MEM_ATT_USERPTR: 556 return kfd_mem_dmamap_userptr(mem, attachment); 557 case KFD_MEM_ATT_DMABUF: 558 return kfd_mem_dmamap_dmabuf(attachment); 559 default: 560 WARN_ON_ONCE(1); 561 } 562 return -EINVAL; 563 } 564 565 static void 566 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, 567 struct kfd_mem_attachment *attachment) 568 { 569 enum dma_data_direction direction = 570 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
571 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 572 struct ttm_operation_ctx ctx = {.interruptible = false}; 573 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 574 struct amdgpu_device *adev = attachment->adev; 575 struct ttm_tt *ttm = bo->tbo.ttm; 576 577 if (unlikely(!ttm->sg)) 578 return; 579 580 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 581 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 582 583 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 584 sg_free_table(ttm->sg); 585 kfree(ttm->sg); 586 ttm->sg = NULL; 587 } 588 589 static void 590 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) 591 { 592 struct ttm_operation_ctx ctx = {.interruptible = true}; 593 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 594 595 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 596 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 597 } 598 599 static void 600 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, 601 struct kfd_mem_attachment *attachment) 602 { 603 switch (attachment->type) { 604 case KFD_MEM_ATT_SHARED: 605 break; 606 case KFD_MEM_ATT_USERPTR: 607 kfd_mem_dmaunmap_userptr(mem, attachment); 608 break; 609 case KFD_MEM_ATT_DMABUF: 610 kfd_mem_dmaunmap_dmabuf(attachment); 611 break; 612 default: 613 WARN_ON_ONCE(1); 614 } 615 } 616 617 static int 618 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem, 619 struct amdgpu_bo **bo) 620 { 621 unsigned long bo_size = mem->bo->tbo.base.size; 622 struct drm_gem_object *gobj; 623 int ret; 624 625 ret = amdgpu_bo_reserve(mem->bo, false); 626 if (ret) 627 return ret; 628 629 ret = amdgpu_gem_object_create(adev, bo_size, 1, 630 AMDGPU_GEM_DOMAIN_CPU, 631 AMDGPU_GEM_CREATE_PREEMPTIBLE, 632 ttm_bo_type_sg, mem->bo->tbo.base.resv, 633 &gobj); 634 amdgpu_bo_unreserve(mem->bo); 635 if (ret) 636 return ret; 637 638 *bo = gem_to_amdgpu_bo(gobj); 639 (*bo)->parent = amdgpu_bo_ref(mem->bo); 640 641 return 0; 642 } 643 644 static int 645 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, 646 struct amdgpu_bo **bo) 647 { 648 struct drm_gem_object *gobj; 649 int ret; 650 651 if (!mem->dmabuf) { 652 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, 653 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 654 DRM_RDWR : 0); 655 if (IS_ERR(mem->dmabuf)) { 656 ret = PTR_ERR(mem->dmabuf); 657 mem->dmabuf = NULL; 658 return ret; 659 } 660 } 661 662 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); 663 if (IS_ERR(gobj)) 664 return PTR_ERR(gobj); 665 666 *bo = gem_to_amdgpu_bo(gobj); 667 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; 668 (*bo)->parent = amdgpu_bo_ref(mem->bo); 669 670 return 0; 671 } 672 673 /* kfd_mem_attach - Add a BO to a VM 674 * 675 * Everything that needs to bo done only once when a BO is first added 676 * to a VM. It can later be mapped and unmapped many times without 677 * repeating these steps. 678 * 679 * 0. Create BO for DMA mapping, if needed 680 * 1. Allocate and initialize BO VA entry data structure 681 * 2. Add BO to the VM 682 * 3. Determine ASIC-specific PTE flags 683 * 4. Alloc page tables and directories if needed 684 * 4a. 
Validate new page tables and directories 685 */ 686 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, 687 struct amdgpu_vm *vm, bool is_aql) 688 { 689 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 690 unsigned long bo_size = mem->bo->tbo.base.size; 691 uint64_t va = mem->va; 692 struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; 693 struct amdgpu_bo *bo[2] = {NULL, NULL}; 694 int i, ret; 695 696 if (!va) { 697 pr_err("Invalid VA when adding BO to VM\n"); 698 return -EINVAL; 699 } 700 701 for (i = 0; i <= is_aql; i++) { 702 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); 703 if (unlikely(!attachment[i])) { 704 ret = -ENOMEM; 705 goto unwind; 706 } 707 708 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, 709 va + bo_size, vm); 710 711 if (adev == bo_adev || 712 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || 713 (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) { 714 /* Mappings on the local GPU, or VRAM mappings in the 715 * local hive, or userptr mapping IOMMU direct map mode 716 * share the original BO 717 */ 718 attachment[i]->type = KFD_MEM_ATT_SHARED; 719 bo[i] = mem->bo; 720 drm_gem_object_get(&bo[i]->tbo.base); 721 } else if (i > 0) { 722 /* Multiple mappings on the same GPU share the BO */ 723 attachment[i]->type = KFD_MEM_ATT_SHARED; 724 bo[i] = bo[0]; 725 drm_gem_object_get(&bo[i]->tbo.base); 726 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 727 /* Create an SG BO to DMA-map userptrs on other GPUs */ 728 attachment[i]->type = KFD_MEM_ATT_USERPTR; 729 ret = kfd_mem_attach_userptr(adev, mem, &bo[i]); 730 if (ret) 731 goto unwind; 732 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT && 733 mem->bo->tbo.type != ttm_bo_type_sg) { 734 /* GTT BOs use DMA-mapping ability of dynamic-attach 735 * DMA bufs. TODO: The same should work for VRAM on 736 * large-BAR GPUs. 737 */ 738 attachment[i]->type = KFD_MEM_ATT_DMABUF; 739 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); 740 if (ret) 741 goto unwind; 742 } else { 743 /* FIXME: Need to DMA-map other BO types: 744 * large-BAR VRAM, doorbells, MMIO remap 745 */ 746 attachment[i]->type = KFD_MEM_ATT_SHARED; 747 bo[i] = mem->bo; 748 drm_gem_object_get(&bo[i]->tbo.base); 749 } 750 751 /* Add BO to VM internal data structures */ 752 ret = amdgpu_bo_reserve(bo[i], false); 753 if (ret) { 754 pr_debug("Unable to reserve BO during memory attach"); 755 goto unwind; 756 } 757 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); 758 amdgpu_bo_unreserve(bo[i]); 759 if (unlikely(!attachment[i]->bo_va)) { 760 ret = -ENOMEM; 761 pr_err("Failed to add BO object to VM. 
ret == %d\n", 762 ret); 763 goto unwind; 764 } 765 attachment[i]->va = va; 766 attachment[i]->pte_flags = get_pte_flags(adev, mem); 767 attachment[i]->adev = adev; 768 list_add(&attachment[i]->list, &mem->attachments); 769 770 va += bo_size; 771 } 772 773 return 0; 774 775 unwind: 776 for (; i >= 0; i--) { 777 if (!attachment[i]) 778 continue; 779 if (attachment[i]->bo_va) { 780 amdgpu_bo_reserve(bo[i], true); 781 amdgpu_vm_bo_del(adev, attachment[i]->bo_va); 782 amdgpu_bo_unreserve(bo[i]); 783 list_del(&attachment[i]->list); 784 } 785 if (bo[i]) 786 drm_gem_object_put(&bo[i]->tbo.base); 787 kfree(attachment[i]); 788 } 789 return ret; 790 } 791 792 static void kfd_mem_detach(struct kfd_mem_attachment *attachment) 793 { 794 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 795 796 pr_debug("\t remove VA 0x%llx in entry %p\n", 797 attachment->va, attachment); 798 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va); 799 drm_gem_object_put(&bo->tbo.base); 800 list_del(&attachment->list); 801 kfree(attachment); 802 } 803 804 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, 805 struct amdkfd_process_info *process_info, 806 bool userptr) 807 { 808 struct ttm_validate_buffer *entry = &mem->validate_list; 809 struct amdgpu_bo *bo = mem->bo; 810 811 INIT_LIST_HEAD(&entry->head); 812 entry->num_shared = 1; 813 entry->bo = &bo->tbo; 814 mutex_lock(&process_info->lock); 815 if (userptr) 816 list_add_tail(&entry->head, &process_info->userptr_valid_list); 817 else 818 list_add_tail(&entry->head, &process_info->kfd_bo_list); 819 mutex_unlock(&process_info->lock); 820 } 821 822 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, 823 struct amdkfd_process_info *process_info) 824 { 825 struct ttm_validate_buffer *bo_list_entry; 826 827 bo_list_entry = &mem->validate_list; 828 mutex_lock(&process_info->lock); 829 list_del(&bo_list_entry->head); 830 mutex_unlock(&process_info->lock); 831 } 832 833 /* Initializes user pages. It registers the MMU notifier and validates 834 * the userptr BO in the GTT domain. 835 * 836 * The BO must already be on the userptr_valid_list. Otherwise an 837 * eviction and restore may happen that leaves the new BO unmapped 838 * with the user mode queues running. 839 * 840 * Takes the process_info->lock to protect against concurrent restore 841 * workers. 842 * 843 * Returns 0 for success, negative errno for errors. 844 */ 845 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, 846 bool criu_resume) 847 { 848 struct amdkfd_process_info *process_info = mem->process_info; 849 struct amdgpu_bo *bo = mem->bo; 850 struct ttm_operation_ctx ctx = { true, false }; 851 int ret = 0; 852 853 mutex_lock(&process_info->lock); 854 855 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); 856 if (ret) { 857 pr_err("%s: Failed to set userptr: %d\n", __func__, ret); 858 goto out; 859 } 860 861 ret = amdgpu_mn_register(bo, user_addr); 862 if (ret) { 863 pr_err("%s: Failed to register MMU notifier: %d\n", 864 __func__, ret); 865 goto out; 866 } 867 868 if (criu_resume) { 869 /* 870 * During a CRIU restore operation, the userptr buffer objects 871 * will be validated in the restore_userptr_work worker at a 872 * later stage when it is scheduled by another ioctl called by 873 * CRIU master process for the target pid for restore. 
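		 * Bumping mem->invalid here, without getting the user pages,
		 * is what makes the restore worker pick this BO up and map it
		 * once the queues are resumed.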
874 */ 875 atomic_inc(&mem->invalid); 876 mutex_unlock(&process_info->lock); 877 return 0; 878 } 879 880 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); 881 if (ret) { 882 pr_err("%s: Failed to get user pages: %d\n", __func__, ret); 883 goto unregister_out; 884 } 885 886 ret = amdgpu_bo_reserve(bo, true); 887 if (ret) { 888 pr_err("%s: Failed to reserve BO\n", __func__); 889 goto release_out; 890 } 891 amdgpu_bo_placement_from_domain(bo, mem->domain); 892 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 893 if (ret) 894 pr_err("%s: failed to validate BO\n", __func__); 895 amdgpu_bo_unreserve(bo); 896 897 release_out: 898 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 899 unregister_out: 900 if (ret) 901 amdgpu_mn_unregister(bo); 902 out: 903 mutex_unlock(&process_info->lock); 904 return ret; 905 } 906 907 /* Reserving a BO and its page table BOs must happen atomically to 908 * avoid deadlocks. Some operations update multiple VMs at once. Track 909 * all the reservation info in a context structure. Optionally a sync 910 * object can track VM updates. 911 */ 912 struct bo_vm_reservation_context { 913 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ 914 unsigned int n_vms; /* Number of VMs reserved */ 915 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ 916 struct ww_acquire_ctx ticket; /* Reservation ticket */ 917 struct list_head list, duplicates; /* BO lists */ 918 struct amdgpu_sync *sync; /* Pointer to sync object */ 919 bool reserved; /* Whether BOs are reserved */ 920 }; 921 922 enum bo_vm_match { 923 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */ 924 BO_VM_MAPPED, /* Match VMs where a BO is mapped */ 925 BO_VM_ALL, /* Match all VMs a BO was added to */ 926 }; 927 928 /** 929 * reserve_bo_and_vm - reserve a BO and a VM unconditionally. 930 * @mem: KFD BO structure. 931 * @vm: the VM to reserve. 932 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 933 */ 934 static int reserve_bo_and_vm(struct kgd_mem *mem, 935 struct amdgpu_vm *vm, 936 struct bo_vm_reservation_context *ctx) 937 { 938 struct amdgpu_bo *bo = mem->bo; 939 int ret; 940 941 WARN_ON(!vm); 942 943 ctx->reserved = false; 944 ctx->n_vms = 1; 945 ctx->sync = &mem->sync; 946 947 INIT_LIST_HEAD(&ctx->list); 948 INIT_LIST_HEAD(&ctx->duplicates); 949 950 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); 951 if (!ctx->vm_pd) 952 return -ENOMEM; 953 954 ctx->kfd_bo.priority = 0; 955 ctx->kfd_bo.tv.bo = &bo->tbo; 956 ctx->kfd_bo.tv.num_shared = 1; 957 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 958 959 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); 960 961 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 962 false, &ctx->duplicates); 963 if (ret) { 964 pr_err("Failed to reserve buffers in ttm.\n"); 965 kfree(ctx->vm_pd); 966 ctx->vm_pd = NULL; 967 return ret; 968 } 969 970 ctx->reserved = true; 971 return 0; 972 } 973 974 /** 975 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally 976 * @mem: KFD BO structure. 977 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO 978 * is used. Otherwise, a single VM associated with the BO. 979 * @map_type: the mapping status that will be used to filter the VMs. 980 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 981 * 982 * Returns 0 for success, negative for failure. 
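 *
 * If no attachment matches, ctx->n_vms will be 0 and only the BO itself
 * is reserved.
 *
 * Illustrative usage, mirroring the callers later in this file:
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
 *	if (!ret) {
 *		// ... update or unmap the reserved BOs ...
 *		unreserve_bo_and_vms(&ctx, false, false);
 *	}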
983 */ 984 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, 985 struct amdgpu_vm *vm, enum bo_vm_match map_type, 986 struct bo_vm_reservation_context *ctx) 987 { 988 struct amdgpu_bo *bo = mem->bo; 989 struct kfd_mem_attachment *entry; 990 unsigned int i; 991 int ret; 992 993 ctx->reserved = false; 994 ctx->n_vms = 0; 995 ctx->vm_pd = NULL; 996 ctx->sync = &mem->sync; 997 998 INIT_LIST_HEAD(&ctx->list); 999 INIT_LIST_HEAD(&ctx->duplicates); 1000 1001 list_for_each_entry(entry, &mem->attachments, list) { 1002 if ((vm && vm != entry->bo_va->base.vm) || 1003 (entry->is_mapped != map_type 1004 && map_type != BO_VM_ALL)) 1005 continue; 1006 1007 ctx->n_vms++; 1008 } 1009 1010 if (ctx->n_vms != 0) { 1011 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), 1012 GFP_KERNEL); 1013 if (!ctx->vm_pd) 1014 return -ENOMEM; 1015 } 1016 1017 ctx->kfd_bo.priority = 0; 1018 ctx->kfd_bo.tv.bo = &bo->tbo; 1019 ctx->kfd_bo.tv.num_shared = 1; 1020 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 1021 1022 i = 0; 1023 list_for_each_entry(entry, &mem->attachments, list) { 1024 if ((vm && vm != entry->bo_va->base.vm) || 1025 (entry->is_mapped != map_type 1026 && map_type != BO_VM_ALL)) 1027 continue; 1028 1029 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, 1030 &ctx->vm_pd[i]); 1031 i++; 1032 } 1033 1034 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 1035 false, &ctx->duplicates); 1036 if (ret) { 1037 pr_err("Failed to reserve buffers in ttm.\n"); 1038 kfree(ctx->vm_pd); 1039 ctx->vm_pd = NULL; 1040 return ret; 1041 } 1042 1043 ctx->reserved = true; 1044 return 0; 1045 } 1046 1047 /** 1048 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context 1049 * @ctx: Reservation context to unreserve 1050 * @wait: Optionally wait for a sync object representing pending VM updates 1051 * @intr: Whether the wait is interruptible 1052 * 1053 * Also frees any resources allocated in 1054 * reserve_bo_and_(cond_)vm(s). Returns the status from 1055 * amdgpu_sync_wait. 
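 * When @wait is false, the sync object is not waited on and 0 is returned.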
1056 */ 1057 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, 1058 bool wait, bool intr) 1059 { 1060 int ret = 0; 1061 1062 if (wait) 1063 ret = amdgpu_sync_wait(ctx->sync, intr); 1064 1065 if (ctx->reserved) 1066 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); 1067 kfree(ctx->vm_pd); 1068 1069 ctx->sync = NULL; 1070 1071 ctx->reserved = false; 1072 ctx->vm_pd = NULL; 1073 1074 return ret; 1075 } 1076 1077 static void unmap_bo_from_gpuvm(struct kgd_mem *mem, 1078 struct kfd_mem_attachment *entry, 1079 struct amdgpu_sync *sync) 1080 { 1081 struct amdgpu_bo_va *bo_va = entry->bo_va; 1082 struct amdgpu_device *adev = entry->adev; 1083 struct amdgpu_vm *vm = bo_va->base.vm; 1084 1085 amdgpu_vm_bo_unmap(adev, bo_va, entry->va); 1086 1087 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); 1088 1089 amdgpu_sync_fence(sync, bo_va->last_pt_update); 1090 1091 kfd_mem_dmaunmap_attachment(mem, entry); 1092 } 1093 1094 static int update_gpuvm_pte(struct kgd_mem *mem, 1095 struct kfd_mem_attachment *entry, 1096 struct amdgpu_sync *sync) 1097 { 1098 struct amdgpu_bo_va *bo_va = entry->bo_va; 1099 struct amdgpu_device *adev = entry->adev; 1100 int ret; 1101 1102 ret = kfd_mem_dmamap_attachment(mem, entry); 1103 if (ret) 1104 return ret; 1105 1106 /* Update the page tables */ 1107 ret = amdgpu_vm_bo_update(adev, bo_va, false); 1108 if (ret) { 1109 pr_err("amdgpu_vm_bo_update failed\n"); 1110 return ret; 1111 } 1112 1113 return amdgpu_sync_fence(sync, bo_va->last_pt_update); 1114 } 1115 1116 static int map_bo_to_gpuvm(struct kgd_mem *mem, 1117 struct kfd_mem_attachment *entry, 1118 struct amdgpu_sync *sync, 1119 bool no_update_pte) 1120 { 1121 int ret; 1122 1123 /* Set virtual address for the allocation */ 1124 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, 1125 amdgpu_bo_size(entry->bo_va->base.bo), 1126 entry->pte_flags); 1127 if (ret) { 1128 pr_err("Failed to map VA 0x%llx in vm. 
ret %d\n", 1129 entry->va, ret); 1130 return ret; 1131 } 1132 1133 if (no_update_pte) 1134 return 0; 1135 1136 ret = update_gpuvm_pte(mem, entry, sync); 1137 if (ret) { 1138 pr_err("update_gpuvm_pte() failed\n"); 1139 goto update_gpuvm_pte_failed; 1140 } 1141 1142 return 0; 1143 1144 update_gpuvm_pte_failed: 1145 unmap_bo_from_gpuvm(mem, entry, sync); 1146 return ret; 1147 } 1148 1149 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size) 1150 { 1151 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); 1152 1153 if (!sg) 1154 return NULL; 1155 if (sg_alloc_table(sg, 1, GFP_KERNEL)) { 1156 kfree(sg); 1157 return NULL; 1158 } 1159 sg->sgl->dma_address = addr; 1160 sg->sgl->length = size; 1161 #ifdef CONFIG_NEED_SG_DMA_LENGTH 1162 sg->sgl->dma_length = size; 1163 #endif 1164 return sg; 1165 } 1166 1167 static int process_validate_vms(struct amdkfd_process_info *process_info) 1168 { 1169 struct amdgpu_vm *peer_vm; 1170 int ret; 1171 1172 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1173 vm_list_node) { 1174 ret = vm_validate_pt_pd_bos(peer_vm); 1175 if (ret) 1176 return ret; 1177 } 1178 1179 return 0; 1180 } 1181 1182 static int process_sync_pds_resv(struct amdkfd_process_info *process_info, 1183 struct amdgpu_sync *sync) 1184 { 1185 struct amdgpu_vm *peer_vm; 1186 int ret; 1187 1188 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1189 vm_list_node) { 1190 struct amdgpu_bo *pd = peer_vm->root.bo; 1191 1192 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 1193 AMDGPU_SYNC_NE_OWNER, 1194 AMDGPU_FENCE_OWNER_KFD); 1195 if (ret) 1196 return ret; 1197 } 1198 1199 return 0; 1200 } 1201 1202 static int process_update_pds(struct amdkfd_process_info *process_info, 1203 struct amdgpu_sync *sync) 1204 { 1205 struct amdgpu_vm *peer_vm; 1206 int ret; 1207 1208 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1209 vm_list_node) { 1210 ret = vm_update_pds(peer_vm, sync); 1211 if (ret) 1212 return ret; 1213 } 1214 1215 return 0; 1216 } 1217 1218 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, 1219 struct dma_fence **ef) 1220 { 1221 struct amdkfd_process_info *info = NULL; 1222 int ret; 1223 1224 if (!*process_info) { 1225 info = kzalloc(sizeof(*info), GFP_KERNEL); 1226 if (!info) 1227 return -ENOMEM; 1228 1229 mutex_init(&info->lock); 1230 INIT_LIST_HEAD(&info->vm_list_head); 1231 INIT_LIST_HEAD(&info->kfd_bo_list); 1232 INIT_LIST_HEAD(&info->userptr_valid_list); 1233 INIT_LIST_HEAD(&info->userptr_inval_list); 1234 1235 info->eviction_fence = 1236 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 1237 current->mm, 1238 NULL); 1239 if (!info->eviction_fence) { 1240 pr_err("Failed to create eviction fence\n"); 1241 ret = -ENOMEM; 1242 goto create_evict_fence_fail; 1243 } 1244 1245 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); 1246 atomic_set(&info->evicted_bos, 0); 1247 INIT_DELAYED_WORK(&info->restore_userptr_work, 1248 amdgpu_amdkfd_restore_userptr_worker); 1249 1250 *process_info = info; 1251 *ef = dma_fence_get(&info->eviction_fence->base); 1252 } 1253 1254 vm->process_info = *process_info; 1255 1256 /* Validate page directory and attach eviction fence */ 1257 ret = amdgpu_bo_reserve(vm->root.bo, true); 1258 if (ret) 1259 goto reserve_pd_fail; 1260 ret = vm_validate_pt_pd_bos(vm); 1261 if (ret) { 1262 pr_err("validate_pt_pd_bos() failed\n"); 1263 goto validate_pd_fail; 1264 } 1265 ret = amdgpu_bo_sync_wait(vm->root.bo, 1266 AMDGPU_FENCE_OWNER_KFD, false); 1267 if (ret) 1268 goto wait_pd_fail; 1269 ret = 
		dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 * - USERPTR BOs are UNPINNABLE and will return error
 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *   PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: 0 if the BO was successfully pinned, non-zero on error.
 */
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return ret;

	ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
	if (ret)
		pr_err("Error in Pinning BO to domain: %d\n", domain);

	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
	amdgpu_bo_unreserve(bo);

	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
 * @bo: Handle of buffer object being unpinned
 *
 * - Is an illegal request for USERPTR BOs and is ignored
 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *   PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct file *filp, u32 pasid,
					   void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	int ret;

	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Free the originally amdgpu-allocated pasid; it will be replaced
	 * with a KFD-allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;
	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					    void *drm_priv)
{
	struct amdgpu_vm *avm;

	if (WARN_ON(!adev || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm was already released when
	 * the vm was converted into a compute vm. The current pasid is
	 * managed by KFD and will be released on KFD process destroy.
	 * Set the amdgpu pasid to 0 to avoid a duplicate release.
1451 */ 1452 amdgpu_vm_release_compute(adev, avm); 1453 } 1454 1455 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) 1456 { 1457 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1458 struct amdgpu_bo *pd = avm->root.bo; 1459 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1460 1461 if (adev->asic_type < CHIP_VEGA10) 1462 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; 1463 return avm->pd_phys_addr; 1464 } 1465 1466 void amdgpu_amdkfd_block_mmu_notifications(void *p) 1467 { 1468 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1469 1470 mutex_lock(&pinfo->lock); 1471 WRITE_ONCE(pinfo->block_mmu_notifications, true); 1472 mutex_unlock(&pinfo->lock); 1473 } 1474 1475 int amdgpu_amdkfd_criu_resume(void *p) 1476 { 1477 int ret = 0; 1478 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1479 1480 mutex_lock(&pinfo->lock); 1481 pr_debug("scheduling work\n"); 1482 atomic_inc(&pinfo->evicted_bos); 1483 if (!READ_ONCE(pinfo->block_mmu_notifications)) { 1484 ret = -EINVAL; 1485 goto out_unlock; 1486 } 1487 WRITE_ONCE(pinfo->block_mmu_notifications, false); 1488 schedule_delayed_work(&pinfo->restore_userptr_work, 0); 1489 1490 out_unlock: 1491 mutex_unlock(&pinfo->lock); 1492 return ret; 1493 } 1494 1495 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( 1496 struct amdgpu_device *adev, uint64_t va, uint64_t size, 1497 void *drm_priv, struct kgd_mem **mem, 1498 uint64_t *offset, uint32_t flags, bool criu_resume) 1499 { 1500 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1501 enum ttm_bo_type bo_type = ttm_bo_type_device; 1502 struct sg_table *sg = NULL; 1503 uint64_t user_addr = 0; 1504 struct amdgpu_bo *bo; 1505 struct drm_gem_object *gobj = NULL; 1506 u32 domain, alloc_domain; 1507 u64 alloc_flags; 1508 int ret; 1509 1510 /* 1511 * Check on which domain to allocate BO 1512 */ 1513 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 1514 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; 1515 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; 1516 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 1517 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; 1518 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 1519 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1520 alloc_flags = 0; 1521 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 1522 domain = AMDGPU_GEM_DOMAIN_GTT; 1523 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1524 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE; 1525 if (!offset || !*offset) 1526 return -EINVAL; 1527 user_addr = untagged_addr(*offset); 1528 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1529 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1530 domain = AMDGPU_GEM_DOMAIN_GTT; 1531 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1532 bo_type = ttm_bo_type_sg; 1533 alloc_flags = 0; 1534 if (size > UINT_MAX) 1535 return -EINVAL; 1536 sg = create_doorbell_sg(*offset, size); 1537 if (!sg) 1538 return -ENOMEM; 1539 } else { 1540 return -EINVAL; 1541 } 1542 1543 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 1544 if (!*mem) { 1545 ret = -ENOMEM; 1546 goto err; 1547 } 1548 INIT_LIST_HEAD(&(*mem)->attachments); 1549 mutex_init(&(*mem)->lock); 1550 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); 1551 1552 /* Workaround for AQL queue wraparound bug. Map the same 1553 * memory twice. That means we only actually allocate half 1554 * the memory. 
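	 * For example, an 8 MB AQL queue request creates a 4 MB BO that is
	 * later attached at two consecutive 4 MB ranges of the requested VA.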
1555 */ 1556 if ((*mem)->aql_queue) 1557 size = size >> 1; 1558 1559 (*mem)->alloc_flags = flags; 1560 1561 amdgpu_sync_create(&(*mem)->sync); 1562 1563 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); 1564 if (ret) { 1565 pr_debug("Insufficient memory\n"); 1566 goto err_reserve_limit; 1567 } 1568 1569 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", 1570 va, size, domain_string(alloc_domain)); 1571 1572 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, 1573 bo_type, NULL, &gobj); 1574 if (ret) { 1575 pr_debug("Failed to create BO on domain %s. ret %d\n", 1576 domain_string(alloc_domain), ret); 1577 goto err_bo_create; 1578 } 1579 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); 1580 if (ret) { 1581 pr_debug("Failed to allow vma node access. ret %d\n", ret); 1582 goto err_node_allow; 1583 } 1584 bo = gem_to_amdgpu_bo(gobj); 1585 if (bo_type == ttm_bo_type_sg) { 1586 bo->tbo.sg = sg; 1587 bo->tbo.ttm->sg = sg; 1588 } 1589 bo->kfd_bo = *mem; 1590 (*mem)->bo = bo; 1591 if (user_addr) 1592 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; 1593 1594 (*mem)->va = va; 1595 (*mem)->domain = domain; 1596 (*mem)->mapped_to_gpu_memory = 0; 1597 (*mem)->process_info = avm->process_info; 1598 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); 1599 1600 if (user_addr) { 1601 pr_debug("creating userptr BO for user_addr = %llu\n", user_addr); 1602 ret = init_user_pages(*mem, user_addr, criu_resume); 1603 if (ret) 1604 goto allocate_init_user_pages_failed; 1605 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1606 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1607 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); 1608 if (ret) { 1609 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); 1610 goto err_pin_bo; 1611 } 1612 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 1613 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; 1614 } 1615 1616 if (offset) 1617 *offset = amdgpu_bo_mmap_offset(bo); 1618 1619 return 0; 1620 1621 allocate_init_user_pages_failed: 1622 err_pin_bo: 1623 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 1624 drm_vma_node_revoke(&gobj->vma_node, drm_priv); 1625 err_node_allow: 1626 /* Don't unreserve system mem limit twice */ 1627 goto err_reserve_limit; 1628 err_bo_create: 1629 unreserve_mem_limit(adev, size, flags); 1630 err_reserve_limit: 1631 mutex_destroy(&(*mem)->lock); 1632 if (gobj) 1633 drm_gem_object_put(gobj); 1634 else 1635 kfree(*mem); 1636 err: 1637 if (sg) { 1638 sg_free_table(sg); 1639 kfree(sg); 1640 } 1641 return ret; 1642 } 1643 1644 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( 1645 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, 1646 uint64_t *size) 1647 { 1648 struct amdkfd_process_info *process_info = mem->process_info; 1649 unsigned long bo_size = mem->bo->tbo.base.size; 1650 struct kfd_mem_attachment *entry, *tmp; 1651 struct bo_vm_reservation_context ctx; 1652 struct ttm_validate_buffer *bo_list_entry; 1653 unsigned int mapped_to_gpu_memory; 1654 int ret; 1655 bool is_imported = false; 1656 1657 mutex_lock(&mem->lock); 1658 1659 /* Unpin MMIO/DOORBELL BO's that were pinnned during allocation */ 1660 if (mem->alloc_flags & 1661 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1662 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1663 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); 1664 } 1665 1666 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; 1667 is_imported = mem->is_imported; 1668 mutex_unlock(&mem->lock); 1669 /* lock is not needed after this, since mem is unused and will 1670 * be freed anyway 1671 */ 
1672 1673 if (mapped_to_gpu_memory > 0) { 1674 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", 1675 mem->va, bo_size); 1676 return -EBUSY; 1677 } 1678 1679 /* Make sure restore workers don't access the BO any more */ 1680 bo_list_entry = &mem->validate_list; 1681 mutex_lock(&process_info->lock); 1682 list_del(&bo_list_entry->head); 1683 mutex_unlock(&process_info->lock); 1684 1685 /* No more MMU notifiers */ 1686 amdgpu_mn_unregister(mem->bo); 1687 1688 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1689 if (unlikely(ret)) 1690 return ret; 1691 1692 /* The eviction fence should be removed by the last unmap. 1693 * TODO: Log an error condition if the bo still has the eviction fence 1694 * attached 1695 */ 1696 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1697 process_info->eviction_fence); 1698 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, 1699 mem->va + bo_size * (1 + mem->aql_queue)); 1700 1701 /* Remove from VM internal data structures */ 1702 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) 1703 kfd_mem_detach(entry); 1704 1705 ret = unreserve_bo_and_vms(&ctx, false, false); 1706 1707 /* Free the sync object */ 1708 amdgpu_sync_free(&mem->sync); 1709 1710 /* If the SG is not NULL, it's one we created for a doorbell or mmio 1711 * remap BO. We need to free it. 1712 */ 1713 if (mem->bo->tbo.sg) { 1714 sg_free_table(mem->bo->tbo.sg); 1715 kfree(mem->bo->tbo.sg); 1716 } 1717 1718 /* Update the size of the BO being freed if it was allocated from 1719 * VRAM and is not imported. 1720 */ 1721 if (size) { 1722 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && 1723 (!is_imported)) 1724 *size = bo_size; 1725 else 1726 *size = 0; 1727 } 1728 1729 /* Free the BO*/ 1730 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); 1731 if (mem->dmabuf) 1732 dma_buf_put(mem->dmabuf); 1733 mutex_destroy(&mem->lock); 1734 1735 /* If this releases the last reference, it will end up calling 1736 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why 1737 * this needs to be the last call here. 1738 */ 1739 drm_gem_object_put(&mem->bo->tbo.base); 1740 1741 return ret; 1742 } 1743 1744 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1745 struct amdgpu_device *adev, struct kgd_mem *mem, 1746 void *drm_priv) 1747 { 1748 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1749 int ret; 1750 struct amdgpu_bo *bo; 1751 uint32_t domain; 1752 struct kfd_mem_attachment *entry; 1753 struct bo_vm_reservation_context ctx; 1754 unsigned long bo_size; 1755 bool is_invalid_userptr = false; 1756 1757 bo = mem->bo; 1758 if (!bo) { 1759 pr_err("Invalid BO when mapping memory to GPU\n"); 1760 return -EINVAL; 1761 } 1762 1763 /* Make sure restore is not running concurrently. Since we 1764 * don't map invalid userptr BOs, we rely on the next restore 1765 * worker to do the mapping 1766 */ 1767 mutex_lock(&mem->process_info->lock); 1768 1769 /* Lock mmap-sem. 
If we find an invalid userptr BO, we can be 1770 * sure that the MMU notifier is no longer running 1771 * concurrently and the queues are actually stopped 1772 */ 1773 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1774 mmap_write_lock(current->mm); 1775 is_invalid_userptr = atomic_read(&mem->invalid); 1776 mmap_write_unlock(current->mm); 1777 } 1778 1779 mutex_lock(&mem->lock); 1780 1781 domain = mem->domain; 1782 bo_size = bo->tbo.base.size; 1783 1784 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", 1785 mem->va, 1786 mem->va + bo_size * (1 + mem->aql_queue), 1787 avm, domain_string(domain)); 1788 1789 if (!kfd_mem_is_attached(avm, mem)) { 1790 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); 1791 if (ret) 1792 goto out; 1793 } 1794 1795 ret = reserve_bo_and_vm(mem, avm, &ctx); 1796 if (unlikely(ret)) 1797 goto out; 1798 1799 /* Userptr can be marked as "not invalid", but not actually be 1800 * validated yet (still in the system domain). In that case 1801 * the queues are still stopped and we can leave mapping for 1802 * the next restore worker 1803 */ 1804 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 1805 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 1806 is_invalid_userptr = true; 1807 1808 ret = vm_validate_pt_pd_bos(avm); 1809 if (unlikely(ret)) 1810 goto out_unreserve; 1811 1812 if (mem->mapped_to_gpu_memory == 0 && 1813 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1814 /* Validate BO only once. The eviction fence gets added to BO 1815 * the first time it is mapped. Validate will wait for all 1816 * background evictions to complete. 1817 */ 1818 ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 1819 if (ret) { 1820 pr_debug("Validate failed\n"); 1821 goto out_unreserve; 1822 } 1823 } 1824 1825 list_for_each_entry(entry, &mem->attachments, list) { 1826 if (entry->bo_va->base.vm != avm || entry->is_mapped) 1827 continue; 1828 1829 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", 1830 entry->va, entry->va + bo_size, entry); 1831 1832 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, 1833 is_invalid_userptr); 1834 if (ret) { 1835 pr_err("Failed to map bo to gpuvm\n"); 1836 goto out_unreserve; 1837 } 1838 1839 ret = vm_update_pds(avm, ctx.sync); 1840 if (ret) { 1841 pr_err("Failed to update page directories\n"); 1842 goto out_unreserve; 1843 } 1844 1845 entry->is_mapped = true; 1846 mem->mapped_to_gpu_memory++; 1847 pr_debug("\t INC mapping count %d\n", 1848 mem->mapped_to_gpu_memory); 1849 } 1850 1851 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) 1852 amdgpu_bo_fence(bo, 1853 &avm->process_info->eviction_fence->base, 1854 true); 1855 ret = unreserve_bo_and_vms(&ctx, false, false); 1856 1857 goto out; 1858 1859 out_unreserve: 1860 unreserve_bo_and_vms(&ctx, false, false); 1861 out: 1862 mutex_unlock(&mem->process_info->lock); 1863 mutex_unlock(&mem->lock); 1864 return ret; 1865 } 1866 1867 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 1868 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) 1869 { 1870 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1871 struct amdkfd_process_info *process_info = avm->process_info; 1872 unsigned long bo_size = mem->bo->tbo.base.size; 1873 struct kfd_mem_attachment *entry; 1874 struct bo_vm_reservation_context ctx; 1875 int ret; 1876 1877 mutex_lock(&mem->lock); 1878 1879 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); 1880 if (unlikely(ret)) 1881 goto out; 1882 /* If no VMs were reserved, it means the BO wasn't actually mapped */ 1883 if (ctx.n_vms == 0) { 1884 ret = -EINVAL; 1885 goto unreserve_out; 
1886 } 1887 1888 ret = vm_validate_pt_pd_bos(avm); 1889 if (unlikely(ret)) 1890 goto unreserve_out; 1891 1892 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", 1893 mem->va, 1894 mem->va + bo_size * (1 + mem->aql_queue), 1895 avm); 1896 1897 list_for_each_entry(entry, &mem->attachments, list) { 1898 if (entry->bo_va->base.vm != avm || !entry->is_mapped) 1899 continue; 1900 1901 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", 1902 entry->va, entry->va + bo_size, entry); 1903 1904 unmap_bo_from_gpuvm(mem, entry, ctx.sync); 1905 entry->is_mapped = false; 1906 1907 mem->mapped_to_gpu_memory--; 1908 pr_debug("\t DEC mapping count %d\n", 1909 mem->mapped_to_gpu_memory); 1910 } 1911 1912 /* If BO is unmapped from all VMs, unfence it. It can be evicted if 1913 * required. 1914 */ 1915 if (mem->mapped_to_gpu_memory == 0 && 1916 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && 1917 !mem->bo->tbo.pin_count) 1918 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1919 process_info->eviction_fence); 1920 1921 unreserve_out: 1922 unreserve_bo_and_vms(&ctx, false, false); 1923 out: 1924 mutex_unlock(&mem->lock); 1925 return ret; 1926 } 1927 1928 int amdgpu_amdkfd_gpuvm_sync_memory( 1929 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) 1930 { 1931 struct amdgpu_sync sync; 1932 int ret; 1933 1934 amdgpu_sync_create(&sync); 1935 1936 mutex_lock(&mem->lock); 1937 amdgpu_sync_clone(&mem->sync, &sync); 1938 mutex_unlock(&mem->lock); 1939 1940 ret = amdgpu_sync_wait(&sync, intr); 1941 amdgpu_sync_free(&sync); 1942 return ret; 1943 } 1944 1945 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, 1946 struct kgd_mem *mem, void **kptr, uint64_t *size) 1947 { 1948 int ret; 1949 struct amdgpu_bo *bo = mem->bo; 1950 1951 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1952 pr_err("userptr can't be mapped to kernel\n"); 1953 return -EINVAL; 1954 } 1955 1956 /* delete kgd_mem from kfd_bo_list to avoid re-validating 1957 * this BO in BO's restoring after eviction. 1958 */ 1959 mutex_lock(&mem->process_info->lock); 1960 1961 ret = amdgpu_bo_reserve(bo, true); 1962 if (ret) { 1963 pr_err("Failed to reserve bo. ret %d\n", ret); 1964 goto bo_reserve_failed; 1965 } 1966 1967 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 1968 if (ret) { 1969 pr_err("Failed to pin bo. ret %d\n", ret); 1970 goto pin_failed; 1971 } 1972 1973 ret = amdgpu_bo_kmap(bo, kptr); 1974 if (ret) { 1975 pr_err("Failed to map bo to kernel. 
ret %d\n", ret); 1976 goto kmap_failed; 1977 } 1978 1979 amdgpu_amdkfd_remove_eviction_fence( 1980 bo, mem->process_info->eviction_fence); 1981 list_del_init(&mem->validate_list.head); 1982 1983 if (size) 1984 *size = amdgpu_bo_size(bo); 1985 1986 amdgpu_bo_unreserve(bo); 1987 1988 mutex_unlock(&mem->process_info->lock); 1989 return 0; 1990 1991 kmap_failed: 1992 amdgpu_bo_unpin(bo); 1993 pin_failed: 1994 amdgpu_bo_unreserve(bo); 1995 bo_reserve_failed: 1996 mutex_unlock(&mem->process_info->lock); 1997 1998 return ret; 1999 } 2000 2001 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, 2002 struct kgd_mem *mem) 2003 { 2004 struct amdgpu_bo *bo = mem->bo; 2005 2006 amdgpu_bo_reserve(bo, true); 2007 amdgpu_bo_kunmap(bo); 2008 amdgpu_bo_unpin(bo); 2009 amdgpu_bo_unreserve(bo); 2010 } 2011 2012 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, 2013 struct kfd_vm_fault_info *mem) 2014 { 2015 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { 2016 *mem = *adev->gmc.vm_fault_info; 2017 mb(); 2018 atomic_set(&adev->gmc.vm_fault_info_updated, 0); 2019 } 2020 return 0; 2021 } 2022 2023 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, 2024 struct dma_buf *dma_buf, 2025 uint64_t va, void *drm_priv, 2026 struct kgd_mem **mem, uint64_t *size, 2027 uint64_t *mmap_offset) 2028 { 2029 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2030 struct drm_gem_object *obj; 2031 struct amdgpu_bo *bo; 2032 int ret; 2033 2034 if (dma_buf->ops != &amdgpu_dmabuf_ops) 2035 /* Can't handle non-graphics buffers */ 2036 return -EINVAL; 2037 2038 obj = dma_buf->priv; 2039 if (drm_to_adev(obj->dev) != adev) 2040 /* Can't handle buffers from other devices */ 2041 return -EINVAL; 2042 2043 bo = gem_to_amdgpu_bo(obj); 2044 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | 2045 AMDGPU_GEM_DOMAIN_GTT))) 2046 /* Only VRAM and GTT BOs are supported */ 2047 return -EINVAL; 2048 2049 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 2050 if (!*mem) 2051 return -ENOMEM; 2052 2053 ret = drm_vma_node_allow(&obj->vma_node, drm_priv); 2054 if (ret) { 2055 kfree(mem); 2056 return ret; 2057 } 2058 2059 if (size) 2060 *size = amdgpu_bo_size(bo); 2061 2062 if (mmap_offset) 2063 *mmap_offset = amdgpu_bo_mmap_offset(bo); 2064 2065 INIT_LIST_HEAD(&(*mem)->attachments); 2066 mutex_init(&(*mem)->lock); 2067 2068 (*mem)->alloc_flags = 2069 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 2070 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) 2071 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE 2072 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 2073 2074 drm_gem_object_get(&bo->tbo.base); 2075 (*mem)->bo = bo; 2076 (*mem)->va = va; 2077 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 2078 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; 2079 (*mem)->mapped_to_gpu_memory = 0; 2080 (*mem)->process_info = avm->process_info; 2081 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); 2082 amdgpu_sync_create(&(*mem)->sync); 2083 (*mem)->is_imported = true; 2084 2085 return 0; 2086 } 2087 2088 /* Evict a userptr BO by stopping the queues if necessary 2089 * 2090 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it 2091 * cannot do any memory allocations, and cannot take any locks that 2092 * are held elsewhere while allocating memory. Therefore this is as 2093 * simple as possible, using atomic counters. 2094 * 2095 * It doesn't do anything to the BO itself. The real work happens in 2096 * restore, where we get updated page addresses. 
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	/* Do not process MMU notifications until stage-4 IOCTL is received */
	if (READ_ONCE(process_info->block_mmu_notifications))
		return 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}

/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("Failed %d to get user pages\n", ret);

			/* Treat -EFAULT (bad address) as success. The GPU
			 * will fault later if it actually accesses the
			 * address, which is better than hanging indefinitely
			 * with stalled user mode queues.
			 *
			 * Return other errors (-EBUSY, -ENOMEM) so the
			 * restore is retried.
			 */
			if (ret != -EFAULT)
				return ret;
		} else {

			/*
			 * FIXME: Cannot ignore the return code, must hold
			 * notifier_lock
			 */
			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
		}

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

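/*
 * Illustrative summary, not part of the driver: the userptr eviction/restore
 * protocol above is driven by two counters. The MMU notifier path calls
 * amdgpu_amdkfd_evict_userptr(), which bumps mem->invalid and
 * process_info->evicted_bos, quiesces the queues on the first eviction and
 * schedules this delayed worker. The worker re-acquires the user pages via
 * update_invalid_user_pages() and validate_invalid_user_pages() and resumes
 * the queues only if evicted_bos did not change in the meantime.
 */
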
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. The function
 * should be called while the process is still valid. BO restore involves:
 *
 * 1.  Release the old eviction fence and create a new one
 * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
					AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
Try again\n"); 2500 goto validate_map_fail; 2501 } 2502 list_for_each_entry(attachment, &mem->attachments, list) { 2503 if (!attachment->is_mapped) 2504 continue; 2505 2506 kfd_mem_dmaunmap_attachment(mem, attachment); 2507 ret = update_gpuvm_pte(mem, attachment, &sync_obj); 2508 if (ret) { 2509 pr_debug("Memory eviction: update PTE failed. Try again\n"); 2510 goto validate_map_fail; 2511 } 2512 } 2513 } 2514 2515 if (failed_size) 2516 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2517 2518 /* Update page directories */ 2519 ret = process_update_pds(process_info, &sync_obj); 2520 if (ret) { 2521 pr_debug("Memory eviction: update PDs failed. Try again\n"); 2522 goto validate_map_fail; 2523 } 2524 2525 /* Wait for validate and PT updates to finish */ 2526 amdgpu_sync_wait(&sync_obj, false); 2527 2528 /* Release old eviction fence and create new one, because fence only 2529 * goes from unsignaled to signaled, fence cannot be reused. 2530 * Use context and mm from the old fence. 2531 */ 2532 new_fence = amdgpu_amdkfd_fence_create( 2533 process_info->eviction_fence->base.context, 2534 process_info->eviction_fence->mm, 2535 NULL); 2536 if (!new_fence) { 2537 pr_err("Failed to create eviction fence\n"); 2538 ret = -ENOMEM; 2539 goto validate_map_fail; 2540 } 2541 dma_fence_put(&process_info->eviction_fence->base); 2542 process_info->eviction_fence = new_fence; 2543 *ef = dma_fence_get(&new_fence->base); 2544 2545 /* Attach new eviction fence to all BOs */ 2546 list_for_each_entry(mem, &process_info->kfd_bo_list, 2547 validate_list.head) 2548 amdgpu_bo_fence(mem->bo, 2549 &process_info->eviction_fence->base, true); 2550 2551 /* Attach eviction fence to PD / PT BOs */ 2552 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2553 vm_list_node) { 2554 struct amdgpu_bo *bo = peer_vm->root.bo; 2555 2556 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); 2557 } 2558 2559 validate_map_fail: 2560 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list); 2561 amdgpu_sync_free(&sync_obj); 2562 ttm_reserve_fail: 2563 mutex_unlock(&process_info->lock); 2564 kfree(pd_bo_list); 2565 return ret; 2566 } 2567 2568 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) 2569 { 2570 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 2571 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws; 2572 int ret; 2573 2574 if (!info || !gws) 2575 return -EINVAL; 2576 2577 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 2578 if (!*mem) 2579 return -ENOMEM; 2580 2581 mutex_init(&(*mem)->lock); 2582 INIT_LIST_HEAD(&(*mem)->attachments); 2583 (*mem)->bo = amdgpu_bo_ref(gws_bo); 2584 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; 2585 (*mem)->process_info = process_info; 2586 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); 2587 amdgpu_sync_create(&(*mem)->sync); 2588 2589 2590 /* Validate gws bo the first time it is added to process */ 2591 mutex_lock(&(*mem)->process_info->lock); 2592 ret = amdgpu_bo_reserve(gws_bo, false); 2593 if (unlikely(ret)) { 2594 pr_err("Reserve gws bo failed %d\n", ret); 2595 goto bo_reservation_failure; 2596 } 2597 2598 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true); 2599 if (ret) { 2600 pr_err("GWS BO validate failed %d\n", ret); 2601 goto bo_validation_failure; 2602 } 2603 /* GWS resource is shared b/t amdgpu and amdkfd 2604 * Add process eviction fence to bo so they can 2605 * evict each other. 
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);


	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the BO so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config)
{
	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}

bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->is_mapped && entry->adev == adev)
			return true;
	}
	return false;
}