1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2014-2018 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 #include <linux/dma-buf.h> 24 #include <linux/list.h> 25 #include <linux/pagemap.h> 26 #include <linux/sched/mm.h> 27 #include <linux/sched/task.h> 28 29 #include "amdgpu_object.h" 30 #include "amdgpu_gem.h" 31 #include "amdgpu_vm.h" 32 #include "amdgpu_amdkfd.h" 33 #include "amdgpu_dma_buf.h" 34 #include <uapi/linux/kfd_ioctl.h> 35 #include "amdgpu_xgmi.h" 36 #include "kfd_smi_events.h" 37 38 /* Userptr restore delay, just long enough to allow consecutive VM 39 * changes to accumulate 40 */ 41 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1 42 43 /* 44 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB 45 * BO chunk 46 */ 47 #define VRAM_AVAILABLITY_ALIGN (1 << 21) 48 49 /* Impose limit on how much memory KFD can use */ 50 static struct { 51 uint64_t max_system_mem_limit; 52 uint64_t max_ttm_mem_limit; 53 int64_t system_mem_used; 54 int64_t ttm_mem_used; 55 spinlock_t mem_limit_lock; 56 } kfd_mem_limit; 57 58 static const char * const domain_bit_to_string[] = { 59 "CPU", 60 "GTT", 61 "VRAM", 62 "GDS", 63 "GWS", 64 "OA" 65 }; 66 67 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1] 68 69 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); 70 71 static bool kfd_mem_is_attached(struct amdgpu_vm *avm, 72 struct kgd_mem *mem) 73 { 74 struct kfd_mem_attachment *entry; 75 76 list_for_each_entry(entry, &mem->attachments, list) 77 if (entry->bo_va->base.vm == avm) 78 return true; 79 80 return false; 81 } 82 83 /* Set memory usage limits. 
Current, limits are 84 * System (TTM + userptr) memory - 15/16th System RAM 85 * TTM memory - 3/8th System RAM 86 */ 87 void amdgpu_amdkfd_gpuvm_init_mem_limits(void) 88 { 89 struct sysinfo si; 90 uint64_t mem; 91 92 si_meminfo(&si); 93 mem = si.freeram - si.freehigh; 94 mem *= si.mem_unit; 95 96 spin_lock_init(&kfd_mem_limit.mem_limit_lock); 97 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); 98 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); 99 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", 100 (kfd_mem_limit.max_system_mem_limit >> 20), 101 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 102 } 103 104 void amdgpu_amdkfd_reserve_system_mem(uint64_t size) 105 { 106 kfd_mem_limit.system_mem_used += size; 107 } 108 109 /* Estimate page table size needed to represent a given memory size 110 * 111 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory 112 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB 113 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize 114 * for 2MB pages for TLB efficiency. However, small allocations and 115 * fragmented system memory still need some 4KB pages. We choose a 116 * compromise that should work in most cases without reserving too 117 * much memory for page tables unnecessarily (factor 16K, >> 14). 118 */ 119 120 #define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM) 121 122 /** 123 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size 124 * of buffer. 125 * 126 * @adev: Device to which allocated BO belongs to 127 * @size: Size of buffer, in bytes, encapsulated by B0. This should be 128 * equivalent to amdgpu_bo_size(BO) 129 * @alloc_flag: Flag used in allocating a BO as noted above 130 * 131 * Return: returns -ENOMEM in case of error, ZERO otherwise 132 */ 133 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, 134 uint64_t size, u32 alloc_flag) 135 { 136 uint64_t reserved_for_pt = 137 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); 138 size_t system_mem_needed, ttm_mem_needed, vram_needed; 139 int ret = 0; 140 141 system_mem_needed = 0; 142 ttm_mem_needed = 0; 143 vram_needed = 0; 144 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 145 system_mem_needed = size; 146 ttm_mem_needed = size; 147 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 148 /* 149 * Conservatively round up the allocation requirement to 2 MB 150 * to avoid fragmentation caused by 4K allocations in the tail 151 * 2M BO chunk. 
152 */ 153 vram_needed = size; 154 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 155 system_mem_needed = size; 156 } else if (!(alloc_flag & 157 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 158 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 159 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); 160 return -ENOMEM; 161 } 162 163 spin_lock(&kfd_mem_limit.mem_limit_lock); 164 165 if (kfd_mem_limit.system_mem_used + system_mem_needed > 166 kfd_mem_limit.max_system_mem_limit) 167 pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); 168 169 if ((kfd_mem_limit.system_mem_used + system_mem_needed > 170 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || 171 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > 172 kfd_mem_limit.max_ttm_mem_limit) || 173 (adev && adev->kfd.vram_used + vram_needed > 174 adev->gmc.real_vram_size - reserved_for_pt)) { 175 ret = -ENOMEM; 176 goto release; 177 } 178 179 /* Update memory accounting by decreasing available system 180 * memory, TTM memory and GPU memory as computed above 181 */ 182 WARN_ONCE(vram_needed && !adev, 183 "adev reference can't be null when vram is used"); 184 if (adev) { 185 adev->kfd.vram_used += vram_needed; 186 adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); 187 } 188 kfd_mem_limit.system_mem_used += system_mem_needed; 189 kfd_mem_limit.ttm_mem_used += ttm_mem_needed; 190 191 release: 192 spin_unlock(&kfd_mem_limit.mem_limit_lock); 193 return ret; 194 } 195 196 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, 197 uint64_t size, u32 alloc_flag) 198 { 199 spin_lock(&kfd_mem_limit.mem_limit_lock); 200 201 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 202 kfd_mem_limit.system_mem_used -= size; 203 kfd_mem_limit.ttm_mem_used -= size; 204 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 205 WARN_ONCE(!adev, 206 "adev reference can't be null when alloc mem flags vram is set"); 207 if (adev) { 208 adev->kfd.vram_used -= size; 209 adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN); 210 } 211 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 212 kfd_mem_limit.system_mem_used -= size; 213 } else if (!(alloc_flag & 214 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 215 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 216 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); 217 goto release; 218 } 219 WARN_ONCE(adev && adev->kfd.vram_used < 0, 220 "KFD VRAM memory accounting unbalanced"); 221 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, 222 "KFD TTM memory accounting unbalanced"); 223 WARN_ONCE(kfd_mem_limit.system_mem_used < 0, 224 "KFD system memory accounting unbalanced"); 225 226 release: 227 spin_unlock(&kfd_mem_limit.mem_limit_lock); 228 } 229 230 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) 231 { 232 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 233 u32 alloc_flags = bo->kfd_bo->alloc_flags; 234 u64 size = amdgpu_bo_size(bo); 235 236 amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags); 237 238 kfree(bo->kfd_bo); 239 } 240 241 /** 242 * @create_dmamap_sg_bo: Creates a amdgpu_bo object to reflect information 243 * about USERPTR or DOOREBELL or MMIO BO. 244 * @adev: Device for which dmamap BO is being created 245 * @mem: BO of peer device that is being DMA mapped. 
Provides parameters 246 * in building the dmamap BO 247 * @bo_out: Output parameter updated with handle of dmamap BO 248 */ 249 static int 250 create_dmamap_sg_bo(struct amdgpu_device *adev, 251 struct kgd_mem *mem, struct amdgpu_bo **bo_out) 252 { 253 struct drm_gem_object *gem_obj; 254 int ret, align; 255 256 ret = amdgpu_bo_reserve(mem->bo, false); 257 if (ret) 258 return ret; 259 260 align = 1; 261 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align, 262 AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE, 263 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj); 264 265 amdgpu_bo_unreserve(mem->bo); 266 267 if (ret) { 268 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret); 269 return -EINVAL; 270 } 271 272 *bo_out = gem_to_amdgpu_bo(gem_obj); 273 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); 274 return ret; 275 } 276 277 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's 278 * reservation object. 279 * 280 * @bo: [IN] Remove eviction fence(s) from this BO 281 * @ef: [IN] This eviction fence is removed if it 282 * is present in the shared list. 283 * 284 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held. 285 */ 286 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 287 struct amdgpu_amdkfd_fence *ef) 288 { 289 struct dma_fence *replacement; 290 291 if (!ef) 292 return -EINVAL; 293 294 /* TODO: Instead of block before we should use the fence of the page 295 * table update and TLB flush here directly. 296 */ 297 replacement = dma_fence_get_stub(); 298 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, 299 replacement, DMA_RESV_USAGE_BOOKKEEP); 300 dma_fence_put(replacement); 301 return 0; 302 } 303 304 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) 305 { 306 struct amdgpu_bo *root = bo; 307 struct amdgpu_vm_bo_base *vm_bo; 308 struct amdgpu_vm *vm; 309 struct amdkfd_process_info *info; 310 struct amdgpu_amdkfd_fence *ef; 311 int ret; 312 313 /* we can always get vm_bo from root PD bo.*/ 314 while (root->parent) 315 root = root->parent; 316 317 vm_bo = root->vm_bo; 318 if (!vm_bo) 319 return 0; 320 321 vm = vm_bo->vm; 322 if (!vm) 323 return 0; 324 325 info = vm->process_info; 326 if (!info || !info->eviction_fence) 327 return 0; 328 329 ef = container_of(dma_fence_get(&info->eviction_fence->base), 330 struct amdgpu_amdkfd_fence, base); 331 332 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); 333 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); 334 dma_resv_unlock(bo->tbo.base.resv); 335 336 dma_fence_put(&ef->base); 337 return ret; 338 } 339 340 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, 341 bool wait) 342 { 343 struct ttm_operation_ctx ctx = { false, false }; 344 int ret; 345 346 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), 347 "Called with userptr BO")) 348 return -EINVAL; 349 350 amdgpu_bo_placement_from_domain(bo, domain); 351 352 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 353 if (ret) 354 goto validate_fail; 355 if (wait) 356 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 357 358 validate_fail: 359 return ret; 360 } 361 362 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 363 { 364 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); 365 } 366 367 /* vm_validate_pt_pd_bos - Validate page table and directory BOs 368 * 369 * Page directories are not updated here because huge page handling 370 * during page table updates can invalidate page directory entries 371 * again. 
Page directories are only updated after updating page 372 * tables. 373 */ 374 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) 375 { 376 struct amdgpu_bo *pd = vm->root.bo; 377 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 378 int ret; 379 380 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); 381 if (ret) { 382 pr_err("failed to validate PT BOs\n"); 383 return ret; 384 } 385 386 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); 387 388 return 0; 389 } 390 391 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 392 { 393 struct amdgpu_bo *pd = vm->root.bo; 394 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 395 int ret; 396 397 ret = amdgpu_vm_update_pdes(adev, vm, false); 398 if (ret) 399 return ret; 400 401 return amdgpu_sync_fence(sync, vm->last_update); 402 } 403 404 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) 405 { 406 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 407 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; 408 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED; 409 uint32_t mapping_flags; 410 uint64_t pte_flags; 411 bool snoop = false; 412 413 mapping_flags = AMDGPU_VM_PAGE_READABLE; 414 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) 415 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; 416 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) 417 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 418 419 switch (adev->asic_type) { 420 case CHIP_ARCTURUS: 421 case CHIP_ALDEBARAN: 422 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 423 if (bo_adev == adev) { 424 if (uncached) 425 mapping_flags |= AMDGPU_VM_MTYPE_UC; 426 else if (coherent) 427 mapping_flags |= AMDGPU_VM_MTYPE_CC; 428 else 429 mapping_flags |= AMDGPU_VM_MTYPE_RW; 430 if (adev->asic_type == CHIP_ALDEBARAN && 431 adev->gmc.xgmi.connected_to_cpu) 432 snoop = true; 433 } else { 434 if (uncached || coherent) 435 mapping_flags |= AMDGPU_VM_MTYPE_UC; 436 else 437 mapping_flags |= AMDGPU_VM_MTYPE_NC; 438 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 439 snoop = true; 440 } 441 } else { 442 if (uncached || coherent) 443 mapping_flags |= AMDGPU_VM_MTYPE_UC; 444 else 445 mapping_flags |= AMDGPU_VM_MTYPE_NC; 446 snoop = true; 447 } 448 break; 449 default: 450 if (uncached || coherent) 451 mapping_flags |= AMDGPU_VM_MTYPE_UC; 452 else 453 mapping_flags |= AMDGPU_VM_MTYPE_NC; 454 455 if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) 456 snoop = true; 457 } 458 459 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags); 460 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; 461 462 return pte_flags; 463 } 464 465 /** 466 * create_sg_table() - Create an sg_table for a contiguous DMA addr range 467 * @addr: The starting address to point to 468 * @size: Size of memory area in bytes being pointed to 469 * 470 * Allocates an instance of sg_table and initializes it to point to memory 471 * area specified by input parameters. The address used to build is assumed 472 * to be DMA mapped, if needed. 473 * 474 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table 475 * because they are physically contiguous. 
476 * 477 * Return: Initialized instance of SG Table or NULL 478 */ 479 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size) 480 { 481 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); 482 483 if (!sg) 484 return NULL; 485 if (sg_alloc_table(sg, 1, GFP_KERNEL)) { 486 kfree(sg); 487 return NULL; 488 } 489 sg_dma_address(sg->sgl) = addr; 490 sg->sgl->length = size; 491 #ifdef CONFIG_NEED_SG_DMA_LENGTH 492 sg->sgl->dma_length = size; 493 #endif 494 return sg; 495 } 496 497 static int 498 kfd_mem_dmamap_userptr(struct kgd_mem *mem, 499 struct kfd_mem_attachment *attachment) 500 { 501 enum dma_data_direction direction = 502 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 503 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 504 struct ttm_operation_ctx ctx = {.interruptible = true}; 505 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 506 struct amdgpu_device *adev = attachment->adev; 507 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; 508 struct ttm_tt *ttm = bo->tbo.ttm; 509 int ret; 510 511 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) 512 return -EINVAL; 513 514 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); 515 if (unlikely(!ttm->sg)) 516 return -ENOMEM; 517 518 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ 519 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, 520 ttm->num_pages, 0, 521 (u64)ttm->num_pages << PAGE_SHIFT, 522 GFP_KERNEL); 523 if (unlikely(ret)) 524 goto free_sg; 525 526 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); 527 if (unlikely(ret)) 528 goto release_sg; 529 530 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, 531 ttm->num_pages); 532 533 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 534 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 535 if (ret) 536 goto unmap_sg; 537 538 return 0; 539 540 unmap_sg: 541 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 542 release_sg: 543 pr_err("DMA map userptr failed: %d\n", ret); 544 sg_free_table(ttm->sg); 545 free_sg: 546 kfree(ttm->sg); 547 ttm->sg = NULL; 548 return ret; 549 } 550 551 static int 552 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) 553 { 554 struct ttm_operation_ctx ctx = {.interruptible = true}; 555 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 556 557 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 558 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 559 } 560 561 /** 562 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO 563 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device 564 * @attachment: Virtual address attachment of the BO on accessing device 565 * 566 * An access request from the device that owns DOORBELL does not require DMA mapping. 567 * This is because the request doesn't go through PCIe root complex i.e. it instead 568 * loops back. The need to DMA map arises only when accessing peer device's DOORBELL 569 * 570 * In contrast, all access requests for MMIO need to be DMA mapped without regard to 571 * device ownership. This is because access requests for MMIO go through PCIe root 572 * complex. 573 * 574 * This is accomplished in two steps: 575 * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used 576 * in updating requesting device's page table 577 * - Signal TTM to mark memory pointed to by requesting device's BO as GPU 578 * accessible. 
This allows an update of requesting device's page table 579 * with entries associated with DOOREBELL or MMIO memory 580 * 581 * This method is invoked in the following contexts: 582 * - Mapping of DOORBELL or MMIO BO of same or peer device 583 * - Validating an evicted DOOREBELL or MMIO BO on device seeking access 584 * 585 * Return: ZERO if successful, NON-ZERO otherwise 586 */ 587 static int 588 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, 589 struct kfd_mem_attachment *attachment) 590 { 591 struct ttm_operation_ctx ctx = {.interruptible = true}; 592 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 593 struct amdgpu_device *adev = attachment->adev; 594 struct ttm_tt *ttm = bo->tbo.ttm; 595 enum dma_data_direction dir; 596 dma_addr_t dma_addr; 597 bool mmio; 598 int ret; 599 600 /* Expect SG Table of dmapmap BO to be NULL */ 601 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); 602 if (unlikely(ttm->sg)) { 603 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio); 604 return -EINVAL; 605 } 606 607 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 608 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 609 dma_addr = mem->bo->tbo.sg->sgl->dma_address; 610 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); 611 pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr); 612 dma_addr = dma_map_resource(adev->dev, dma_addr, 613 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); 614 ret = dma_mapping_error(adev->dev, dma_addr); 615 if (unlikely(ret)) 616 return ret; 617 pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr); 618 619 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); 620 if (unlikely(!ttm->sg)) { 621 ret = -ENOMEM; 622 goto unmap_sg; 623 } 624 625 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 626 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 627 if (unlikely(ret)) 628 goto free_sg; 629 630 return ret; 631 632 free_sg: 633 sg_free_table(ttm->sg); 634 kfree(ttm->sg); 635 ttm->sg = NULL; 636 unmap_sg: 637 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, 638 dir, DMA_ATTR_SKIP_CPU_SYNC); 639 return ret; 640 } 641 642 static int 643 kfd_mem_dmamap_attachment(struct kgd_mem *mem, 644 struct kfd_mem_attachment *attachment) 645 { 646 switch (attachment->type) { 647 case KFD_MEM_ATT_SHARED: 648 return 0; 649 case KFD_MEM_ATT_USERPTR: 650 return kfd_mem_dmamap_userptr(mem, attachment); 651 case KFD_MEM_ATT_DMABUF: 652 return kfd_mem_dmamap_dmabuf(attachment); 653 case KFD_MEM_ATT_SG: 654 return kfd_mem_dmamap_sg_bo(mem, attachment); 655 default: 656 WARN_ON_ONCE(1); 657 } 658 return -EINVAL; 659 } 660 661 static void 662 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, 663 struct kfd_mem_attachment *attachment) 664 { 665 enum dma_data_direction direction = 666 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
667 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 668 struct ttm_operation_ctx ctx = {.interruptible = false}; 669 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 670 struct amdgpu_device *adev = attachment->adev; 671 struct ttm_tt *ttm = bo->tbo.ttm; 672 673 if (unlikely(!ttm->sg)) 674 return; 675 676 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 677 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 678 679 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 680 sg_free_table(ttm->sg); 681 kfree(ttm->sg); 682 ttm->sg = NULL; 683 } 684 685 static void 686 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) 687 { 688 struct ttm_operation_ctx ctx = {.interruptible = true}; 689 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 690 691 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 692 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 693 } 694 695 /** 696 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO 697 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device 698 * @attachment: Virtual address attachment of the BO on accessing device 699 * 700 * The method performs following steps: 701 * - Signal TTM to mark memory pointed to by BO as GPU inaccessible 702 * - Free SG Table that is used to encapsulate DMA mapped memory of 703 * peer device's DOORBELL or MMIO memory 704 * 705 * This method is invoked in the following contexts: 706 * UNMapping of DOORBELL or MMIO BO on a device having access to its memory 707 * Eviction of DOOREBELL or MMIO BO on device having access to its memory 708 * 709 * Return: void 710 */ 711 static void 712 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, 713 struct kfd_mem_attachment *attachment) 714 { 715 struct ttm_operation_ctx ctx = {.interruptible = true}; 716 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 717 struct amdgpu_device *adev = attachment->adev; 718 struct ttm_tt *ttm = bo->tbo.ttm; 719 enum dma_data_direction dir; 720 721 if (unlikely(!ttm->sg)) { 722 pr_err("SG Table of BO is UNEXPECTEDLY NULL"); 723 return; 724 } 725 726 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 727 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 728 729 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 730 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 731 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address, 732 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); 733 sg_free_table(ttm->sg); 734 kfree(ttm->sg); 735 ttm->sg = NULL; 736 bo->tbo.sg = NULL; 737 } 738 739 static void 740 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, 741 struct kfd_mem_attachment *attachment) 742 { 743 switch (attachment->type) { 744 case KFD_MEM_ATT_SHARED: 745 break; 746 case KFD_MEM_ATT_USERPTR: 747 kfd_mem_dmaunmap_userptr(mem, attachment); 748 break; 749 case KFD_MEM_ATT_DMABUF: 750 kfd_mem_dmaunmap_dmabuf(attachment); 751 break; 752 case KFD_MEM_ATT_SG: 753 kfd_mem_dmaunmap_sg_bo(mem, attachment); 754 break; 755 default: 756 WARN_ON_ONCE(1); 757 } 758 } 759 760 static int 761 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, 762 struct amdgpu_bo **bo) 763 { 764 struct drm_gem_object *gobj; 765 int ret; 766 767 if (!mem->dmabuf) { 768 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, 769 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
770 DRM_RDWR : 0); 771 if (IS_ERR(mem->dmabuf)) { 772 ret = PTR_ERR(mem->dmabuf); 773 mem->dmabuf = NULL; 774 return ret; 775 } 776 } 777 778 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); 779 if (IS_ERR(gobj)) 780 return PTR_ERR(gobj); 781 782 *bo = gem_to_amdgpu_bo(gobj); 783 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; 784 785 return 0; 786 } 787 788 /* kfd_mem_attach - Add a BO to a VM 789 * 790 * Everything that needs to bo done only once when a BO is first added 791 * to a VM. It can later be mapped and unmapped many times without 792 * repeating these steps. 793 * 794 * 0. Create BO for DMA mapping, if needed 795 * 1. Allocate and initialize BO VA entry data structure 796 * 2. Add BO to the VM 797 * 3. Determine ASIC-specific PTE flags 798 * 4. Alloc page tables and directories if needed 799 * 4a. Validate new page tables and directories 800 */ 801 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, 802 struct amdgpu_vm *vm, bool is_aql) 803 { 804 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 805 unsigned long bo_size = mem->bo->tbo.base.size; 806 uint64_t va = mem->va; 807 struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; 808 struct amdgpu_bo *bo[2] = {NULL, NULL}; 809 bool same_hive = false; 810 int i, ret; 811 812 if (!va) { 813 pr_err("Invalid VA when adding BO to VM\n"); 814 return -EINVAL; 815 } 816 817 /* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices 818 * 819 * The access path of MMIO and DOORBELL BOs of is always over PCIe. 820 * In contrast the access path of VRAM BOs depens upon the type of 821 * link that connects the peer device. Access over PCIe is allowed 822 * if peer device has large BAR. In contrast, access over xGMI is 823 * allowed for both small and large BAR configurations of peer device 824 */ 825 if ((adev != bo_adev) && 826 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || 827 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || 828 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 829 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) 830 same_hive = amdgpu_xgmi_same_hive(adev, bo_adev); 831 if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev)) 832 return -EINVAL; 833 } 834 835 for (i = 0; i <= is_aql; i++) { 836 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); 837 if (unlikely(!attachment[i])) { 838 ret = -ENOMEM; 839 goto unwind; 840 } 841 842 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, 843 va + bo_size, vm); 844 845 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || 846 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || 847 same_hive) { 848 /* Mappings on the local GPU, or VRAM mappings in the 849 * local hive, or userptr mapping IOMMU direct map mode 850 * share the original BO 851 */ 852 attachment[i]->type = KFD_MEM_ATT_SHARED; 853 bo[i] = mem->bo; 854 drm_gem_object_get(&bo[i]->tbo.base); 855 } else if (i > 0) { 856 /* Multiple mappings on the same GPU share the BO */ 857 attachment[i]->type = KFD_MEM_ATT_SHARED; 858 bo[i] = bo[0]; 859 drm_gem_object_get(&bo[i]->tbo.base); 860 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 861 /* Create an SG BO to DMA-map userptrs on other GPUs */ 862 attachment[i]->type = KFD_MEM_ATT_USERPTR; 863 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); 864 if (ret) 865 goto unwind; 866 /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */ 867 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { 868 
WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || 869 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), 870 "Handing invalid SG BO in ATTACH request"); 871 attachment[i]->type = KFD_MEM_ATT_SG; 872 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); 873 if (ret) 874 goto unwind; 875 /* Enable acces to GTT and VRAM BOs of peer devices */ 876 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || 877 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { 878 attachment[i]->type = KFD_MEM_ATT_DMABUF; 879 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); 880 if (ret) 881 goto unwind; 882 pr_debug("Employ DMABUF mechanism to enable peer GPU access\n"); 883 } else { 884 WARN_ONCE(true, "Handling invalid ATTACH request"); 885 ret = -EINVAL; 886 goto unwind; 887 } 888 889 /* Add BO to VM internal data structures */ 890 ret = amdgpu_bo_reserve(bo[i], false); 891 if (ret) { 892 pr_debug("Unable to reserve BO during memory attach"); 893 goto unwind; 894 } 895 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); 896 amdgpu_bo_unreserve(bo[i]); 897 if (unlikely(!attachment[i]->bo_va)) { 898 ret = -ENOMEM; 899 pr_err("Failed to add BO object to VM. ret == %d\n", 900 ret); 901 goto unwind; 902 } 903 attachment[i]->va = va; 904 attachment[i]->pte_flags = get_pte_flags(adev, mem); 905 attachment[i]->adev = adev; 906 list_add(&attachment[i]->list, &mem->attachments); 907 908 va += bo_size; 909 } 910 911 return 0; 912 913 unwind: 914 for (; i >= 0; i--) { 915 if (!attachment[i]) 916 continue; 917 if (attachment[i]->bo_va) { 918 amdgpu_bo_reserve(bo[i], true); 919 amdgpu_vm_bo_del(adev, attachment[i]->bo_va); 920 amdgpu_bo_unreserve(bo[i]); 921 list_del(&attachment[i]->list); 922 } 923 if (bo[i]) 924 drm_gem_object_put(&bo[i]->tbo.base); 925 kfree(attachment[i]); 926 } 927 return ret; 928 } 929 930 static void kfd_mem_detach(struct kfd_mem_attachment *attachment) 931 { 932 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 933 934 pr_debug("\t remove VA 0x%llx in entry %p\n", 935 attachment->va, attachment); 936 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va); 937 drm_gem_object_put(&bo->tbo.base); 938 list_del(&attachment->list); 939 kfree(attachment); 940 } 941 942 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, 943 struct amdkfd_process_info *process_info, 944 bool userptr) 945 { 946 struct ttm_validate_buffer *entry = &mem->validate_list; 947 struct amdgpu_bo *bo = mem->bo; 948 949 INIT_LIST_HEAD(&entry->head); 950 entry->num_shared = 1; 951 entry->bo = &bo->tbo; 952 mutex_lock(&process_info->lock); 953 if (userptr) 954 list_add_tail(&entry->head, &process_info->userptr_valid_list); 955 else 956 list_add_tail(&entry->head, &process_info->kfd_bo_list); 957 mutex_unlock(&process_info->lock); 958 } 959 960 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, 961 struct amdkfd_process_info *process_info) 962 { 963 struct ttm_validate_buffer *bo_list_entry; 964 965 bo_list_entry = &mem->validate_list; 966 mutex_lock(&process_info->lock); 967 list_del(&bo_list_entry->head); 968 mutex_unlock(&process_info->lock); 969 } 970 971 /* Initializes user pages. It registers the MMU notifier and validates 972 * the userptr BO in the GTT domain. 973 * 974 * The BO must already be on the userptr_valid_list. Otherwise an 975 * eviction and restore may happen that leaves the new BO unmapped 976 * with the user mode queues running. 977 * 978 * Takes the process_info->lock to protect against concurrent restore 979 * workers. 980 * 981 * Returns 0 for success, negative errno for errors. 
982 */ 983 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, 984 bool criu_resume) 985 { 986 struct amdkfd_process_info *process_info = mem->process_info; 987 struct amdgpu_bo *bo = mem->bo; 988 struct ttm_operation_ctx ctx = { true, false }; 989 int ret = 0; 990 991 mutex_lock(&process_info->lock); 992 993 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); 994 if (ret) { 995 pr_err("%s: Failed to set userptr: %d\n", __func__, ret); 996 goto out; 997 } 998 999 ret = amdgpu_mn_register(bo, user_addr); 1000 if (ret) { 1001 pr_err("%s: Failed to register MMU notifier: %d\n", 1002 __func__, ret); 1003 goto out; 1004 } 1005 1006 if (criu_resume) { 1007 /* 1008 * During a CRIU restore operation, the userptr buffer objects 1009 * will be validated in the restore_userptr_work worker at a 1010 * later stage when it is scheduled by another ioctl called by 1011 * CRIU master process for the target pid for restore. 1012 */ 1013 atomic_inc(&mem->invalid); 1014 mutex_unlock(&process_info->lock); 1015 return 0; 1016 } 1017 1018 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); 1019 if (ret) { 1020 pr_err("%s: Failed to get user pages: %d\n", __func__, ret); 1021 goto unregister_out; 1022 } 1023 1024 ret = amdgpu_bo_reserve(bo, true); 1025 if (ret) { 1026 pr_err("%s: Failed to reserve BO\n", __func__); 1027 goto release_out; 1028 } 1029 amdgpu_bo_placement_from_domain(bo, mem->domain); 1030 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1031 if (ret) 1032 pr_err("%s: failed to validate BO\n", __func__); 1033 amdgpu_bo_unreserve(bo); 1034 1035 release_out: 1036 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 1037 unregister_out: 1038 if (ret) 1039 amdgpu_mn_unregister(bo); 1040 out: 1041 mutex_unlock(&process_info->lock); 1042 return ret; 1043 } 1044 1045 /* Reserving a BO and its page table BOs must happen atomically to 1046 * avoid deadlocks. Some operations update multiple VMs at once. Track 1047 * all the reservation info in a context structure. Optionally a sync 1048 * object can track VM updates. 1049 */ 1050 struct bo_vm_reservation_context { 1051 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ 1052 unsigned int n_vms; /* Number of VMs reserved */ 1053 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ 1054 struct ww_acquire_ctx ticket; /* Reservation ticket */ 1055 struct list_head list, duplicates; /* BO lists */ 1056 struct amdgpu_sync *sync; /* Pointer to sync object */ 1057 bool reserved; /* Whether BOs are reserved */ 1058 }; 1059 1060 enum bo_vm_match { 1061 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */ 1062 BO_VM_MAPPED, /* Match VMs where a BO is mapped */ 1063 BO_VM_ALL, /* Match all VMs a BO was added to */ 1064 }; 1065 1066 /** 1067 * reserve_bo_and_vm - reserve a BO and a VM unconditionally. 1068 * @mem: KFD BO structure. 1069 * @vm: the VM to reserve. 1070 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 
1071 */ 1072 static int reserve_bo_and_vm(struct kgd_mem *mem, 1073 struct amdgpu_vm *vm, 1074 struct bo_vm_reservation_context *ctx) 1075 { 1076 struct amdgpu_bo *bo = mem->bo; 1077 int ret; 1078 1079 WARN_ON(!vm); 1080 1081 ctx->reserved = false; 1082 ctx->n_vms = 1; 1083 ctx->sync = &mem->sync; 1084 1085 INIT_LIST_HEAD(&ctx->list); 1086 INIT_LIST_HEAD(&ctx->duplicates); 1087 1088 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); 1089 if (!ctx->vm_pd) 1090 return -ENOMEM; 1091 1092 ctx->kfd_bo.priority = 0; 1093 ctx->kfd_bo.tv.bo = &bo->tbo; 1094 ctx->kfd_bo.tv.num_shared = 1; 1095 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 1096 1097 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); 1098 1099 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 1100 false, &ctx->duplicates); 1101 if (ret) { 1102 pr_err("Failed to reserve buffers in ttm.\n"); 1103 kfree(ctx->vm_pd); 1104 ctx->vm_pd = NULL; 1105 return ret; 1106 } 1107 1108 ctx->reserved = true; 1109 return 0; 1110 } 1111 1112 /** 1113 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally 1114 * @mem: KFD BO structure. 1115 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO 1116 * is used. Otherwise, a single VM associated with the BO. 1117 * @map_type: the mapping status that will be used to filter the VMs. 1118 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 1119 * 1120 * Returns 0 for success, negative for failure. 1121 */ 1122 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, 1123 struct amdgpu_vm *vm, enum bo_vm_match map_type, 1124 struct bo_vm_reservation_context *ctx) 1125 { 1126 struct amdgpu_bo *bo = mem->bo; 1127 struct kfd_mem_attachment *entry; 1128 unsigned int i; 1129 int ret; 1130 1131 ctx->reserved = false; 1132 ctx->n_vms = 0; 1133 ctx->vm_pd = NULL; 1134 ctx->sync = &mem->sync; 1135 1136 INIT_LIST_HEAD(&ctx->list); 1137 INIT_LIST_HEAD(&ctx->duplicates); 1138 1139 list_for_each_entry(entry, &mem->attachments, list) { 1140 if ((vm && vm != entry->bo_va->base.vm) || 1141 (entry->is_mapped != map_type 1142 && map_type != BO_VM_ALL)) 1143 continue; 1144 1145 ctx->n_vms++; 1146 } 1147 1148 if (ctx->n_vms != 0) { 1149 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), 1150 GFP_KERNEL); 1151 if (!ctx->vm_pd) 1152 return -ENOMEM; 1153 } 1154 1155 ctx->kfd_bo.priority = 0; 1156 ctx->kfd_bo.tv.bo = &bo->tbo; 1157 ctx->kfd_bo.tv.num_shared = 1; 1158 list_add(&ctx->kfd_bo.tv.head, &ctx->list); 1159 1160 i = 0; 1161 list_for_each_entry(entry, &mem->attachments, list) { 1162 if ((vm && vm != entry->bo_va->base.vm) || 1163 (entry->is_mapped != map_type 1164 && map_type != BO_VM_ALL)) 1165 continue; 1166 1167 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, 1168 &ctx->vm_pd[i]); 1169 i++; 1170 } 1171 1172 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, 1173 false, &ctx->duplicates); 1174 if (ret) { 1175 pr_err("Failed to reserve buffers in ttm.\n"); 1176 kfree(ctx->vm_pd); 1177 ctx->vm_pd = NULL; 1178 return ret; 1179 } 1180 1181 ctx->reserved = true; 1182 return 0; 1183 } 1184 1185 /** 1186 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context 1187 * @ctx: Reservation context to unreserve 1188 * @wait: Optionally wait for a sync object representing pending VM updates 1189 * @intr: Whether the wait is interruptible 1190 * 1191 * Also frees any resources allocated in 1192 * reserve_bo_and_(cond_)vm(s). Returns the status from 1193 * amdgpu_sync_wait. 
1194 */ 1195 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, 1196 bool wait, bool intr) 1197 { 1198 int ret = 0; 1199 1200 if (wait) 1201 ret = amdgpu_sync_wait(ctx->sync, intr); 1202 1203 if (ctx->reserved) 1204 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); 1205 kfree(ctx->vm_pd); 1206 1207 ctx->sync = NULL; 1208 1209 ctx->reserved = false; 1210 ctx->vm_pd = NULL; 1211 1212 return ret; 1213 } 1214 1215 static void unmap_bo_from_gpuvm(struct kgd_mem *mem, 1216 struct kfd_mem_attachment *entry, 1217 struct amdgpu_sync *sync) 1218 { 1219 struct amdgpu_bo_va *bo_va = entry->bo_va; 1220 struct amdgpu_device *adev = entry->adev; 1221 struct amdgpu_vm *vm = bo_va->base.vm; 1222 1223 amdgpu_vm_bo_unmap(adev, bo_va, entry->va); 1224 1225 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); 1226 1227 amdgpu_sync_fence(sync, bo_va->last_pt_update); 1228 1229 kfd_mem_dmaunmap_attachment(mem, entry); 1230 } 1231 1232 static int update_gpuvm_pte(struct kgd_mem *mem, 1233 struct kfd_mem_attachment *entry, 1234 struct amdgpu_sync *sync) 1235 { 1236 struct amdgpu_bo_va *bo_va = entry->bo_va; 1237 struct amdgpu_device *adev = entry->adev; 1238 int ret; 1239 1240 ret = kfd_mem_dmamap_attachment(mem, entry); 1241 if (ret) 1242 return ret; 1243 1244 /* Update the page tables */ 1245 ret = amdgpu_vm_bo_update(adev, bo_va, false); 1246 if (ret) { 1247 pr_err("amdgpu_vm_bo_update failed\n"); 1248 return ret; 1249 } 1250 1251 return amdgpu_sync_fence(sync, bo_va->last_pt_update); 1252 } 1253 1254 static int map_bo_to_gpuvm(struct kgd_mem *mem, 1255 struct kfd_mem_attachment *entry, 1256 struct amdgpu_sync *sync, 1257 bool no_update_pte) 1258 { 1259 int ret; 1260 1261 /* Set virtual address for the allocation */ 1262 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, 1263 amdgpu_bo_size(entry->bo_va->base.bo), 1264 entry->pte_flags); 1265 if (ret) { 1266 pr_err("Failed to map VA 0x%llx in vm. 
ret %d\n", 1267 entry->va, ret); 1268 return ret; 1269 } 1270 1271 if (no_update_pte) 1272 return 0; 1273 1274 ret = update_gpuvm_pte(mem, entry, sync); 1275 if (ret) { 1276 pr_err("update_gpuvm_pte() failed\n"); 1277 goto update_gpuvm_pte_failed; 1278 } 1279 1280 return 0; 1281 1282 update_gpuvm_pte_failed: 1283 unmap_bo_from_gpuvm(mem, entry, sync); 1284 return ret; 1285 } 1286 1287 static int process_validate_vms(struct amdkfd_process_info *process_info) 1288 { 1289 struct amdgpu_vm *peer_vm; 1290 int ret; 1291 1292 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1293 vm_list_node) { 1294 ret = vm_validate_pt_pd_bos(peer_vm); 1295 if (ret) 1296 return ret; 1297 } 1298 1299 return 0; 1300 } 1301 1302 static int process_sync_pds_resv(struct amdkfd_process_info *process_info, 1303 struct amdgpu_sync *sync) 1304 { 1305 struct amdgpu_vm *peer_vm; 1306 int ret; 1307 1308 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1309 vm_list_node) { 1310 struct amdgpu_bo *pd = peer_vm->root.bo; 1311 1312 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 1313 AMDGPU_SYNC_NE_OWNER, 1314 AMDGPU_FENCE_OWNER_KFD); 1315 if (ret) 1316 return ret; 1317 } 1318 1319 return 0; 1320 } 1321 1322 static int process_update_pds(struct amdkfd_process_info *process_info, 1323 struct amdgpu_sync *sync) 1324 { 1325 struct amdgpu_vm *peer_vm; 1326 int ret; 1327 1328 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1329 vm_list_node) { 1330 ret = vm_update_pds(peer_vm, sync); 1331 if (ret) 1332 return ret; 1333 } 1334 1335 return 0; 1336 } 1337 1338 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, 1339 struct dma_fence **ef) 1340 { 1341 struct amdkfd_process_info *info = NULL; 1342 int ret; 1343 1344 if (!*process_info) { 1345 info = kzalloc(sizeof(*info), GFP_KERNEL); 1346 if (!info) 1347 return -ENOMEM; 1348 1349 mutex_init(&info->lock); 1350 INIT_LIST_HEAD(&info->vm_list_head); 1351 INIT_LIST_HEAD(&info->kfd_bo_list); 1352 INIT_LIST_HEAD(&info->userptr_valid_list); 1353 INIT_LIST_HEAD(&info->userptr_inval_list); 1354 1355 info->eviction_fence = 1356 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 1357 current->mm, 1358 NULL); 1359 if (!info->eviction_fence) { 1360 pr_err("Failed to create eviction fence\n"); 1361 ret = -ENOMEM; 1362 goto create_evict_fence_fail; 1363 } 1364 1365 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); 1366 atomic_set(&info->evicted_bos, 0); 1367 INIT_DELAYED_WORK(&info->restore_userptr_work, 1368 amdgpu_amdkfd_restore_userptr_worker); 1369 1370 *process_info = info; 1371 *ef = dma_fence_get(&info->eviction_fence->base); 1372 } 1373 1374 vm->process_info = *process_info; 1375 1376 /* Validate page directory and attach eviction fence */ 1377 ret = amdgpu_bo_reserve(vm->root.bo, true); 1378 if (ret) 1379 goto reserve_pd_fail; 1380 ret = vm_validate_pt_pd_bos(vm); 1381 if (ret) { 1382 pr_err("validate_pt_pd_bos() failed\n"); 1383 goto validate_pd_fail; 1384 } 1385 ret = amdgpu_bo_sync_wait(vm->root.bo, 1386 AMDGPU_FENCE_OWNER_KFD, false); 1387 if (ret) 1388 goto wait_pd_fail; 1389 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); 1390 if (ret) 1391 goto reserve_shared_fail; 1392 dma_resv_add_fence(vm->root.bo->tbo.base.resv, 1393 &vm->process_info->eviction_fence->base, 1394 DMA_RESV_USAGE_BOOKKEEP); 1395 amdgpu_bo_unreserve(vm->root.bo); 1396 1397 /* Update process info */ 1398 mutex_lock(&vm->process_info->lock); 1399 list_add_tail(&vm->vm_list_node, 1400 &(vm->process_info->vm_list_head)); 1401 
vm->process_info->n_vms++; 1402 mutex_unlock(&vm->process_info->lock); 1403 1404 return 0; 1405 1406 reserve_shared_fail: 1407 wait_pd_fail: 1408 validate_pd_fail: 1409 amdgpu_bo_unreserve(vm->root.bo); 1410 reserve_pd_fail: 1411 vm->process_info = NULL; 1412 if (info) { 1413 /* Two fence references: one in info and one in *ef */ 1414 dma_fence_put(&info->eviction_fence->base); 1415 dma_fence_put(*ef); 1416 *ef = NULL; 1417 *process_info = NULL; 1418 put_pid(info->pid); 1419 create_evict_fence_fail: 1420 mutex_destroy(&info->lock); 1421 kfree(info); 1422 } 1423 return ret; 1424 } 1425 1426 /** 1427 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using following criteria 1428 * @bo: Handle of buffer object being pinned 1429 * @domain: Domain into which BO should be pinned 1430 * 1431 * - USERPTR BOs are UNPINNABLE and will return error 1432 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their 1433 * PIN count incremented. It is valid to PIN a BO multiple times 1434 * 1435 * Return: ZERO if successful in pinning, Non-Zero in case of error. 1436 */ 1437 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) 1438 { 1439 int ret = 0; 1440 1441 ret = amdgpu_bo_reserve(bo, false); 1442 if (unlikely(ret)) 1443 return ret; 1444 1445 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); 1446 if (ret) 1447 pr_err("Error in Pinning BO to domain: %d\n", domain); 1448 1449 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 1450 amdgpu_bo_unreserve(bo); 1451 1452 return ret; 1453 } 1454 1455 /** 1456 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using following criteria 1457 * @bo: Handle of buffer object being unpinned 1458 * 1459 * - Is a illegal request for USERPTR BOs and is ignored 1460 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their 1461 * PIN count decremented. Calls to UNPIN must balance calls to PIN 1462 */ 1463 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) 1464 { 1465 int ret = 0; 1466 1467 ret = amdgpu_bo_reserve(bo, false); 1468 if (unlikely(ret)) 1469 return; 1470 1471 amdgpu_bo_unpin(bo); 1472 amdgpu_bo_unreserve(bo); 1473 } 1474 1475 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, 1476 struct file *filp, u32 pasid, 1477 void **process_info, 1478 struct dma_fence **ef) 1479 { 1480 struct amdgpu_fpriv *drv_priv; 1481 struct amdgpu_vm *avm; 1482 int ret; 1483 1484 ret = amdgpu_file_to_fpriv(filp, &drv_priv); 1485 if (ret) 1486 return ret; 1487 avm = &drv_priv->vm; 1488 1489 /* Already a compute VM? */ 1490 if (avm->process_info) 1491 return -EINVAL; 1492 1493 /* Free the original amdgpu allocated pasid, 1494 * will be replaced with kfd allocated pasid. 
1495 */ 1496 if (avm->pasid) { 1497 amdgpu_pasid_free(avm->pasid); 1498 amdgpu_vm_set_pasid(adev, avm, 0); 1499 } 1500 1501 /* Convert VM into a compute VM */ 1502 ret = amdgpu_vm_make_compute(adev, avm); 1503 if (ret) 1504 return ret; 1505 1506 ret = amdgpu_vm_set_pasid(adev, avm, pasid); 1507 if (ret) 1508 return ret; 1509 /* Initialize KFD part of the VM and process info */ 1510 ret = init_kfd_vm(avm, process_info, ef); 1511 if (ret) 1512 return ret; 1513 1514 amdgpu_vm_set_task_info(avm); 1515 1516 return 0; 1517 } 1518 1519 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, 1520 struct amdgpu_vm *vm) 1521 { 1522 struct amdkfd_process_info *process_info = vm->process_info; 1523 1524 if (!process_info) 1525 return; 1526 1527 /* Update process info */ 1528 mutex_lock(&process_info->lock); 1529 process_info->n_vms--; 1530 list_del(&vm->vm_list_node); 1531 mutex_unlock(&process_info->lock); 1532 1533 vm->process_info = NULL; 1534 1535 /* Release per-process resources when last compute VM is destroyed */ 1536 if (!process_info->n_vms) { 1537 WARN_ON(!list_empty(&process_info->kfd_bo_list)); 1538 WARN_ON(!list_empty(&process_info->userptr_valid_list)); 1539 WARN_ON(!list_empty(&process_info->userptr_inval_list)); 1540 1541 dma_fence_put(&process_info->eviction_fence->base); 1542 cancel_delayed_work_sync(&process_info->restore_userptr_work); 1543 put_pid(process_info->pid); 1544 mutex_destroy(&process_info->lock); 1545 kfree(process_info); 1546 } 1547 } 1548 1549 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, 1550 void *drm_priv) 1551 { 1552 struct amdgpu_vm *avm; 1553 1554 if (WARN_ON(!adev || !drm_priv)) 1555 return; 1556 1557 avm = drm_priv_to_vm(drm_priv); 1558 1559 pr_debug("Releasing process vm %p\n", avm); 1560 1561 /* The original pasid of amdgpu vm has already been 1562 * released during making a amdgpu vm to a compute vm 1563 * The current pasid is managed by kfd and will be 1564 * released on kfd process destroy. Set amdgpu pasid 1565 * to 0 to avoid duplicate release. 
1566 */ 1567 amdgpu_vm_release_compute(adev, avm); 1568 } 1569 1570 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) 1571 { 1572 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1573 struct amdgpu_bo *pd = avm->root.bo; 1574 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1575 1576 if (adev->asic_type < CHIP_VEGA10) 1577 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; 1578 return avm->pd_phys_addr; 1579 } 1580 1581 void amdgpu_amdkfd_block_mmu_notifications(void *p) 1582 { 1583 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1584 1585 mutex_lock(&pinfo->lock); 1586 WRITE_ONCE(pinfo->block_mmu_notifications, true); 1587 mutex_unlock(&pinfo->lock); 1588 } 1589 1590 int amdgpu_amdkfd_criu_resume(void *p) 1591 { 1592 int ret = 0; 1593 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1594 1595 mutex_lock(&pinfo->lock); 1596 pr_debug("scheduling work\n"); 1597 atomic_inc(&pinfo->evicted_bos); 1598 if (!READ_ONCE(pinfo->block_mmu_notifications)) { 1599 ret = -EINVAL; 1600 goto out_unlock; 1601 } 1602 WRITE_ONCE(pinfo->block_mmu_notifications, false); 1603 schedule_delayed_work(&pinfo->restore_userptr_work, 0); 1604 1605 out_unlock: 1606 mutex_unlock(&pinfo->lock); 1607 return ret; 1608 } 1609 1610 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) 1611 { 1612 uint64_t reserved_for_pt = 1613 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); 1614 size_t available; 1615 1616 spin_lock(&kfd_mem_limit.mem_limit_lock); 1617 available = adev->gmc.real_vram_size 1618 - adev->kfd.vram_used_aligned 1619 - atomic64_read(&adev->vram_pin_size) 1620 - reserved_for_pt; 1621 spin_unlock(&kfd_mem_limit.mem_limit_lock); 1622 1623 return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN); 1624 } 1625 1626 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( 1627 struct amdgpu_device *adev, uint64_t va, uint64_t size, 1628 void *drm_priv, struct kgd_mem **mem, 1629 uint64_t *offset, uint32_t flags, bool criu_resume) 1630 { 1631 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1632 enum ttm_bo_type bo_type = ttm_bo_type_device; 1633 struct sg_table *sg = NULL; 1634 uint64_t user_addr = 0; 1635 struct amdgpu_bo *bo; 1636 struct drm_gem_object *gobj = NULL; 1637 u32 domain, alloc_domain; 1638 u64 alloc_flags; 1639 int ret; 1640 1641 /* 1642 * Check on which domain to allocate BO 1643 */ 1644 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 1645 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; 1646 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; 1647 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 
1648 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; 1649 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 1650 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1651 alloc_flags = 0; 1652 } else { 1653 domain = AMDGPU_GEM_DOMAIN_GTT; 1654 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1655 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE; 1656 1657 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 1658 if (!offset || !*offset) 1659 return -EINVAL; 1660 user_addr = untagged_addr(*offset); 1661 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1662 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1663 bo_type = ttm_bo_type_sg; 1664 if (size > UINT_MAX) 1665 return -EINVAL; 1666 sg = create_sg_table(*offset, size); 1667 if (!sg) 1668 return -ENOMEM; 1669 } else { 1670 return -EINVAL; 1671 } 1672 } 1673 1674 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 1675 if (!*mem) { 1676 ret = -ENOMEM; 1677 goto err; 1678 } 1679 INIT_LIST_HEAD(&(*mem)->attachments); 1680 mutex_init(&(*mem)->lock); 1681 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); 1682 1683 /* Workaround for AQL queue wraparound bug. Map the same 1684 * memory twice. That means we only actually allocate half 1685 * the memory. 1686 */ 1687 if ((*mem)->aql_queue) 1688 size = size >> 1; 1689 1690 (*mem)->alloc_flags = flags; 1691 1692 amdgpu_sync_create(&(*mem)->sync); 1693 1694 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); 1695 if (ret) { 1696 pr_debug("Insufficient memory\n"); 1697 goto err_reserve_limit; 1698 } 1699 1700 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", 1701 va, size, domain_string(alloc_domain)); 1702 1703 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, 1704 bo_type, NULL, &gobj); 1705 if (ret) { 1706 pr_debug("Failed to create BO on domain %s. ret %d\n", 1707 domain_string(alloc_domain), ret); 1708 goto err_bo_create; 1709 } 1710 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); 1711 if (ret) { 1712 pr_debug("Failed to allow vma node access. 
ret %d\n", ret); 1713 goto err_node_allow; 1714 } 1715 bo = gem_to_amdgpu_bo(gobj); 1716 if (bo_type == ttm_bo_type_sg) { 1717 bo->tbo.sg = sg; 1718 bo->tbo.ttm->sg = sg; 1719 } 1720 bo->kfd_bo = *mem; 1721 (*mem)->bo = bo; 1722 if (user_addr) 1723 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; 1724 1725 (*mem)->va = va; 1726 (*mem)->domain = domain; 1727 (*mem)->mapped_to_gpu_memory = 0; 1728 (*mem)->process_info = avm->process_info; 1729 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); 1730 1731 if (user_addr) { 1732 pr_debug("creating userptr BO for user_addr = %llx\n", user_addr); 1733 ret = init_user_pages(*mem, user_addr, criu_resume); 1734 if (ret) 1735 goto allocate_init_user_pages_failed; 1736 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1737 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1738 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); 1739 if (ret) { 1740 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); 1741 goto err_pin_bo; 1742 } 1743 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 1744 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; 1745 } 1746 1747 if (offset) 1748 *offset = amdgpu_bo_mmap_offset(bo); 1749 1750 return 0; 1751 1752 allocate_init_user_pages_failed: 1753 err_pin_bo: 1754 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 1755 drm_vma_node_revoke(&gobj->vma_node, drm_priv); 1756 err_node_allow: 1757 /* Don't unreserve system mem limit twice */ 1758 goto err_reserve_limit; 1759 err_bo_create: 1760 amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags); 1761 err_reserve_limit: 1762 mutex_destroy(&(*mem)->lock); 1763 if (gobj) 1764 drm_gem_object_put(gobj); 1765 else 1766 kfree(*mem); 1767 err: 1768 if (sg) { 1769 sg_free_table(sg); 1770 kfree(sg); 1771 } 1772 return ret; 1773 } 1774 1775 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( 1776 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, 1777 uint64_t *size) 1778 { 1779 struct amdkfd_process_info *process_info = mem->process_info; 1780 unsigned long bo_size = mem->bo->tbo.base.size; 1781 bool use_release_notifier = (mem->bo->kfd_bo == mem); 1782 struct kfd_mem_attachment *entry, *tmp; 1783 struct bo_vm_reservation_context ctx; 1784 struct ttm_validate_buffer *bo_list_entry; 1785 unsigned int mapped_to_gpu_memory; 1786 int ret; 1787 bool is_imported = false; 1788 1789 mutex_lock(&mem->lock); 1790 1791 /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */ 1792 if (mem->alloc_flags & 1793 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1794 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1795 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); 1796 } 1797 1798 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; 1799 is_imported = mem->is_imported; 1800 mutex_unlock(&mem->lock); 1801 /* lock is not needed after this, since mem is unused and will 1802 * be freed anyway 1803 */ 1804 1805 if (mapped_to_gpu_memory > 0) { 1806 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", 1807 mem->va, bo_size); 1808 return -EBUSY; 1809 } 1810 1811 /* Make sure restore workers don't access the BO any more */ 1812 bo_list_entry = &mem->validate_list; 1813 mutex_lock(&process_info->lock); 1814 list_del(&bo_list_entry->head); 1815 mutex_unlock(&process_info->lock); 1816 1817 /* No more MMU notifiers */ 1818 amdgpu_mn_unregister(mem->bo); 1819 1820 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1821 if (unlikely(ret)) 1822 return ret; 1823 1824 /* The eviction fence should be removed by the last unmap. 
1825 * TODO: Log an error condition if the bo still has the eviction fence 1826 * attached 1827 */ 1828 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1829 process_info->eviction_fence); 1830 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, 1831 mem->va + bo_size * (1 + mem->aql_queue)); 1832 1833 /* Remove from VM internal data structures */ 1834 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) 1835 kfd_mem_detach(entry); 1836 1837 ret = unreserve_bo_and_vms(&ctx, false, false); 1838 1839 /* Free the sync object */ 1840 amdgpu_sync_free(&mem->sync); 1841 1842 /* If the SG is not NULL, it's one we created for a doorbell or mmio 1843 * remap BO. We need to free it. 1844 */ 1845 if (mem->bo->tbo.sg) { 1846 sg_free_table(mem->bo->tbo.sg); 1847 kfree(mem->bo->tbo.sg); 1848 } 1849 1850 /* Update the size of the BO being freed if it was allocated from 1851 * VRAM and is not imported. 1852 */ 1853 if (size) { 1854 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && 1855 (!is_imported)) 1856 *size = bo_size; 1857 else 1858 *size = 0; 1859 } 1860 1861 /* Free the BO*/ 1862 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); 1863 if (mem->dmabuf) 1864 dma_buf_put(mem->dmabuf); 1865 mutex_destroy(&mem->lock); 1866 1867 /* If this releases the last reference, it will end up calling 1868 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why 1869 * this needs to be the last call here. 1870 */ 1871 drm_gem_object_put(&mem->bo->tbo.base); 1872 1873 /* 1874 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(), 1875 * explicitly free it here. 1876 */ 1877 if (!use_release_notifier) 1878 kfree(mem); 1879 1880 return ret; 1881 } 1882 1883 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1884 struct amdgpu_device *adev, struct kgd_mem *mem, 1885 void *drm_priv) 1886 { 1887 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1888 int ret; 1889 struct amdgpu_bo *bo; 1890 uint32_t domain; 1891 struct kfd_mem_attachment *entry; 1892 struct bo_vm_reservation_context ctx; 1893 unsigned long bo_size; 1894 bool is_invalid_userptr = false; 1895 1896 bo = mem->bo; 1897 if (!bo) { 1898 pr_err("Invalid BO when mapping memory to GPU\n"); 1899 return -EINVAL; 1900 } 1901 1902 /* Make sure restore is not running concurrently. Since we 1903 * don't map invalid userptr BOs, we rely on the next restore 1904 * worker to do the mapping 1905 */ 1906 mutex_lock(&mem->process_info->lock); 1907 1908 /* Lock mmap-sem. If we find an invalid userptr BO, we can be 1909 * sure that the MMU notifier is no longer running 1910 * concurrently and the queues are actually stopped 1911 */ 1912 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1913 mmap_write_lock(current->mm); 1914 is_invalid_userptr = atomic_read(&mem->invalid); 1915 mmap_write_unlock(current->mm); 1916 } 1917 1918 mutex_lock(&mem->lock); 1919 1920 domain = mem->domain; 1921 bo_size = bo->tbo.base.size; 1922 1923 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", 1924 mem->va, 1925 mem->va + bo_size * (1 + mem->aql_queue), 1926 avm, domain_string(domain)); 1927 1928 if (!kfd_mem_is_attached(avm, mem)) { 1929 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); 1930 if (ret) 1931 goto out; 1932 } 1933 1934 ret = reserve_bo_and_vm(mem, avm, &ctx); 1935 if (unlikely(ret)) 1936 goto out; 1937 1938 /* Userptr can be marked as "not invalid", but not actually be 1939 * validated yet (still in the system domain). 
In that case 1940 * the queues are still stopped and we can leave mapping for 1941 * the next restore worker 1942 */ 1943 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 1944 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 1945 is_invalid_userptr = true; 1946 1947 ret = vm_validate_pt_pd_bos(avm); 1948 if (unlikely(ret)) 1949 goto out_unreserve; 1950 1951 if (mem->mapped_to_gpu_memory == 0 && 1952 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 1953 /* Validate BO only once. The eviction fence gets added to BO 1954 * the first time it is mapped. Validate will wait for all 1955 * background evictions to complete. 1956 */ 1957 ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 1958 if (ret) { 1959 pr_debug("Validate failed\n"); 1960 goto out_unreserve; 1961 } 1962 } 1963 1964 list_for_each_entry(entry, &mem->attachments, list) { 1965 if (entry->bo_va->base.vm != avm || entry->is_mapped) 1966 continue; 1967 1968 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", 1969 entry->va, entry->va + bo_size, entry); 1970 1971 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, 1972 is_invalid_userptr); 1973 if (ret) { 1974 pr_err("Failed to map bo to gpuvm\n"); 1975 goto out_unreserve; 1976 } 1977 1978 ret = vm_update_pds(avm, ctx.sync); 1979 if (ret) { 1980 pr_err("Failed to update page directories\n"); 1981 goto out_unreserve; 1982 } 1983 1984 entry->is_mapped = true; 1985 mem->mapped_to_gpu_memory++; 1986 pr_debug("\t INC mapping count %d\n", 1987 mem->mapped_to_gpu_memory); 1988 } 1989 1990 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) 1991 dma_resv_add_fence(bo->tbo.base.resv, 1992 &avm->process_info->eviction_fence->base, 1993 DMA_RESV_USAGE_BOOKKEEP); 1994 ret = unreserve_bo_and_vms(&ctx, false, false); 1995 1996 goto out; 1997 1998 out_unreserve: 1999 unreserve_bo_and_vms(&ctx, false, false); 2000 out: 2001 mutex_unlock(&mem->process_info->lock); 2002 mutex_unlock(&mem->lock); 2003 return ret; 2004 } 2005 2006 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 2007 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) 2008 { 2009 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2010 struct amdkfd_process_info *process_info = avm->process_info; 2011 unsigned long bo_size = mem->bo->tbo.base.size; 2012 struct kfd_mem_attachment *entry; 2013 struct bo_vm_reservation_context ctx; 2014 int ret; 2015 2016 mutex_lock(&mem->lock); 2017 2018 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); 2019 if (unlikely(ret)) 2020 goto out; 2021 /* If no VMs were reserved, it means the BO wasn't actually mapped */ 2022 if (ctx.n_vms == 0) { 2023 ret = -EINVAL; 2024 goto unreserve_out; 2025 } 2026 2027 ret = vm_validate_pt_pd_bos(avm); 2028 if (unlikely(ret)) 2029 goto unreserve_out; 2030 2031 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", 2032 mem->va, 2033 mem->va + bo_size * (1 + mem->aql_queue), 2034 avm); 2035 2036 list_for_each_entry(entry, &mem->attachments, list) { 2037 if (entry->bo_va->base.vm != avm || !entry->is_mapped) 2038 continue; 2039 2040 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", 2041 entry->va, entry->va + bo_size, entry); 2042 2043 unmap_bo_from_gpuvm(mem, entry, ctx.sync); 2044 entry->is_mapped = false; 2045 2046 mem->mapped_to_gpu_memory--; 2047 pr_debug("\t DEC mapping count %d\n", 2048 mem->mapped_to_gpu_memory); 2049 } 2050 2051 /* If BO is unmapped from all VMs, unfence it. It can be evicted if 2052 * required. 
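 * Userptr and pinned BOs are skipped here because the map path never
 * attaches the eviction fence to them.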
2053 */ 2054 if (mem->mapped_to_gpu_memory == 0 && 2055 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && 2056 !mem->bo->tbo.pin_count) 2057 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 2058 process_info->eviction_fence); 2059 2060 unreserve_out: 2061 unreserve_bo_and_vms(&ctx, false, false); 2062 out: 2063 mutex_unlock(&mem->lock); 2064 return ret; 2065 } 2066 2067 int amdgpu_amdkfd_gpuvm_sync_memory( 2068 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) 2069 { 2070 struct amdgpu_sync sync; 2071 int ret; 2072 2073 amdgpu_sync_create(&sync); 2074 2075 mutex_lock(&mem->lock); 2076 amdgpu_sync_clone(&mem->sync, &sync); 2077 mutex_unlock(&mem->lock); 2078 2079 ret = amdgpu_sync_wait(&sync, intr); 2080 amdgpu_sync_free(&sync); 2081 return ret; 2082 } 2083 2084 /** 2085 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count 2086 * @adev: Device to which allocated BO belongs 2087 * @bo: Buffer object to be mapped 2088 * 2089 * Before return, bo reference count is incremented. To release the reference and unpin/ 2090 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem. 2091 */ 2092 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo) 2093 { 2094 int ret; 2095 2096 ret = amdgpu_bo_reserve(bo, true); 2097 if (ret) { 2098 pr_err("Failed to reserve bo. ret %d\n", ret); 2099 goto err_reserve_bo_failed; 2100 } 2101 2102 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2103 if (ret) { 2104 pr_err("Failed to pin bo. ret %d\n", ret); 2105 goto err_pin_bo_failed; 2106 } 2107 2108 ret = amdgpu_ttm_alloc_gart(&bo->tbo); 2109 if (ret) { 2110 pr_err("Failed to bind bo to GART. ret %d\n", ret); 2111 goto err_map_bo_gart_failed; 2112 } 2113 2114 amdgpu_amdkfd_remove_eviction_fence( 2115 bo, bo->kfd_bo->process_info->eviction_fence); 2116 2117 amdgpu_bo_unreserve(bo); 2118 2119 bo = amdgpu_bo_ref(bo); 2120 2121 return 0; 2122 2123 err_map_bo_gart_failed: 2124 amdgpu_bo_unpin(bo); 2125 err_pin_bo_failed: 2126 amdgpu_bo_unreserve(bo); 2127 err_reserve_bo_failed: 2128 2129 return ret; 2130 } 2131 2132 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access 2133 * 2134 * @mem: Buffer object to be mapped for CPU access 2135 * @kptr[out]: pointer in kernel CPU address space 2136 * @size[out]: size of the buffer 2137 * 2138 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed 2139 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the 2140 * validate_list, so the GPU mapping can be restored after a page table was 2141 * evicted. 2142 * 2143 * Return: 0 on success, error code on failure 2144 */ 2145 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, 2146 void **kptr, uint64_t *size) 2147 { 2148 int ret; 2149 struct amdgpu_bo *bo = mem->bo; 2150 2151 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2152 pr_err("userptr can't be mapped to kernel\n"); 2153 return -EINVAL; 2154 } 2155 2156 mutex_lock(&mem->process_info->lock); 2157 2158 ret = amdgpu_bo_reserve(bo, true); 2159 if (ret) { 2160 pr_err("Failed to reserve bo. ret %d\n", ret); 2161 goto bo_reserve_failed; 2162 } 2163 2164 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2165 if (ret) { 2166 pr_err("Failed to pin bo. ret %d\n", ret); 2167 goto pin_failed; 2168 } 2169 2170 ret = amdgpu_bo_kmap(bo, kptr); 2171 if (ret) { 2172 pr_err("Failed to map bo to kernel. 
ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be unmapped for CPU access
 *
 * Removes the kernel CPU mapping and unpins the BO. It does not restore the
 * eviction fence, so this function should only be used for cleanup before the
 * BO is destroyed.
 */
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
	struct amdgpu_bo *bo = mem->bo;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *mem)
{
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb(); /* make sure read happened */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	int ret;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (drm_to_adev(obj->dev) != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret) {
		/* Free the kgd_mem allocated above, not the caller's pointer */
		kfree(*mem);
		return ret;
	}

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context.
This means it 2294 * cannot do any memory allocations, and cannot take any locks that 2295 * are held elsewhere while allocating memory. Therefore this is as 2296 * simple as possible, using atomic counters. 2297 * 2298 * It doesn't do anything to the BO itself. The real work happens in 2299 * restore, where we get updated page addresses. This function only 2300 * ensures that GPU access to the BO is stopped. 2301 */ 2302 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, 2303 struct mm_struct *mm) 2304 { 2305 struct amdkfd_process_info *process_info = mem->process_info; 2306 int evicted_bos; 2307 int r = 0; 2308 2309 /* Do not process MMU notifications until stage-4 IOCTL is received */ 2310 if (READ_ONCE(process_info->block_mmu_notifications)) 2311 return 0; 2312 2313 atomic_inc(&mem->invalid); 2314 evicted_bos = atomic_inc_return(&process_info->evicted_bos); 2315 if (evicted_bos == 1) { 2316 /* First eviction, stop the queues */ 2317 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR); 2318 if (r) 2319 pr_err("Failed to quiesce KFD\n"); 2320 schedule_delayed_work(&process_info->restore_userptr_work, 2321 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2322 } 2323 2324 return r; 2325 } 2326 2327 /* Update invalid userptr BOs 2328 * 2329 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to 2330 * userptr_inval_list and updates user pages for all BOs that have 2331 * been invalidated since their last update. 2332 */ 2333 static int update_invalid_user_pages(struct amdkfd_process_info *process_info, 2334 struct mm_struct *mm) 2335 { 2336 struct kgd_mem *mem, *tmp_mem; 2337 struct amdgpu_bo *bo; 2338 struct ttm_operation_ctx ctx = { false, false }; 2339 int invalid, ret; 2340 2341 /* Move all invalidated BOs to the userptr_inval_list and 2342 * release their user pages by migration to the CPU domain 2343 */ 2344 list_for_each_entry_safe(mem, tmp_mem, 2345 &process_info->userptr_valid_list, 2346 validate_list.head) { 2347 if (!atomic_read(&mem->invalid)) 2348 continue; /* BO is still valid */ 2349 2350 bo = mem->bo; 2351 2352 if (amdgpu_bo_reserve(bo, true)) 2353 return -EAGAIN; 2354 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 2355 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2356 amdgpu_bo_unreserve(bo); 2357 if (ret) { 2358 pr_err("%s: Failed to invalidate userptr BO\n", 2359 __func__); 2360 return -EAGAIN; 2361 } 2362 2363 list_move_tail(&mem->validate_list.head, 2364 &process_info->userptr_inval_list); 2365 } 2366 2367 if (list_empty(&process_info->userptr_inval_list)) 2368 return 0; /* All evicted userptr BOs were freed */ 2369 2370 /* Go through userptr_inval_list and update any invalid user_pages */ 2371 list_for_each_entry(mem, &process_info->userptr_inval_list, 2372 validate_list.head) { 2373 invalid = atomic_read(&mem->invalid); 2374 if (!invalid) 2375 /* BO hasn't been invalidated since the last 2376 * revalidation attempt. Keep its BO list. 2377 */ 2378 continue; 2379 2380 bo = mem->bo; 2381 2382 /* Get updated user pages */ 2383 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); 2384 if (ret) { 2385 pr_debug("Failed %d to get user pages\n", ret); 2386 2387 /* Return -EFAULT bad address error as success. It will 2388 * fail later with a VM fault if the GPU tries to access 2389 * it. Better than hanging indefinitely with stalled 2390 * user mode queues. 
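 * (validate_invalid_user_pages later clears the PTEs of a BO whose pages
 * could not be updated, so the GPU takes a VM fault instead of touching
 * stale memory.)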
2391 * 2392 * Return other error -EBUSY or -ENOMEM to retry restore 2393 */ 2394 if (ret != -EFAULT) 2395 return ret; 2396 } else { 2397 2398 /* 2399 * FIXME: Cannot ignore the return code, must hold 2400 * notifier_lock 2401 */ 2402 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 2403 } 2404 2405 /* Mark the BO as valid unless it was invalidated 2406 * again concurrently. 2407 */ 2408 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) 2409 return -EAGAIN; 2410 } 2411 2412 return 0; 2413 } 2414 2415 /* Validate invalid userptr BOs 2416 * 2417 * Validates BOs on the userptr_inval_list, and moves them back to the 2418 * userptr_valid_list. Also updates GPUVM page tables with new page 2419 * addresses and waits for the page table updates to complete. 2420 */ 2421 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) 2422 { 2423 struct amdgpu_bo_list_entry *pd_bo_list_entries; 2424 struct list_head resv_list, duplicates; 2425 struct ww_acquire_ctx ticket; 2426 struct amdgpu_sync sync; 2427 2428 struct amdgpu_vm *peer_vm; 2429 struct kgd_mem *mem, *tmp_mem; 2430 struct amdgpu_bo *bo; 2431 struct ttm_operation_ctx ctx = { false, false }; 2432 int i, ret; 2433 2434 pd_bo_list_entries = kcalloc(process_info->n_vms, 2435 sizeof(struct amdgpu_bo_list_entry), 2436 GFP_KERNEL); 2437 if (!pd_bo_list_entries) { 2438 pr_err("%s: Failed to allocate PD BO list entries\n", __func__); 2439 ret = -ENOMEM; 2440 goto out_no_mem; 2441 } 2442 2443 INIT_LIST_HEAD(&resv_list); 2444 INIT_LIST_HEAD(&duplicates); 2445 2446 /* Get all the page directory BOs that need to be reserved */ 2447 i = 0; 2448 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2449 vm_list_node) 2450 amdgpu_vm_get_pd_bo(peer_vm, &resv_list, 2451 &pd_bo_list_entries[i++]); 2452 /* Add the userptr_inval_list entries to resv_list */ 2453 list_for_each_entry(mem, &process_info->userptr_inval_list, 2454 validate_list.head) { 2455 list_add_tail(&mem->resv_list.head, &resv_list); 2456 mem->resv_list.bo = mem->validate_list.bo; 2457 mem->resv_list.num_shared = mem->validate_list.num_shared; 2458 } 2459 2460 /* Reserve all BOs and page tables for validation */ 2461 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); 2462 WARN(!list_empty(&duplicates), "Duplicates should be empty"); 2463 if (ret) 2464 goto out_free; 2465 2466 amdgpu_sync_create(&sync); 2467 2468 ret = process_validate_vms(process_info); 2469 if (ret) 2470 goto unreserve_out; 2471 2472 /* Validate BOs and update GPUVM page tables */ 2473 list_for_each_entry_safe(mem, tmp_mem, 2474 &process_info->userptr_inval_list, 2475 validate_list.head) { 2476 struct kfd_mem_attachment *attachment; 2477 2478 bo = mem->bo; 2479 2480 /* Validate the BO if we got user pages */ 2481 if (bo->tbo.ttm->pages[0]) { 2482 amdgpu_bo_placement_from_domain(bo, mem->domain); 2483 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2484 if (ret) { 2485 pr_err("%s: failed to validate BO\n", __func__); 2486 goto unreserve_out; 2487 } 2488 } 2489 2490 list_move_tail(&mem->validate_list.head, 2491 &process_info->userptr_valid_list); 2492 2493 /* Update mapping. If the BO was not validated 2494 * (because we couldn't get user pages), this will 2495 * clear the page table entries, which will result in 2496 * VM faults if the GPU tries to access the invalid 2497 * memory. 
2498 */ 2499 list_for_each_entry(attachment, &mem->attachments, list) { 2500 if (!attachment->is_mapped) 2501 continue; 2502 2503 kfd_mem_dmaunmap_attachment(mem, attachment); 2504 ret = update_gpuvm_pte(mem, attachment, &sync); 2505 if (ret) { 2506 pr_err("%s: update PTE failed\n", __func__); 2507 /* make sure this gets validated again */ 2508 atomic_inc(&mem->invalid); 2509 goto unreserve_out; 2510 } 2511 } 2512 } 2513 2514 /* Update page directories */ 2515 ret = process_update_pds(process_info, &sync); 2516 2517 unreserve_out: 2518 ttm_eu_backoff_reservation(&ticket, &resv_list); 2519 amdgpu_sync_wait(&sync, false); 2520 amdgpu_sync_free(&sync); 2521 out_free: 2522 kfree(pd_bo_list_entries); 2523 out_no_mem: 2524 2525 return ret; 2526 } 2527 2528 /* Worker callback to restore evicted userptr BOs 2529 * 2530 * Tries to update and validate all userptr BOs. If successful and no 2531 * concurrent evictions happened, the queues are restarted. Otherwise, 2532 * reschedule for another attempt later. 2533 */ 2534 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) 2535 { 2536 struct delayed_work *dwork = to_delayed_work(work); 2537 struct amdkfd_process_info *process_info = 2538 container_of(dwork, struct amdkfd_process_info, 2539 restore_userptr_work); 2540 struct task_struct *usertask; 2541 struct mm_struct *mm; 2542 int evicted_bos; 2543 2544 evicted_bos = atomic_read(&process_info->evicted_bos); 2545 if (!evicted_bos) 2546 return; 2547 2548 /* Reference task and mm in case of concurrent process termination */ 2549 usertask = get_pid_task(process_info->pid, PIDTYPE_PID); 2550 if (!usertask) 2551 return; 2552 mm = get_task_mm(usertask); 2553 if (!mm) { 2554 put_task_struct(usertask); 2555 return; 2556 } 2557 2558 mutex_lock(&process_info->lock); 2559 2560 if (update_invalid_user_pages(process_info, mm)) 2561 goto unlock_out; 2562 /* userptr_inval_list can be empty if all evicted userptr BOs 2563 * have been freed. In that case there is nothing to validate 2564 * and we can just restart the queues. 2565 */ 2566 if (!list_empty(&process_info->userptr_inval_list)) { 2567 if (atomic_read(&process_info->evicted_bos) != evicted_bos) 2568 goto unlock_out; /* Concurrent eviction, try again */ 2569 2570 if (validate_invalid_user_pages(process_info)) 2571 goto unlock_out; 2572 } 2573 /* Final check for concurrent evicton and atomic update. If 2574 * another eviction happens after successful update, it will 2575 * be a first eviction that calls quiesce_mm. The eviction 2576 * reference counting inside KFD will handle this case. 2577 */ 2578 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) != 2579 evicted_bos) 2580 goto unlock_out; 2581 evicted_bos = 0; 2582 if (kgd2kfd_resume_mm(mm)) { 2583 pr_err("%s: Failed to resume KFD\n", __func__); 2584 /* No recovery from this failure. Probably the CP is 2585 * hanging. No point trying again. 
2586 */ 2587 } 2588 2589 unlock_out: 2590 mutex_unlock(&process_info->lock); 2591 2592 /* If validation failed, reschedule another attempt */ 2593 if (evicted_bos) { 2594 schedule_delayed_work(&process_info->restore_userptr_work, 2595 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2596 2597 kfd_smi_event_queue_restore_rescheduled(mm); 2598 } 2599 mmput(mm); 2600 put_task_struct(usertask); 2601 } 2602 2603 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given 2604 * KFD process identified by process_info 2605 * 2606 * @process_info: amdkfd_process_info of the KFD process 2607 * 2608 * After memory eviction, restore thread calls this function. The function 2609 * should be called when the Process is still valid. BO restore involves - 2610 * 2611 * 1. Release old eviction fence and create new one 2612 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list. 2613 * 3 Use the second PD list and kfd_bo_list to create a list (ctx.list) of 2614 * BOs that need to be reserved. 2615 * 4. Reserve all the BOs 2616 * 5. Validate of PD and PT BOs. 2617 * 6. Validate all KFD BOs using kfd_bo_list and Map them and add new fence 2618 * 7. Add fence to all PD and PT BOs. 2619 * 8. Unreserve all BOs 2620 */ 2621 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) 2622 { 2623 struct amdgpu_bo_list_entry *pd_bo_list; 2624 struct amdkfd_process_info *process_info = info; 2625 struct amdgpu_vm *peer_vm; 2626 struct kgd_mem *mem; 2627 struct bo_vm_reservation_context ctx; 2628 struct amdgpu_amdkfd_fence *new_fence; 2629 int ret = 0, i; 2630 struct list_head duplicate_save; 2631 struct amdgpu_sync sync_obj; 2632 unsigned long failed_size = 0; 2633 unsigned long total_size = 0; 2634 2635 INIT_LIST_HEAD(&duplicate_save); 2636 INIT_LIST_HEAD(&ctx.list); 2637 INIT_LIST_HEAD(&ctx.duplicates); 2638 2639 pd_bo_list = kcalloc(process_info->n_vms, 2640 sizeof(struct amdgpu_bo_list_entry), 2641 GFP_KERNEL); 2642 if (!pd_bo_list) 2643 return -ENOMEM; 2644 2645 i = 0; 2646 mutex_lock(&process_info->lock); 2647 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2648 vm_list_node) 2649 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]); 2650 2651 /* Reserve all BOs and page tables/directory. Add all BOs from 2652 * kfd_bo_list to ctx.list 2653 */ 2654 list_for_each_entry(mem, &process_info->kfd_bo_list, 2655 validate_list.head) { 2656 2657 list_add_tail(&mem->resv_list.head, &ctx.list); 2658 mem->resv_list.bo = mem->validate_list.bo; 2659 mem->resv_list.num_shared = mem->validate_list.num_shared; 2660 } 2661 2662 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, 2663 false, &duplicate_save); 2664 if (ret) { 2665 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); 2666 goto ttm_reserve_fail; 2667 } 2668 2669 amdgpu_sync_create(&sync_obj); 2670 2671 /* Validate PDs and PTs */ 2672 ret = process_validate_vms(process_info); 2673 if (ret) 2674 goto validate_map_fail; 2675 2676 ret = process_sync_pds_resv(process_info, &sync_obj); 2677 if (ret) { 2678 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); 2679 goto validate_map_fail; 2680 } 2681 2682 /* Validate BOs and map them to GPUVM (update VM page tables). 
*/ 2683 list_for_each_entry(mem, &process_info->kfd_bo_list, 2684 validate_list.head) { 2685 2686 struct amdgpu_bo *bo = mem->bo; 2687 uint32_t domain = mem->domain; 2688 struct kfd_mem_attachment *attachment; 2689 struct dma_resv_iter cursor; 2690 struct dma_fence *fence; 2691 2692 total_size += amdgpu_bo_size(bo); 2693 2694 ret = amdgpu_amdkfd_bo_validate(bo, domain, false); 2695 if (ret) { 2696 pr_debug("Memory eviction: Validate BOs failed\n"); 2697 failed_size += amdgpu_bo_size(bo); 2698 ret = amdgpu_amdkfd_bo_validate(bo, 2699 AMDGPU_GEM_DOMAIN_GTT, false); 2700 if (ret) { 2701 pr_debug("Memory eviction: Try again\n"); 2702 goto validate_map_fail; 2703 } 2704 } 2705 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, 2706 DMA_RESV_USAGE_KERNEL, fence) { 2707 ret = amdgpu_sync_fence(&sync_obj, fence); 2708 if (ret) { 2709 pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); 2710 goto validate_map_fail; 2711 } 2712 } 2713 list_for_each_entry(attachment, &mem->attachments, list) { 2714 if (!attachment->is_mapped) 2715 continue; 2716 2717 kfd_mem_dmaunmap_attachment(mem, attachment); 2718 ret = update_gpuvm_pte(mem, attachment, &sync_obj); 2719 if (ret) { 2720 pr_debug("Memory eviction: update PTE failed. Try again\n"); 2721 goto validate_map_fail; 2722 } 2723 } 2724 } 2725 2726 if (failed_size) 2727 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2728 2729 /* Update page directories */ 2730 ret = process_update_pds(process_info, &sync_obj); 2731 if (ret) { 2732 pr_debug("Memory eviction: update PDs failed. Try again\n"); 2733 goto validate_map_fail; 2734 } 2735 2736 /* Wait for validate and PT updates to finish */ 2737 amdgpu_sync_wait(&sync_obj, false); 2738 2739 /* Release old eviction fence and create new one, because fence only 2740 * goes from unsignaled to signaled, fence cannot be reused. 2741 * Use context and mm from the old fence. 
2742 */ 2743 new_fence = amdgpu_amdkfd_fence_create( 2744 process_info->eviction_fence->base.context, 2745 process_info->eviction_fence->mm, 2746 NULL); 2747 if (!new_fence) { 2748 pr_err("Failed to create eviction fence\n"); 2749 ret = -ENOMEM; 2750 goto validate_map_fail; 2751 } 2752 dma_fence_put(&process_info->eviction_fence->base); 2753 process_info->eviction_fence = new_fence; 2754 *ef = dma_fence_get(&new_fence->base); 2755 2756 /* Attach new eviction fence to all BOs except pinned ones */ 2757 list_for_each_entry(mem, &process_info->kfd_bo_list, 2758 validate_list.head) { 2759 if (mem->bo->tbo.pin_count) 2760 continue; 2761 2762 dma_resv_add_fence(mem->bo->tbo.base.resv, 2763 &process_info->eviction_fence->base, 2764 DMA_RESV_USAGE_BOOKKEEP); 2765 } 2766 /* Attach eviction fence to PD / PT BOs */ 2767 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2768 vm_list_node) { 2769 struct amdgpu_bo *bo = peer_vm->root.bo; 2770 2771 dma_resv_add_fence(bo->tbo.base.resv, 2772 &process_info->eviction_fence->base, 2773 DMA_RESV_USAGE_BOOKKEEP); 2774 } 2775 2776 validate_map_fail: 2777 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list); 2778 amdgpu_sync_free(&sync_obj); 2779 ttm_reserve_fail: 2780 mutex_unlock(&process_info->lock); 2781 kfree(pd_bo_list); 2782 return ret; 2783 } 2784 2785 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) 2786 { 2787 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 2788 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws; 2789 int ret; 2790 2791 if (!info || !gws) 2792 return -EINVAL; 2793 2794 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 2795 if (!*mem) 2796 return -ENOMEM; 2797 2798 mutex_init(&(*mem)->lock); 2799 INIT_LIST_HEAD(&(*mem)->attachments); 2800 (*mem)->bo = amdgpu_bo_ref(gws_bo); 2801 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; 2802 (*mem)->process_info = process_info; 2803 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); 2804 amdgpu_sync_create(&(*mem)->sync); 2805 2806 2807 /* Validate gws bo the first time it is added to process */ 2808 mutex_lock(&(*mem)->process_info->lock); 2809 ret = amdgpu_bo_reserve(gws_bo, false); 2810 if (unlikely(ret)) { 2811 pr_err("Reserve gws bo failed %d\n", ret); 2812 goto bo_reservation_failure; 2813 } 2814 2815 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true); 2816 if (ret) { 2817 pr_err("GWS BO validate failed %d\n", ret); 2818 goto bo_validation_failure; 2819 } 2820 /* GWS resource is shared b/t amdgpu and amdkfd 2821 * Add process eviction fence to bo so they can 2822 * evict each other. 
2823 */ 2824 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); 2825 if (ret) 2826 goto reserve_shared_fail; 2827 dma_resv_add_fence(gws_bo->tbo.base.resv, 2828 &process_info->eviction_fence->base, 2829 DMA_RESV_USAGE_BOOKKEEP); 2830 amdgpu_bo_unreserve(gws_bo); 2831 mutex_unlock(&(*mem)->process_info->lock); 2832 2833 return ret; 2834 2835 reserve_shared_fail: 2836 bo_validation_failure: 2837 amdgpu_bo_unreserve(gws_bo); 2838 bo_reservation_failure: 2839 mutex_unlock(&(*mem)->process_info->lock); 2840 amdgpu_sync_free(&(*mem)->sync); 2841 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); 2842 amdgpu_bo_unref(&gws_bo); 2843 mutex_destroy(&(*mem)->lock); 2844 kfree(*mem); 2845 *mem = NULL; 2846 return ret; 2847 } 2848 2849 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) 2850 { 2851 int ret; 2852 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 2853 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; 2854 struct amdgpu_bo *gws_bo = kgd_mem->bo; 2855 2856 /* Remove BO from process's validate list so restore worker won't touch 2857 * it anymore 2858 */ 2859 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info); 2860 2861 ret = amdgpu_bo_reserve(gws_bo, false); 2862 if (unlikely(ret)) { 2863 pr_err("Reserve gws bo failed %d\n", ret); 2864 //TODO add BO back to validate_list? 2865 return ret; 2866 } 2867 amdgpu_amdkfd_remove_eviction_fence(gws_bo, 2868 process_info->eviction_fence); 2869 amdgpu_bo_unreserve(gws_bo); 2870 amdgpu_sync_free(&kgd_mem->sync); 2871 amdgpu_bo_unref(&gws_bo); 2872 mutex_destroy(&kgd_mem->lock); 2873 kfree(mem); 2874 return 0; 2875 } 2876 2877 /* Returns GPU-specific tiling mode information */ 2878 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, 2879 struct tile_config *config) 2880 { 2881 config->gb_addr_config = adev->gfx.config.gb_addr_config; 2882 config->tile_config_ptr = adev->gfx.config.tile_mode_array; 2883 config->num_tile_configs = 2884 ARRAY_SIZE(adev->gfx.config.tile_mode_array); 2885 config->macro_tile_config_ptr = 2886 adev->gfx.config.macrotile_mode_array; 2887 config->num_macro_tile_configs = 2888 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 2889 2890 /* Those values are not set from GFX9 onwards */ 2891 config->num_banks = adev->gfx.config.num_banks; 2892 config->num_ranks = adev->gfx.config.num_ranks; 2893 2894 return 0; 2895 } 2896 2897 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem) 2898 { 2899 struct kfd_mem_attachment *entry; 2900 2901 list_for_each_entry(entry, &mem->attachments, list) { 2902 if (entry->is_mapped && entry->adev == adev) 2903 return true; 2904 } 2905 return false; 2906 } 2907 2908 #if defined(CONFIG_DEBUG_FS) 2909 2910 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data) 2911 { 2912 2913 spin_lock(&kfd_mem_limit.mem_limit_lock); 2914 seq_printf(m, "System mem used %lldM out of %lluM\n", 2915 (kfd_mem_limit.system_mem_used >> 20), 2916 (kfd_mem_limit.max_system_mem_limit >> 20)); 2917 seq_printf(m, "TTM mem used %lldM out of %lluM\n", 2918 (kfd_mem_limit.ttm_mem_used >> 20), 2919 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 2920 spin_unlock(&kfd_mem_limit.mem_limit_lock); 2921 2922 return 0; 2923 } 2924 2925 #endif 2926