1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * Copyright 2020-2021 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/types.h> 25 #include <linux/sched/task.h> 26 #include "amdgpu_sync.h" 27 #include "amdgpu_object.h" 28 #include "amdgpu_vm.h" 29 #include "amdgpu_mn.h" 30 #include "amdgpu.h" 31 #include "amdgpu_xgmi.h" 32 #include "kfd_priv.h" 33 #include "kfd_svm.h" 34 #include "kfd_migrate.h" 35 #include "kfd_smi_events.h" 36 37 #ifdef dev_fmt 38 #undef dev_fmt 39 #endif 40 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__ 41 42 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1 43 44 /* Long enough to ensure no retry fault comes after svm range is restored and 45 * page table is updated. 46 */ 47 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) 48 49 /* Giant svm range split into smaller ranges based on this, it is decided using 50 * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to 51 * power of 2MB. 52 */ 53 static uint64_t max_svm_range_pages; 54 55 struct criu_svm_metadata { 56 struct list_head list; 57 struct kfd_criu_svm_range_priv_data data; 58 }; 59 60 static void svm_range_evict_svm_bo_worker(struct work_struct *work); 61 static bool 62 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, 63 const struct mmu_notifier_range *range, 64 unsigned long cur_seq); 65 static int 66 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, 67 uint64_t *bo_s, uint64_t *bo_l); 68 static const struct mmu_interval_notifier_ops svm_range_mn_ops = { 69 .invalidate = svm_range_cpu_invalidate_pagetables, 70 }; 71 72 /** 73 * svm_range_unlink - unlink svm_range from lists and interval tree 74 * @prange: svm range structure to be removed 75 * 76 * Remove the svm_range from the svms and svm_bo lists and the svms 77 * interval tree. 
78 * 79 * Context: The caller must hold svms->lock 80 */ 81 static void svm_range_unlink(struct svm_range *prange) 82 { 83 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 84 prange, prange->start, prange->last); 85 86 if (prange->svm_bo) { 87 spin_lock(&prange->svm_bo->list_lock); 88 list_del(&prange->svm_bo_list); 89 spin_unlock(&prange->svm_bo->list_lock); 90 } 91 92 list_del(&prange->list); 93 if (prange->it_node.start != 0 && prange->it_node.last != 0) 94 interval_tree_remove(&prange->it_node, &prange->svms->objects); 95 } 96 97 static void 98 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange) 99 { 100 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 101 prange, prange->start, prange->last); 102 103 mmu_interval_notifier_insert_locked(&prange->notifier, mm, 104 prange->start << PAGE_SHIFT, 105 prange->npages << PAGE_SHIFT, 106 &svm_range_mn_ops); 107 } 108 109 /** 110 * svm_range_add_to_svms - add svm range to svms 111 * @prange: svm range structure to be added 112 * 113 * Add the svm range to svms interval tree and link list 114 * 115 * Context: The caller must hold svms->lock 116 */ 117 static void svm_range_add_to_svms(struct svm_range *prange) 118 { 119 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 120 prange, prange->start, prange->last); 121 122 list_move_tail(&prange->list, &prange->svms->list); 123 prange->it_node.start = prange->start; 124 prange->it_node.last = prange->last; 125 interval_tree_insert(&prange->it_node, &prange->svms->objects); 126 } 127 128 static void svm_range_remove_notifier(struct svm_range *prange) 129 { 130 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", 131 prange->svms, prange, 132 prange->notifier.interval_tree.start >> PAGE_SHIFT, 133 prange->notifier.interval_tree.last >> PAGE_SHIFT); 134 135 if (prange->notifier.interval_tree.start != 0 && 136 prange->notifier.interval_tree.last != 0) 137 mmu_interval_notifier_remove(&prange->notifier); 138 } 139 140 static bool 141 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) 142 { 143 return dma_addr && !dma_mapping_error(dev, dma_addr) && 144 !(dma_addr & SVM_RANGE_VRAM_DOMAIN); 145 } 146 147 static int 148 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, 149 unsigned long offset, unsigned long npages, 150 unsigned long *hmm_pfns, uint32_t gpuidx) 151 { 152 enum dma_data_direction dir = DMA_BIDIRECTIONAL; 153 dma_addr_t *addr = prange->dma_addr[gpuidx]; 154 struct device *dev = adev->dev; 155 struct page *page; 156 int i, r; 157 158 if (!addr) { 159 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL); 160 if (!addr) 161 return -ENOMEM; 162 prange->dma_addr[gpuidx] = addr; 163 } 164 165 addr += offset; 166 for (i = 0; i < npages; i++) { 167 if (svm_is_valid_dma_mapping_addr(dev, addr[i])) 168 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); 169 170 page = hmm_pfn_to_page(hmm_pfns[i]); 171 if (is_zone_device_page(page)) { 172 struct amdgpu_device *bo_adev = 173 amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); 174 175 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + 176 bo_adev->vm_manager.vram_base_offset - 177 bo_adev->kfd.dev->pgmap.range.start; 178 addr[i] |= SVM_RANGE_VRAM_DOMAIN; 179 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); 180 continue; 181 } 182 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); 183 r = dma_mapping_error(dev, addr[i]); 184 if (r) { 185 dev_err(dev, "failed %d dma_map_page\n", r); 186 return r; 187 } 188 pr_debug_ratelimited("dma 
mapping 0x%llx for page addr 0x%lx\n", 189 addr[i] >> PAGE_SHIFT, page_to_pfn(page)); 190 } 191 return 0; 192 } 193 194 static int 195 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, 196 unsigned long offset, unsigned long npages, 197 unsigned long *hmm_pfns) 198 { 199 struct kfd_process *p; 200 uint32_t gpuidx; 201 int r; 202 203 p = container_of(prange->svms, struct kfd_process, svms); 204 205 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 206 struct kfd_process_device *pdd; 207 208 pr_debug("mapping to gpu idx 0x%x\n", gpuidx); 209 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 210 if (!pdd) { 211 pr_debug("failed to find device idx %d\n", gpuidx); 212 return -EINVAL; 213 } 214 215 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, 216 hmm_pfns, gpuidx); 217 if (r) 218 break; 219 } 220 221 return r; 222 } 223 224 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, 225 unsigned long offset, unsigned long npages) 226 { 227 enum dma_data_direction dir = DMA_BIDIRECTIONAL; 228 int i; 229 230 if (!dma_addr) 231 return; 232 233 for (i = offset; i < offset + npages; i++) { 234 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) 235 continue; 236 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); 237 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); 238 dma_addr[i] = 0; 239 } 240 } 241 242 void svm_range_free_dma_mappings(struct svm_range *prange) 243 { 244 struct kfd_process_device *pdd; 245 dma_addr_t *dma_addr; 246 struct device *dev; 247 struct kfd_process *p; 248 uint32_t gpuidx; 249 250 p = container_of(prange->svms, struct kfd_process, svms); 251 252 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) { 253 dma_addr = prange->dma_addr[gpuidx]; 254 if (!dma_addr) 255 continue; 256 257 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 258 if (!pdd) { 259 pr_debug("failed to find device idx %d\n", gpuidx); 260 continue; 261 } 262 dev = &pdd->dev->pdev->dev; 263 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); 264 kvfree(dma_addr); 265 prange->dma_addr[gpuidx] = NULL; 266 } 267 } 268 269 static void svm_range_free(struct svm_range *prange, bool update_mem_usage) 270 { 271 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT; 272 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); 273 274 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, 275 prange->start, prange->last); 276 277 svm_range_vram_node_free(prange); 278 svm_range_free_dma_mappings(prange); 279 280 if (update_mem_usage && !p->xnack_enabled) { 281 pr_debug("unreserve mem limit: %lld\n", size); 282 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 283 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); 284 } 285 mutex_destroy(&prange->lock); 286 mutex_destroy(&prange->migrate_mutex); 287 kfree(prange); 288 } 289 290 static void 291 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc, 292 uint8_t *granularity, uint32_t *flags) 293 { 294 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 295 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 296 *granularity = 9; 297 *flags = 298 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT; 299 } 300 301 static struct 302 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, 303 uint64_t last, bool update_mem_usage) 304 { 305 uint64_t size = last - start + 1; 306 struct svm_range *prange; 307 struct kfd_process *p; 308 309 prange = kzalloc(sizeof(*prange), GFP_KERNEL); 310 if (!prange) 311 return NULL; 312 313 p = container_of(svms, 
struct kfd_process, svms); 314 if (!p->xnack_enabled && update_mem_usage && 315 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT, 316 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) { 317 pr_info("SVM mapping failed, exceeds resident system memory limit\n"); 318 kfree(prange); 319 return NULL; 320 } 321 prange->npages = size; 322 prange->svms = svms; 323 prange->start = start; 324 prange->last = last; 325 INIT_LIST_HEAD(&prange->list); 326 INIT_LIST_HEAD(&prange->update_list); 327 INIT_LIST_HEAD(&prange->svm_bo_list); 328 INIT_LIST_HEAD(&prange->deferred_list); 329 INIT_LIST_HEAD(&prange->child_list); 330 atomic_set(&prange->invalid, 0); 331 prange->validate_timestamp = 0; 332 mutex_init(&prange->migrate_mutex); 333 mutex_init(&prange->lock); 334 335 if (p->xnack_enabled) 336 bitmap_copy(prange->bitmap_access, svms->bitmap_supported, 337 MAX_GPU_INSTANCE); 338 339 svm_range_set_default_attributes(&prange->preferred_loc, 340 &prange->prefetch_loc, 341 &prange->granularity, &prange->flags); 342 343 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last); 344 345 return prange; 346 } 347 348 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo) 349 { 350 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref)) 351 return false; 352 353 return true; 354 } 355 356 static void svm_range_bo_release(struct kref *kref) 357 { 358 struct svm_range_bo *svm_bo; 359 360 svm_bo = container_of(kref, struct svm_range_bo, kref); 361 pr_debug("svm_bo 0x%p\n", svm_bo); 362 363 spin_lock(&svm_bo->list_lock); 364 while (!list_empty(&svm_bo->range_list)) { 365 struct svm_range *prange = 366 list_first_entry(&svm_bo->range_list, 367 struct svm_range, svm_bo_list); 368 /* list_del_init tells a concurrent svm_range_vram_node_new when 369 * it's safe to reuse the svm_bo pointer and svm_bo_list head. 370 */ 371 list_del_init(&prange->svm_bo_list); 372 spin_unlock(&svm_bo->list_lock); 373 374 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, 375 prange->start, prange->last); 376 mutex_lock(&prange->lock); 377 prange->svm_bo = NULL; 378 mutex_unlock(&prange->lock); 379 380 spin_lock(&svm_bo->list_lock); 381 } 382 spin_unlock(&svm_bo->list_lock); 383 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) { 384 /* We're not in the eviction worker. 385 * Signal the fence and synchronize with any 386 * pending eviction work. 
387 */ 388 dma_fence_signal(&svm_bo->eviction_fence->base); 389 cancel_work_sync(&svm_bo->eviction_work); 390 } 391 dma_fence_put(&svm_bo->eviction_fence->base); 392 amdgpu_bo_unref(&svm_bo->bo); 393 kfree(svm_bo); 394 } 395 396 static void svm_range_bo_wq_release(struct work_struct *work) 397 { 398 struct svm_range_bo *svm_bo; 399 400 svm_bo = container_of(work, struct svm_range_bo, release_work); 401 svm_range_bo_release(&svm_bo->kref); 402 } 403 404 static void svm_range_bo_release_async(struct kref *kref) 405 { 406 struct svm_range_bo *svm_bo; 407 408 svm_bo = container_of(kref, struct svm_range_bo, kref); 409 pr_debug("svm_bo 0x%p\n", svm_bo); 410 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release); 411 schedule_work(&svm_bo->release_work); 412 } 413 414 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo) 415 { 416 kref_put(&svm_bo->kref, svm_range_bo_release_async); 417 } 418 419 static void svm_range_bo_unref(struct svm_range_bo *svm_bo) 420 { 421 if (svm_bo) 422 kref_put(&svm_bo->kref, svm_range_bo_release); 423 } 424 425 static bool 426 svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) 427 { 428 struct amdgpu_device *bo_adev; 429 430 mutex_lock(&prange->lock); 431 if (!prange->svm_bo) { 432 mutex_unlock(&prange->lock); 433 return false; 434 } 435 if (prange->ttm_res) { 436 /* We still have a reference, all is well */ 437 mutex_unlock(&prange->lock); 438 return true; 439 } 440 if (svm_bo_ref_unless_zero(prange->svm_bo)) { 441 /* 442 * Migrate from GPU to GPU, remove range from source bo_adev 443 * svm_bo range list, and return false to allocate svm_bo from 444 * destination adev. 445 */ 446 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); 447 if (bo_adev != adev) { 448 mutex_unlock(&prange->lock); 449 450 spin_lock(&prange->svm_bo->list_lock); 451 list_del_init(&prange->svm_bo_list); 452 spin_unlock(&prange->svm_bo->list_lock); 453 454 svm_range_bo_unref(prange->svm_bo); 455 return false; 456 } 457 if (READ_ONCE(prange->svm_bo->evicting)) { 458 struct dma_fence *f; 459 struct svm_range_bo *svm_bo; 460 /* The BO is getting evicted, 461 * we need to get a new one 462 */ 463 mutex_unlock(&prange->lock); 464 svm_bo = prange->svm_bo; 465 f = dma_fence_get(&svm_bo->eviction_fence->base); 466 svm_range_bo_unref(prange->svm_bo); 467 /* wait for the fence to avoid long spin-loop 468 * at list_empty_careful 469 */ 470 dma_fence_wait(f, false); 471 dma_fence_put(f); 472 } else { 473 /* The BO was still around and we got 474 * a new reference to it 475 */ 476 mutex_unlock(&prange->lock); 477 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n", 478 prange->svms, prange->start, prange->last); 479 480 prange->ttm_res = prange->svm_bo->bo->tbo.resource; 481 return true; 482 } 483 484 } else { 485 mutex_unlock(&prange->lock); 486 } 487 488 /* We need a new svm_bo. Spin-loop to wait for concurrent 489 * svm_range_bo_release to finish removing this range from 490 * its range list. After this, it is safe to reuse the 491 * svm_bo pointer and svm_bo_list head. 
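	 *
	 * Note: svm_range_bo_release uses list_del_init on svm_bo_list for
	 * exactly this purpose (see the comment there), so this wait only
	 * spins for the short window while that release is in progress.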
492 */ 493 while (!list_empty_careful(&prange->svm_bo_list)) 494 ; 495 496 return false; 497 } 498 499 static struct svm_range_bo *svm_range_bo_new(void) 500 { 501 struct svm_range_bo *svm_bo; 502 503 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL); 504 if (!svm_bo) 505 return NULL; 506 507 kref_init(&svm_bo->kref); 508 INIT_LIST_HEAD(&svm_bo->range_list); 509 spin_lock_init(&svm_bo->list_lock); 510 511 return svm_bo; 512 } 513 514 int 515 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, 516 bool clear) 517 { 518 struct amdgpu_bo_param bp; 519 struct svm_range_bo *svm_bo; 520 struct amdgpu_bo_user *ubo; 521 struct amdgpu_bo *bo; 522 struct kfd_process *p; 523 struct mm_struct *mm; 524 int r; 525 526 p = container_of(prange->svms, struct kfd_process, svms); 527 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, 528 prange->start, prange->last); 529 530 if (svm_range_validate_svm_bo(adev, prange)) 531 return 0; 532 533 svm_bo = svm_range_bo_new(); 534 if (!svm_bo) { 535 pr_debug("failed to alloc svm bo\n"); 536 return -ENOMEM; 537 } 538 mm = get_task_mm(p->lead_thread); 539 if (!mm) { 540 pr_debug("failed to get mm\n"); 541 kfree(svm_bo); 542 return -ESRCH; 543 } 544 svm_bo->svms = prange->svms; 545 svm_bo->eviction_fence = 546 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 547 mm, 548 svm_bo); 549 mmput(mm); 550 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker); 551 svm_bo->evicting = 0; 552 memset(&bp, 0, sizeof(bp)); 553 bp.size = prange->npages * PAGE_SIZE; 554 bp.byte_align = PAGE_SIZE; 555 bp.domain = AMDGPU_GEM_DOMAIN_VRAM; 556 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; 557 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0; 558 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE; 559 bp.type = ttm_bo_type_device; 560 bp.resv = NULL; 561 562 r = amdgpu_bo_create_user(adev, &bp, &ubo); 563 if (r) { 564 pr_debug("failed %d to create bo\n", r); 565 goto create_bo_failed; 566 } 567 bo = &ubo->bo; 568 r = amdgpu_bo_reserve(bo, true); 569 if (r) { 570 pr_debug("failed %d to reserve bo\n", r); 571 goto reserve_bo_failed; 572 } 573 574 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 575 if (r) { 576 pr_debug("failed %d to reserve bo\n", r); 577 amdgpu_bo_unreserve(bo); 578 goto reserve_bo_failed; 579 } 580 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true); 581 582 amdgpu_bo_unreserve(bo); 583 584 svm_bo->bo = bo; 585 prange->svm_bo = svm_bo; 586 prange->ttm_res = bo->tbo.resource; 587 prange->offset = 0; 588 589 spin_lock(&svm_bo->list_lock); 590 list_add(&prange->svm_bo_list, &svm_bo->range_list); 591 spin_unlock(&svm_bo->list_lock); 592 593 return 0; 594 595 reserve_bo_failed: 596 amdgpu_bo_unref(&bo); 597 create_bo_failed: 598 dma_fence_put(&svm_bo->eviction_fence->base); 599 kfree(svm_bo); 600 prange->ttm_res = NULL; 601 602 return r; 603 } 604 605 void svm_range_vram_node_free(struct svm_range *prange) 606 { 607 svm_range_bo_unref(prange->svm_bo); 608 prange->ttm_res = NULL; 609 } 610 611 struct amdgpu_device * 612 svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) 613 { 614 struct kfd_process_device *pdd; 615 struct kfd_process *p; 616 int32_t gpu_idx; 617 618 p = container_of(prange->svms, struct kfd_process, svms); 619 620 gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id); 621 if (gpu_idx < 0) { 622 pr_debug("failed to get device by id 0x%x\n", gpu_id); 623 return NULL; 624 } 625 pdd = kfd_process_device_from_gpuidx(p, gpu_idx); 626 if (!pdd) { 627 pr_debug("failed to get device by idx 
0x%x\n", gpu_idx); 628 return NULL; 629 } 630 631 return pdd->dev->adev; 632 } 633 634 struct kfd_process_device * 635 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) 636 { 637 struct kfd_process *p; 638 int32_t gpu_idx, gpuid; 639 int r; 640 641 p = container_of(prange->svms, struct kfd_process, svms); 642 643 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); 644 if (r) { 645 pr_debug("failed to get device id by adev %p\n", adev); 646 return NULL; 647 } 648 649 return kfd_process_device_from_gpuidx(p, gpu_idx); 650 } 651 652 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) 653 { 654 struct ttm_operation_ctx ctx = { false, false }; 655 656 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); 657 658 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 659 } 660 661 static int 662 svm_range_check_attr(struct kfd_process *p, 663 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) 664 { 665 uint32_t i; 666 667 for (i = 0; i < nattr; i++) { 668 uint32_t val = attrs[i].value; 669 int gpuidx = MAX_GPU_INSTANCE; 670 671 switch (attrs[i].type) { 672 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 673 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM && 674 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED) 675 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 676 break; 677 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 678 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM) 679 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 680 break; 681 case KFD_IOCTL_SVM_ATTR_ACCESS: 682 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 683 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 684 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 685 break; 686 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 687 break; 688 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 689 break; 690 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 691 break; 692 default: 693 pr_debug("unknown attr type 0x%x\n", attrs[i].type); 694 return -EINVAL; 695 } 696 697 if (gpuidx < 0) { 698 pr_debug("no GPU 0x%x found\n", val); 699 return -EINVAL; 700 } else if (gpuidx < MAX_GPU_INSTANCE && 701 !test_bit(gpuidx, p->svms.bitmap_supported)) { 702 pr_debug("GPU 0x%x not supported\n", val); 703 return -EINVAL; 704 } 705 } 706 707 return 0; 708 } 709 710 static void 711 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, 712 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, 713 bool *update_mapping) 714 { 715 uint32_t i; 716 int gpuidx; 717 718 for (i = 0; i < nattr; i++) { 719 switch (attrs[i].type) { 720 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 721 prange->preferred_loc = attrs[i].value; 722 break; 723 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 724 prange->prefetch_loc = attrs[i].value; 725 break; 726 case KFD_IOCTL_SVM_ATTR_ACCESS: 727 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 728 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 729 *update_mapping = true; 730 gpuidx = kfd_process_gpuidx_from_gpuid(p, 731 attrs[i].value); 732 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { 733 bitmap_clear(prange->bitmap_access, gpuidx, 1); 734 bitmap_clear(prange->bitmap_aip, gpuidx, 1); 735 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { 736 bitmap_set(prange->bitmap_access, gpuidx, 1); 737 bitmap_clear(prange->bitmap_aip, gpuidx, 1); 738 } else { 739 bitmap_clear(prange->bitmap_access, gpuidx, 1); 740 bitmap_set(prange->bitmap_aip, gpuidx, 1); 741 } 742 break; 743 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 744 *update_mapping = true; 745 prange->flags |= attrs[i].value; 746 break; 747 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 748 *update_mapping = true; 749 prange->flags &= 
~attrs[i].value; 750 break; 751 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 752 prange->granularity = attrs[i].value; 753 break; 754 default: 755 WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); 756 } 757 } 758 } 759 760 static bool 761 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, 762 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) 763 { 764 uint32_t i; 765 int gpuidx; 766 767 for (i = 0; i < nattr; i++) { 768 switch (attrs[i].type) { 769 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 770 if (prange->preferred_loc != attrs[i].value) 771 return false; 772 break; 773 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 774 /* Prefetch should always trigger a migration even 775 * if the value of the attribute didn't change. 776 */ 777 return false; 778 case KFD_IOCTL_SVM_ATTR_ACCESS: 779 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 780 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 781 gpuidx = kfd_process_gpuidx_from_gpuid(p, 782 attrs[i].value); 783 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { 784 if (test_bit(gpuidx, prange->bitmap_access) || 785 test_bit(gpuidx, prange->bitmap_aip)) 786 return false; 787 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { 788 if (!test_bit(gpuidx, prange->bitmap_access)) 789 return false; 790 } else { 791 if (!test_bit(gpuidx, prange->bitmap_aip)) 792 return false; 793 } 794 break; 795 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 796 if ((prange->flags & attrs[i].value) != attrs[i].value) 797 return false; 798 break; 799 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 800 if ((prange->flags & attrs[i].value) != 0) 801 return false; 802 break; 803 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 804 if (prange->granularity != attrs[i].value) 805 return false; 806 break; 807 default: 808 WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); 809 } 810 } 811 812 return true; 813 } 814 815 /** 816 * svm_range_debug_dump - print all range information from svms 817 * @svms: svm range list header 818 * 819 * debug output svm range start, end, prefetch location from svms 820 * interval tree and link list 821 * 822 * Context: The caller must hold svms->lock 823 */ 824 static void svm_range_debug_dump(struct svm_range_list *svms) 825 { 826 struct interval_tree_node *node; 827 struct svm_range *prange; 828 829 pr_debug("dump svms 0x%p list\n", svms); 830 pr_debug("range\tstart\tpage\tend\t\tlocation\n"); 831 832 list_for_each_entry(prange, &svms->list, list) { 833 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n", 834 prange, prange->start, prange->npages, 835 prange->start + prange->npages - 1, 836 prange->actual_loc); 837 } 838 839 pr_debug("dump svms 0x%p interval tree\n", svms); 840 pr_debug("range\tstart\tpage\tend\t\tlocation\n"); 841 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL); 842 while (node) { 843 prange = container_of(node, struct svm_range, it_node); 844 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n", 845 prange, prange->start, prange->npages, 846 prange->start + prange->npages - 1, 847 prange->actual_loc); 848 node = interval_tree_iter_next(node, 0, ~0ULL); 849 } 850 } 851 852 static int 853 svm_range_split_array(void *ppnew, void *ppold, size_t size, 854 uint64_t old_start, uint64_t old_n, 855 uint64_t new_start, uint64_t new_n) 856 { 857 unsigned char *new, *old, *pold; 858 uint64_t d; 859 860 if (!ppold) 861 return 0; 862 pold = *(unsigned char **)ppold; 863 if (!pold) 864 return 0; 865 866 new = kvmalloc_array(new_n, size, GFP_KERNEL); 867 if (!new) 868 return -ENOMEM; 869 870 d = (new_start - old_start) * size; 871 memcpy(new, pold + d, new_n * size); 872 873 old = 
kvmalloc_array(old_n, size, GFP_KERNEL); 874 if (!old) { 875 kvfree(new); 876 return -ENOMEM; 877 } 878 879 d = (new_start == old_start) ? new_n * size : 0; 880 memcpy(old, pold + d, old_n * size); 881 882 kvfree(pold); 883 *(void **)ppold = old; 884 *(void **)ppnew = new; 885 886 return 0; 887 } 888 889 static int 890 svm_range_split_pages(struct svm_range *new, struct svm_range *old, 891 uint64_t start, uint64_t last) 892 { 893 uint64_t npages = last - start + 1; 894 int i, r; 895 896 for (i = 0; i < MAX_GPU_INSTANCE; i++) { 897 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i], 898 sizeof(*old->dma_addr[i]), old->start, 899 npages, new->start, new->npages); 900 if (r) 901 return r; 902 } 903 904 return 0; 905 } 906 907 static int 908 svm_range_split_nodes(struct svm_range *new, struct svm_range *old, 909 uint64_t start, uint64_t last) 910 { 911 uint64_t npages = last - start + 1; 912 913 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n", 914 new->svms, new, new->start, start, last); 915 916 if (new->start == old->start) { 917 new->offset = old->offset; 918 old->offset += new->npages; 919 } else { 920 new->offset = old->offset + npages; 921 } 922 923 new->svm_bo = svm_range_bo_ref(old->svm_bo); 924 new->ttm_res = old->ttm_res; 925 926 spin_lock(&new->svm_bo->list_lock); 927 list_add(&new->svm_bo_list, &new->svm_bo->range_list); 928 spin_unlock(&new->svm_bo->list_lock); 929 930 return 0; 931 } 932 933 /** 934 * svm_range_split_adjust - split range and adjust 935 * 936 * @new: new range 937 * @old: the old range 938 * @start: the old range adjust to start address in pages 939 * @last: the old range adjust to last address in pages 940 * 941 * Copy system memory dma_addr or vram ttm_res in old range to new 942 * range from new_start up to size new->npages, the remaining old range is from 943 * start to last 944 * 945 * Return: 946 * 0 - OK, -ENOMEM - out of memory 947 */ 948 static int 949 svm_range_split_adjust(struct svm_range *new, struct svm_range *old, 950 uint64_t start, uint64_t last) 951 { 952 int r; 953 954 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n", 955 new->svms, new->start, old->start, old->last, start, last); 956 957 if (new->start < old->start || 958 new->last > old->last) { 959 WARN_ONCE(1, "invalid new range start or last\n"); 960 return -EINVAL; 961 } 962 963 r = svm_range_split_pages(new, old, start, last); 964 if (r) 965 return r; 966 967 if (old->actual_loc && old->ttm_res) { 968 r = svm_range_split_nodes(new, old, start, last); 969 if (r) 970 return r; 971 } 972 973 old->npages = last - start + 1; 974 old->start = start; 975 old->last = last; 976 new->flags = old->flags; 977 new->preferred_loc = old->preferred_loc; 978 new->prefetch_loc = old->prefetch_loc; 979 new->actual_loc = old->actual_loc; 980 new->granularity = old->granularity; 981 new->mapped_to_gpu = old->mapped_to_gpu; 982 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); 983 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); 984 985 return 0; 986 } 987 988 /** 989 * svm_range_split - split a range in 2 ranges 990 * 991 * @prange: the svm range to split 992 * @start: the remaining range start address in pages 993 * @last: the remaining range last address in pages 994 * @new: the result new range generated 995 * 996 * Two cases only: 997 * case 1: if start == prange->start 998 * prange ==> prange[start, last] 999 * new range [last + 1, prange->last] 1000 * 1001 * case 2: if last == prange->last 1002 * prange ==> 
prange[start, last]
 *          new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last, false);
	else
		*new = svm_range_new(svms, old_start, start - 1, false);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new, false);
		*new = NULL;
	}

	return r;
}

static int
svm_range_split_tail(struct svm_range *prange,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->list, insert_list);
	return r;
}

static int
svm_range_split_head(struct svm_range *prange,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->list, insert_list);
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align the split range start and size to the granularity size, so a
	 * single PTE covers the whole range. This reduces the number of PTE
	 * updates and the L1 TLB space used for translation.
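	 *
	 * For example, with the default granularity of 9 the block size is
	 * 1UL << 9 = 512 pages (2MB). A fault at page 0x12345 then yields
	 * start = ALIGN_DOWN(0x12345, 512) = 0x12200 and
	 * last = ALIGN(0x12346, 512) - 1 = 0x123ff.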
1109 */ 1110 size = 1UL << prange->granularity; 1111 start = ALIGN_DOWN(addr, size); 1112 last = ALIGN(addr + 1, size) - 1; 1113 1114 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", 1115 prange->svms, prange->start, prange->last, start, last, size); 1116 1117 if (start > prange->start) { 1118 r = svm_range_split(prange, start, prange->last, &head); 1119 if (r) 1120 return r; 1121 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); 1122 } 1123 1124 if (last < prange->last) { 1125 r = svm_range_split(prange, prange->start, last, &tail); 1126 if (r) 1127 return r; 1128 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); 1129 } 1130 1131 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ 1132 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { 1133 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; 1134 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", 1135 prange, prange->start, prange->last, 1136 SVM_OP_ADD_RANGE_AND_MAP); 1137 } 1138 return 0; 1139 } 1140 1141 static uint64_t 1142 svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, 1143 int domain) 1144 { 1145 struct amdgpu_device *bo_adev; 1146 uint32_t flags = prange->flags; 1147 uint32_t mapping_flags = 0; 1148 uint64_t pte_flags; 1149 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); 1150 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; 1151 1152 if (domain == SVM_RANGE_VRAM_DOMAIN) 1153 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); 1154 1155 switch (KFD_GC_VERSION(adev->kfd.dev)) { 1156 case IP_VERSION(9, 4, 1): 1157 if (domain == SVM_RANGE_VRAM_DOMAIN) { 1158 if (bo_adev == adev) { 1159 mapping_flags |= coherent ? 1160 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 1161 } else { 1162 mapping_flags |= coherent ? 1163 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1164 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 1165 snoop = true; 1166 } 1167 } else { 1168 mapping_flags |= coherent ? 1169 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1170 } 1171 break; 1172 case IP_VERSION(9, 4, 2): 1173 if (domain == SVM_RANGE_VRAM_DOMAIN) { 1174 if (bo_adev == adev) { 1175 mapping_flags |= coherent ? 1176 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 1177 if (adev->gmc.xgmi.connected_to_cpu) 1178 snoop = true; 1179 } else { 1180 mapping_flags |= coherent ? 1181 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1182 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 1183 snoop = true; 1184 } 1185 } else { 1186 mapping_flags |= coherent ? 1187 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1188 } 1189 break; 1190 default: 1191 mapping_flags |= coherent ? 1192 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1193 } 1194 1195 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE; 1196 1197 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO) 1198 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE; 1199 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC) 1200 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 1201 1202 pte_flags = AMDGPU_PTE_VALID; 1203 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM; 1204 pte_flags |= snoop ? 
AMDGPU_PTE_SNOOPED : 0; 1205 1206 pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); 1207 return pte_flags; 1208 } 1209 1210 static int 1211 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1212 uint64_t start, uint64_t last, 1213 struct dma_fence **fence) 1214 { 1215 uint64_t init_pte_value = 0; 1216 1217 pr_debug("[0x%llx 0x%llx]\n", start, last); 1218 1219 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start, 1220 last, init_pte_value, 0, 0, NULL, NULL, 1221 fence); 1222 } 1223 1224 static int 1225 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, 1226 unsigned long last, uint32_t trigger) 1227 { 1228 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 1229 struct kfd_process_device *pdd; 1230 struct dma_fence *fence = NULL; 1231 struct kfd_process *p; 1232 uint32_t gpuidx; 1233 int r = 0; 1234 1235 if (!prange->mapped_to_gpu) { 1236 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n", 1237 prange, prange->start, prange->last); 1238 return 0; 1239 } 1240 1241 if (prange->start == start && prange->last == last) { 1242 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange); 1243 prange->mapped_to_gpu = false; 1244 } 1245 1246 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip, 1247 MAX_GPU_INSTANCE); 1248 p = container_of(prange->svms, struct kfd_process, svms); 1249 1250 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 1251 pr_debug("unmap from gpu idx 0x%x\n", gpuidx); 1252 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1253 if (!pdd) { 1254 pr_debug("failed to find device idx %d\n", gpuidx); 1255 return -EINVAL; 1256 } 1257 1258 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, 1259 start, last, trigger); 1260 1261 r = svm_range_unmap_from_gpu(pdd->dev->adev, 1262 drm_priv_to_vm(pdd->drm_priv), 1263 start, last, &fence); 1264 if (r) 1265 break; 1266 1267 if (fence) { 1268 r = dma_fence_wait(fence, false); 1269 dma_fence_put(fence); 1270 fence = NULL; 1271 if (r) 1272 break; 1273 } 1274 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT); 1275 } 1276 1277 return r; 1278 } 1279 1280 static int 1281 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, 1282 unsigned long offset, unsigned long npages, bool readonly, 1283 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev, 1284 struct dma_fence **fence, bool flush_tlb) 1285 { 1286 struct amdgpu_device *adev = pdd->dev->adev; 1287 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); 1288 uint64_t pte_flags; 1289 unsigned long last_start; 1290 int last_domain; 1291 int r = 0; 1292 int64_t i, j; 1293 1294 last_start = prange->start + offset; 1295 1296 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, 1297 last_start, last_start + npages - 1, readonly); 1298 1299 for (i = offset; i < offset + npages; i++) { 1300 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; 1301 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; 1302 1303 /* Collect all pages in the same address range and memory domain 1304 * that can be mapped with a single call to update mapping. 1305 */ 1306 if (i < offset + npages - 1 && 1307 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) 1308 continue; 1309 1310 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", 1311 last_start, prange->start + i, last_domain ? 
"GPU" : "CPU"); 1312 1313 pte_flags = svm_range_get_pte_flags(adev, prange, last_domain); 1314 if (readonly) 1315 pte_flags &= ~AMDGPU_PTE_WRITEABLE; 1316 1317 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n", 1318 prange->svms, last_start, prange->start + i, 1319 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, 1320 pte_flags); 1321 1322 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, 1323 last_start, prange->start + i, 1324 pte_flags, 1325 (last_start - prange->start) << PAGE_SHIFT, 1326 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, 1327 NULL, dma_addr, &vm->last_update); 1328 1329 for (j = last_start - prange->start; j <= i; j++) 1330 dma_addr[j] |= last_domain; 1331 1332 if (r) { 1333 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); 1334 goto out; 1335 } 1336 last_start = prange->start + i + 1; 1337 } 1338 1339 r = amdgpu_vm_update_pdes(adev, vm, false); 1340 if (r) { 1341 pr_debug("failed %d to update directories 0x%lx\n", r, 1342 prange->start); 1343 goto out; 1344 } 1345 1346 if (fence) 1347 *fence = dma_fence_get(vm->last_update); 1348 1349 out: 1350 return r; 1351 } 1352 1353 static int 1354 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, 1355 unsigned long npages, bool readonly, 1356 unsigned long *bitmap, bool wait, bool flush_tlb) 1357 { 1358 struct kfd_process_device *pdd; 1359 struct amdgpu_device *bo_adev; 1360 struct kfd_process *p; 1361 struct dma_fence *fence = NULL; 1362 uint32_t gpuidx; 1363 int r = 0; 1364 1365 if (prange->svm_bo && prange->ttm_res) 1366 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); 1367 else 1368 bo_adev = NULL; 1369 1370 p = container_of(prange->svms, struct kfd_process, svms); 1371 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 1372 pr_debug("mapping to gpu idx 0x%x\n", gpuidx); 1373 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1374 if (!pdd) { 1375 pr_debug("failed to find device idx %d\n", gpuidx); 1376 return -EINVAL; 1377 } 1378 1379 pdd = kfd_bind_process_to_device(pdd->dev, p); 1380 if (IS_ERR(pdd)) 1381 return -EINVAL; 1382 1383 if (bo_adev && pdd->dev->adev != bo_adev && 1384 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { 1385 pr_debug("cannot map to device idx %d\n", gpuidx); 1386 continue; 1387 } 1388 1389 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly, 1390 prange->dma_addr[gpuidx], 1391 bo_adev, wait ? 
&fence : NULL,
					 flush_tlb);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}

		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	return r;
}

struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};

static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
					      drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}

static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}

/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages are still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e.
Release notifier lock 1510 * 5. Release page table (and SVM BO) reservation 1511 */ 1512 static int svm_range_validate_and_map(struct mm_struct *mm, 1513 struct svm_range *prange, int32_t gpuidx, 1514 bool intr, bool wait, bool flush_tlb) 1515 { 1516 struct svm_validate_context ctx; 1517 unsigned long start, end, addr; 1518 struct kfd_process *p; 1519 void *owner; 1520 int32_t idx; 1521 int r = 0; 1522 1523 ctx.process = container_of(prange->svms, struct kfd_process, svms); 1524 ctx.prange = prange; 1525 ctx.intr = intr; 1526 1527 if (gpuidx < MAX_GPU_INSTANCE) { 1528 bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE); 1529 bitmap_set(ctx.bitmap, gpuidx, 1); 1530 } else if (ctx.process->xnack_enabled) { 1531 bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); 1532 1533 /* If prefetch range to GPU, or GPU retry fault migrate range to 1534 * GPU, which has ACCESS attribute to the range, create mapping 1535 * on that GPU. 1536 */ 1537 if (prange->actual_loc) { 1538 gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process, 1539 prange->actual_loc); 1540 if (gpuidx < 0) { 1541 WARN_ONCE(1, "failed get device by id 0x%x\n", 1542 prange->actual_loc); 1543 return -EINVAL; 1544 } 1545 if (test_bit(gpuidx, prange->bitmap_access)) 1546 bitmap_set(ctx.bitmap, gpuidx, 1); 1547 } 1548 } else { 1549 bitmap_or(ctx.bitmap, prange->bitmap_access, 1550 prange->bitmap_aip, MAX_GPU_INSTANCE); 1551 } 1552 1553 if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) { 1554 if (!prange->mapped_to_gpu) 1555 return 0; 1556 1557 bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); 1558 } 1559 1560 if (prange->actual_loc && !prange->ttm_res) { 1561 /* This should never happen. actual_loc gets set by 1562 * svm_migrate_ram_to_vram after allocating a BO. 1563 */ 1564 WARN_ONCE(1, "VRAM BO missing during validation\n"); 1565 return -EINVAL; 1566 } 1567 1568 svm_range_reserve_bos(&ctx); 1569 1570 p = container_of(prange->svms, struct kfd_process, svms); 1571 owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap, 1572 MAX_GPU_INSTANCE)); 1573 for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) { 1574 if (kfd_svm_page_owner(p, idx) != owner) { 1575 owner = NULL; 1576 break; 1577 } 1578 } 1579 1580 start = prange->start << PAGE_SHIFT; 1581 end = (prange->last + 1) << PAGE_SHIFT; 1582 for (addr = start; addr < end && !r; ) { 1583 struct hmm_range *hmm_range; 1584 struct vm_area_struct *vma; 1585 unsigned long next; 1586 unsigned long offset; 1587 unsigned long npages; 1588 bool readonly; 1589 1590 vma = find_vma(mm, addr); 1591 if (!vma || addr < vma->vm_start) { 1592 r = -EFAULT; 1593 goto unreserve_out; 1594 } 1595 readonly = !(vma->vm_flags & VM_WRITE); 1596 1597 next = min(vma->vm_end, end); 1598 npages = (next - addr) >> PAGE_SHIFT; 1599 WRITE_ONCE(p->svms.faulting_task, current); 1600 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, 1601 addr, npages, &hmm_range, 1602 readonly, true, owner); 1603 WRITE_ONCE(p->svms.faulting_task, NULL); 1604 if (r) { 1605 pr_debug("failed %d to get svm range pages\n", r); 1606 goto unreserve_out; 1607 } 1608 1609 offset = (addr - start) >> PAGE_SHIFT; 1610 r = svm_range_dma_map(prange, ctx.bitmap, offset, npages, 1611 hmm_range->hmm_pfns); 1612 if (r) { 1613 pr_debug("failed %d to dma map range\n", r); 1614 goto unreserve_out; 1615 } 1616 1617 svm_range_lock(prange); 1618 if (amdgpu_hmm_range_get_pages_done(hmm_range)) { 1619 pr_debug("hmm update the range, need validate again\n"); 1620 r = -EAGAIN; 1621 goto unlock_out; 1622 } 1623 if (!list_empty(&prange->child_list)) { 
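			/* A concurrent unmap split this range into children
			 * while prange->lock was not held; bail out with
			 * -EAGAIN so the caller revalidates the new ranges.
			 */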
1624 pr_debug("range split by unmap in parallel, validate again\n"); 1625 r = -EAGAIN; 1626 goto unlock_out; 1627 } 1628 1629 r = svm_range_map_to_gpus(prange, offset, npages, readonly, 1630 ctx.bitmap, wait, flush_tlb); 1631 1632 unlock_out: 1633 svm_range_unlock(prange); 1634 1635 addr = next; 1636 } 1637 1638 if (addr == end) { 1639 prange->validated_once = true; 1640 prange->mapped_to_gpu = true; 1641 } 1642 1643 unreserve_out: 1644 svm_range_unreserve_bos(&ctx); 1645 1646 if (!r) 1647 prange->validate_timestamp = ktime_get_boottime(); 1648 1649 return r; 1650 } 1651 1652 /** 1653 * svm_range_list_lock_and_flush_work - flush pending deferred work 1654 * 1655 * @svms: the svm range list 1656 * @mm: the mm structure 1657 * 1658 * Context: Returns with mmap write lock held, pending deferred work flushed 1659 * 1660 */ 1661 void 1662 svm_range_list_lock_and_flush_work(struct svm_range_list *svms, 1663 struct mm_struct *mm) 1664 { 1665 retry_flush_work: 1666 flush_work(&svms->deferred_list_work); 1667 mmap_write_lock(mm); 1668 1669 if (list_empty(&svms->deferred_range_list)) 1670 return; 1671 mmap_write_unlock(mm); 1672 pr_debug("retry flush\n"); 1673 goto retry_flush_work; 1674 } 1675 1676 static void svm_range_restore_work(struct work_struct *work) 1677 { 1678 struct delayed_work *dwork = to_delayed_work(work); 1679 struct amdkfd_process_info *process_info; 1680 struct svm_range_list *svms; 1681 struct svm_range *prange; 1682 struct kfd_process *p; 1683 struct mm_struct *mm; 1684 int evicted_ranges; 1685 int invalid; 1686 int r; 1687 1688 svms = container_of(dwork, struct svm_range_list, restore_work); 1689 evicted_ranges = atomic_read(&svms->evicted_ranges); 1690 if (!evicted_ranges) 1691 return; 1692 1693 pr_debug("restore svm ranges\n"); 1694 1695 p = container_of(svms, struct kfd_process, svms); 1696 process_info = p->kgd_process_info; 1697 1698 /* Keep mm reference when svm_range_validate_and_map ranges */ 1699 mm = get_task_mm(p->lead_thread); 1700 if (!mm) { 1701 pr_debug("svms 0x%p process mm gone\n", svms); 1702 return; 1703 } 1704 1705 mutex_lock(&process_info->lock); 1706 svm_range_list_lock_and_flush_work(svms, mm); 1707 mutex_lock(&svms->lock); 1708 1709 evicted_ranges = atomic_read(&svms->evicted_ranges); 1710 1711 list_for_each_entry(prange, &svms->list, list) { 1712 invalid = atomic_read(&prange->invalid); 1713 if (!invalid) 1714 continue; 1715 1716 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n", 1717 prange->svms, prange, prange->start, prange->last, 1718 invalid); 1719 1720 /* 1721 * If range is migrating, wait for migration is done. 1722 */ 1723 mutex_lock(&prange->migrate_mutex); 1724 1725 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, 1726 false, true, false); 1727 if (r) 1728 pr_debug("failed %d to map 0x%lx to gpus\n", r, 1729 prange->start); 1730 1731 mutex_unlock(&prange->migrate_mutex); 1732 if (r) 1733 goto out_reschedule; 1734 1735 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid) 1736 goto out_reschedule; 1737 } 1738 1739 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) != 1740 evicted_ranges) 1741 goto out_reschedule; 1742 1743 evicted_ranges = 0; 1744 1745 r = kgd2kfd_resume_mm(mm); 1746 if (r) { 1747 /* No recovery from this failure. Probably the CP is 1748 * hanging. No point trying again. 
1749 */ 1750 pr_debug("failed %d to resume KFD\n", r); 1751 } 1752 1753 pr_debug("restore svm ranges successfully\n"); 1754 1755 out_reschedule: 1756 mutex_unlock(&svms->lock); 1757 mmap_write_unlock(mm); 1758 mutex_unlock(&process_info->lock); 1759 1760 /* If validation failed, reschedule another attempt */ 1761 if (evicted_ranges) { 1762 pr_debug("reschedule to restore svm range\n"); 1763 schedule_delayed_work(&svms->restore_work, 1764 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); 1765 1766 kfd_smi_event_queue_restore_rescheduled(mm); 1767 } 1768 mmput(mm); 1769 } 1770 1771 /** 1772 * svm_range_evict - evict svm range 1773 * @prange: svm range structure 1774 * @mm: current process mm_struct 1775 * @start: starting process queue number 1776 * @last: last process queue number 1777 * 1778 * Stop all queues of the process to ensure GPU doesn't access the memory, then 1779 * return to let CPU evict the buffer and proceed CPU pagetable update. 1780 * 1781 * Don't need use lock to sync cpu pagetable invalidation with GPU execution. 1782 * If invalidation happens while restore work is running, restore work will 1783 * restart to ensure to get the latest CPU pages mapping to GPU, then start 1784 * the queues. 1785 */ 1786 static int 1787 svm_range_evict(struct svm_range *prange, struct mm_struct *mm, 1788 unsigned long start, unsigned long last, 1789 enum mmu_notifier_event event) 1790 { 1791 struct svm_range_list *svms = prange->svms; 1792 struct svm_range *pchild; 1793 struct kfd_process *p; 1794 int r = 0; 1795 1796 p = container_of(svms, struct kfd_process, svms); 1797 1798 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 1799 svms, prange->start, prange->last, start, last); 1800 1801 if (!p->xnack_enabled || 1802 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) { 1803 int evicted_ranges; 1804 bool mapped = prange->mapped_to_gpu; 1805 1806 list_for_each_entry(pchild, &prange->child_list, child_list) { 1807 if (!pchild->mapped_to_gpu) 1808 continue; 1809 mapped = true; 1810 mutex_lock_nested(&pchild->lock, 1); 1811 if (pchild->start <= last && pchild->last >= start) { 1812 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n", 1813 pchild->start, pchild->last); 1814 atomic_inc(&pchild->invalid); 1815 } 1816 mutex_unlock(&pchild->lock); 1817 } 1818 1819 if (!mapped) 1820 return r; 1821 1822 if (prange->start <= last && prange->last >= start) 1823 atomic_inc(&prange->invalid); 1824 1825 evicted_ranges = atomic_inc_return(&svms->evicted_ranges); 1826 if (evicted_ranges != 1) 1827 return r; 1828 1829 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n", 1830 prange->svms, prange->start, prange->last); 1831 1832 /* First eviction, stop the queues */ 1833 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM); 1834 if (r) 1835 pr_debug("failed to quiesce KFD\n"); 1836 1837 pr_debug("schedule to restore svm %p ranges\n", svms); 1838 schedule_delayed_work(&svms->restore_work, 1839 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); 1840 } else { 1841 unsigned long s, l; 1842 uint32_t trigger; 1843 1844 if (event == MMU_NOTIFY_MIGRATE) 1845 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE; 1846 else 1847 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY; 1848 1849 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n", 1850 prange->svms, start, last); 1851 list_for_each_entry(pchild, &prange->child_list, child_list) { 1852 mutex_lock_nested(&pchild->lock, 1); 1853 s = max(start, pchild->start); 1854 l = min(last, pchild->last); 1855 if (l >= s) 1856 
svm_range_unmap_from_gpus(pchild, s, l, trigger); 1857 mutex_unlock(&pchild->lock); 1858 } 1859 s = max(start, prange->start); 1860 l = min(last, prange->last); 1861 if (l >= s) 1862 svm_range_unmap_from_gpus(prange, s, l, trigger); 1863 } 1864 1865 return r; 1866 } 1867 1868 static struct svm_range *svm_range_clone(struct svm_range *old) 1869 { 1870 struct svm_range *new; 1871 1872 new = svm_range_new(old->svms, old->start, old->last, false); 1873 if (!new) 1874 return NULL; 1875 1876 if (old->svm_bo) { 1877 new->ttm_res = old->ttm_res; 1878 new->offset = old->offset; 1879 new->svm_bo = svm_range_bo_ref(old->svm_bo); 1880 spin_lock(&new->svm_bo->list_lock); 1881 list_add(&new->svm_bo_list, &new->svm_bo->range_list); 1882 spin_unlock(&new->svm_bo->list_lock); 1883 } 1884 new->flags = old->flags; 1885 new->preferred_loc = old->preferred_loc; 1886 new->prefetch_loc = old->prefetch_loc; 1887 new->actual_loc = old->actual_loc; 1888 new->granularity = old->granularity; 1889 new->mapped_to_gpu = old->mapped_to_gpu; 1890 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); 1891 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); 1892 1893 return new; 1894 } 1895 1896 void svm_range_set_max_pages(struct amdgpu_device *adev) 1897 { 1898 uint64_t max_pages; 1899 uint64_t pages, _pages; 1900 1901 /* 1/32 VRAM size in pages */ 1902 pages = adev->gmc.real_vram_size >> 17; 1903 pages = clamp(pages, 1ULL << 9, 1ULL << 18); 1904 pages = rounddown_pow_of_two(pages); 1905 do { 1906 max_pages = READ_ONCE(max_svm_range_pages); 1907 _pages = min_not_zero(max_pages, pages); 1908 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); 1909 } 1910 1911 static int 1912 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, 1913 uint64_t max_pages, struct list_head *insert_list, 1914 struct list_head *update_list) 1915 { 1916 struct svm_range *prange; 1917 uint64_t l; 1918 1919 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n", 1920 max_pages, start, last); 1921 1922 while (last >= start) { 1923 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1); 1924 1925 prange = svm_range_new(svms, start, l, true); 1926 if (!prange) 1927 return -ENOMEM; 1928 list_add(&prange->list, insert_list); 1929 list_add(&prange->update_list, update_list); 1930 1931 start = l + 1; 1932 } 1933 return 0; 1934 } 1935 1936 /** 1937 * svm_range_add - add svm range and handle overlap 1938 * @p: the range add to this process svms 1939 * @start: page size aligned 1940 * @size: page size aligned 1941 * @nattr: number of attributes 1942 * @attrs: array of attributes 1943 * @update_list: output, the ranges need validate and update GPU mapping 1944 * @insert_list: output, the ranges need insert to svms 1945 * @remove_list: output, the ranges are replaced and need remove from svms 1946 * 1947 * Check if the virtual address range has overlap with any existing ranges, 1948 * split partly overlapping ranges and add new ranges in the gaps. All changes 1949 * should be applied to the range_list and interval tree transactionally. If 1950 * any range split or allocation fails, the entire update fails. Therefore any 1951 * existing overlapping svm_ranges are cloned and the original svm_ranges left 1952 * unchanged. 1953 * 1954 * If the transaction succeeds, the caller can update and insert clones and 1955 * new ranges, then free the originals. 1956 * 1957 * Otherwise the caller can free the clones and new ranges, while the old 1958 * svm_ranges remain unchanged. 
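 *
 * For example, if the update range [0x40 0xff] partially overlaps an existing
 * range [0x0 0x7f], the existing range is cloned, the clone is split so that
 * only [0x40 0x7f] gets the new attributes, the head [0x0 0x3f] remains a
 * clone with the old attributes, a new range is created for [0x80 0xff], and
 * the original [0x0 0x7f] range ends up on @remove_list.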
1959 * 1960 * Context: Process context, caller must hold svms->lock 1961 * 1962 * Return: 1963 * 0 - OK, otherwise error code 1964 */ 1965 static int 1966 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, 1967 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, 1968 struct list_head *update_list, struct list_head *insert_list, 1969 struct list_head *remove_list) 1970 { 1971 unsigned long last = start + size - 1UL; 1972 struct svm_range_list *svms = &p->svms; 1973 struct interval_tree_node *node; 1974 struct svm_range *prange; 1975 struct svm_range *tmp; 1976 struct list_head new_list; 1977 int r = 0; 1978 1979 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); 1980 1981 INIT_LIST_HEAD(update_list); 1982 INIT_LIST_HEAD(insert_list); 1983 INIT_LIST_HEAD(remove_list); 1984 INIT_LIST_HEAD(&new_list); 1985 1986 node = interval_tree_iter_first(&svms->objects, start, last); 1987 while (node) { 1988 struct interval_tree_node *next; 1989 unsigned long next_start; 1990 1991 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start, 1992 node->last); 1993 1994 prange = container_of(node, struct svm_range, it_node); 1995 next = interval_tree_iter_next(node, start, last); 1996 next_start = min(node->last, last) + 1; 1997 1998 if (svm_range_is_same_attrs(p, prange, nattr, attrs)) { 1999 /* nothing to do */ 2000 } else if (node->start < start || node->last > last) { 2001 /* node intersects the update range and its attributes 2002 * will change. Clone and split it, apply updates only 2003 * to the overlapping part 2004 */ 2005 struct svm_range *old = prange; 2006 2007 prange = svm_range_clone(old); 2008 if (!prange) { 2009 r = -ENOMEM; 2010 goto out; 2011 } 2012 2013 list_add(&old->update_list, remove_list); 2014 list_add(&prange->list, insert_list); 2015 list_add(&prange->update_list, update_list); 2016 2017 if (node->start < start) { 2018 pr_debug("change old range start\n"); 2019 r = svm_range_split_head(prange, start, 2020 insert_list); 2021 if (r) 2022 goto out; 2023 } 2024 if (node->last > last) { 2025 pr_debug("change old range last\n"); 2026 r = svm_range_split_tail(prange, last, 2027 insert_list); 2028 if (r) 2029 goto out; 2030 } 2031 } else { 2032 /* The node is contained within start..last, 2033 * just update it 2034 */ 2035 list_add(&prange->update_list, update_list); 2036 } 2037 2038 /* insert a new node if needed */ 2039 if (node->start > start) { 2040 r = svm_range_split_new(svms, start, node->start - 1, 2041 READ_ONCE(max_svm_range_pages), 2042 &new_list, update_list); 2043 if (r) 2044 goto out; 2045 } 2046 2047 node = next; 2048 start = next_start; 2049 } 2050 2051 /* add a final range at the end if needed */ 2052 if (start <= last) 2053 r = svm_range_split_new(svms, start, last, 2054 READ_ONCE(max_svm_range_pages), 2055 &new_list, update_list); 2056 2057 out: 2058 if (r) { 2059 list_for_each_entry_safe(prange, tmp, insert_list, list) 2060 svm_range_free(prange, false); 2061 list_for_each_entry_safe(prange, tmp, &new_list, list) 2062 svm_range_free(prange, true); 2063 } else { 2064 list_splice(&new_list, insert_list); 2065 } 2066 2067 return r; 2068 } 2069 2070 static void 2071 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm, 2072 struct svm_range *prange) 2073 { 2074 unsigned long start; 2075 unsigned long last; 2076 2077 start = prange->notifier.interval_tree.start >> PAGE_SHIFT; 2078 last = prange->notifier.interval_tree.last >> PAGE_SHIFT; 2079 2080 if (prange->start == start && prange->last == last) 2081 return; 2082 2083 
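/* Range bounds changed (e.g. after a split or partial unmap): re-insert the interval tree node and MMU notifier with the updated [start, last] */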
pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 2084 prange->svms, prange, start, last, prange->start, 2085 prange->last); 2086 2087 if (start != 0 && last != 0) { 2088 interval_tree_remove(&prange->it_node, &prange->svms->objects); 2089 svm_range_remove_notifier(prange); 2090 } 2091 prange->it_node.start = prange->start; 2092 prange->it_node.last = prange->last; 2093 2094 interval_tree_insert(&prange->it_node, &prange->svms->objects); 2095 svm_range_add_notifier_locked(mm, prange); 2096 } 2097 2098 static void 2099 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, 2100 struct mm_struct *mm) 2101 { 2102 switch (prange->work_item.op) { 2103 case SVM_OP_NULL: 2104 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2105 svms, prange, prange->start, prange->last); 2106 break; 2107 case SVM_OP_UNMAP_RANGE: 2108 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2109 svms, prange, prange->start, prange->last); 2110 svm_range_unlink(prange); 2111 svm_range_remove_notifier(prange); 2112 svm_range_free(prange, true); 2113 break; 2114 case SVM_OP_UPDATE_RANGE_NOTIFIER: 2115 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2116 svms, prange, prange->start, prange->last); 2117 svm_range_update_notifier_and_interval_tree(mm, prange); 2118 break; 2119 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP: 2120 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2121 svms, prange, prange->start, prange->last); 2122 svm_range_update_notifier_and_interval_tree(mm, prange); 2123 /* TODO: implement deferred validation and mapping */ 2124 break; 2125 case SVM_OP_ADD_RANGE: 2126 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange, 2127 prange->start, prange->last); 2128 svm_range_add_to_svms(prange); 2129 svm_range_add_notifier_locked(mm, prange); 2130 break; 2131 case SVM_OP_ADD_RANGE_AND_MAP: 2132 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, 2133 prange, prange->start, prange->last); 2134 svm_range_add_to_svms(prange); 2135 svm_range_add_notifier_locked(mm, prange); 2136 /* TODO: implement deferred validation and mapping */ 2137 break; 2138 default: 2139 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange, 2140 prange->work_item.op); 2141 } 2142 } 2143 2144 static void svm_range_drain_retry_fault(struct svm_range_list *svms) 2145 { 2146 struct kfd_process_device *pdd; 2147 struct kfd_process *p; 2148 int drain; 2149 uint32_t i; 2150 2151 p = container_of(svms, struct kfd_process, svms); 2152 2153 restart: 2154 drain = atomic_read(&svms->drain_pagefaults); 2155 if (!drain) 2156 return; 2157 2158 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { 2159 pdd = p->pdds[i]; 2160 if (!pdd) 2161 continue; 2162 2163 pr_debug("drain retry fault gpu %d svms %p\n", i, svms); 2164 2165 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, 2166 &pdd->dev->adev->irq.ih1); 2167 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); 2168 } 2169 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) 2170 goto restart; 2171 } 2172 2173 static void svm_range_deferred_list_work(struct work_struct *work) 2174 { 2175 struct svm_range_list *svms; 2176 struct svm_range *prange; 2177 struct mm_struct *mm; 2178 2179 svms = container_of(work, struct svm_range_list, deferred_list_work); 2180 pr_debug("enter svms 0x%p\n", svms); 2181 2182 spin_lock(&svms->deferred_list_lock); 2183 while (!list_empty(&svms->deferred_range_list)) { 2184 prange = list_first_entry(&svms->deferred_range_list, 2185 struct svm_range, 
deferred_list); 2186 spin_unlock(&svms->deferred_list_lock); 2187 2188 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, 2189 prange->start, prange->last, prange->work_item.op); 2190 2191 mm = prange->work_item.mm; 2192 retry: 2193 mmap_write_lock(mm); 2194 2195 /* Checking for the need to drain retry faults must be inside 2196 * mmap write lock to serialize with munmap notifiers. 2197 */ 2198 if (unlikely(atomic_read(&svms->drain_pagefaults))) { 2199 mmap_write_unlock(mm); 2200 svm_range_drain_retry_fault(svms); 2201 goto retry; 2202 } 2203 2204 /* Remove from deferred_list must be inside mmap write lock, for 2205 * two race cases: 2206 * 1. unmap_from_cpu may change work_item.op and add the range 2207 * to deferred_list again, cause use after free bug. 2208 * 2. svm_range_list_lock_and_flush_work may hold mmap write 2209 * lock and continue because deferred_list is empty, but 2210 * deferred_list work is actually waiting for mmap lock. 2211 */ 2212 spin_lock(&svms->deferred_list_lock); 2213 list_del_init(&prange->deferred_list); 2214 spin_unlock(&svms->deferred_list_lock); 2215 2216 mutex_lock(&svms->lock); 2217 mutex_lock(&prange->migrate_mutex); 2218 while (!list_empty(&prange->child_list)) { 2219 struct svm_range *pchild; 2220 2221 pchild = list_first_entry(&prange->child_list, 2222 struct svm_range, child_list); 2223 pr_debug("child prange 0x%p op %d\n", pchild, 2224 pchild->work_item.op); 2225 list_del_init(&pchild->child_list); 2226 svm_range_handle_list_op(svms, pchild, mm); 2227 } 2228 mutex_unlock(&prange->migrate_mutex); 2229 2230 svm_range_handle_list_op(svms, prange, mm); 2231 mutex_unlock(&svms->lock); 2232 mmap_write_unlock(mm); 2233 2234 /* Pairs with mmget in svm_range_add_list_work */ 2235 mmput(mm); 2236 2237 spin_lock(&svms->deferred_list_lock); 2238 } 2239 spin_unlock(&svms->deferred_list_lock); 2240 pr_debug("exit svms 0x%p\n", svms); 2241 } 2242 2243 void 2244 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, 2245 struct mm_struct *mm, enum svm_work_list_ops op) 2246 { 2247 spin_lock(&svms->deferred_list_lock); 2248 /* if prange is on the deferred list */ 2249 if (!list_empty(&prange->deferred_list)) { 2250 pr_debug("update exist prange 0x%p work op %d\n", prange, op); 2251 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n"); 2252 if (op != SVM_OP_NULL && 2253 prange->work_item.op != SVM_OP_UNMAP_RANGE) 2254 prange->work_item.op = op; 2255 } else { 2256 prange->work_item.op = op; 2257 2258 /* Pairs with mmput in deferred_list_work */ 2259 mmget(mm); 2260 prange->work_item.mm = mm; 2261 list_add_tail(&prange->deferred_list, 2262 &prange->svms->deferred_range_list); 2263 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", 2264 prange, prange->start, prange->last, op); 2265 } 2266 spin_unlock(&svms->deferred_list_lock); 2267 } 2268 2269 void schedule_deferred_list_work(struct svm_range_list *svms) 2270 { 2271 spin_lock(&svms->deferred_list_lock); 2272 if (!list_empty(&svms->deferred_range_list)) 2273 schedule_work(&svms->deferred_list_work); 2274 spin_unlock(&svms->deferred_list_lock); 2275 } 2276 2277 static void 2278 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, 2279 struct svm_range *prange, unsigned long start, 2280 unsigned long last) 2281 { 2282 struct svm_range *head; 2283 struct svm_range *tail; 2284 2285 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2286 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange, 2287 prange->start, prange->last); 2288 return; 2289 } 2290 if (start 
> prange->last || last < prange->start) 2291 return; 2292 2293 head = tail = prange; 2294 if (start > prange->start) 2295 svm_range_split(prange, prange->start, start - 1, &tail); 2296 if (last < tail->last) 2297 svm_range_split(tail, last + 1, tail->last, &head); 2298 2299 if (head != prange && tail != prange) { 2300 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2301 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); 2302 } else if (tail != prange) { 2303 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); 2304 } else if (head != prange) { 2305 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2306 } else if (parent != prange) { 2307 prange->work_item.op = SVM_OP_UNMAP_RANGE; 2308 } 2309 } 2310 2311 static void 2312 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, 2313 unsigned long start, unsigned long last) 2314 { 2315 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU; 2316 struct svm_range_list *svms; 2317 struct svm_range *pchild; 2318 struct kfd_process *p; 2319 unsigned long s, l; 2320 bool unmap_parent; 2321 2322 p = kfd_lookup_process_by_mm(mm); 2323 if (!p) 2324 return; 2325 svms = &p->svms; 2326 2327 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, 2328 prange, prange->start, prange->last, start, last); 2329 2330 /* Make sure pending page faults are drained in the deferred worker 2331 * before the range is freed to avoid straggler interrupts on 2332 * unmapped memory causing "phantom faults". 2333 */ 2334 atomic_inc(&svms->drain_pagefaults); 2335 2336 unmap_parent = start <= prange->start && last >= prange->last; 2337 2338 list_for_each_entry(pchild, &prange->child_list, child_list) { 2339 mutex_lock_nested(&pchild->lock, 1); 2340 s = max(start, pchild->start); 2341 l = min(last, pchild->last); 2342 if (l >= s) 2343 svm_range_unmap_from_gpus(pchild, s, l, trigger); 2344 svm_range_unmap_split(mm, prange, pchild, start, last); 2345 mutex_unlock(&pchild->lock); 2346 } 2347 s = max(start, prange->start); 2348 l = min(last, prange->last); 2349 if (l >= s) 2350 svm_range_unmap_from_gpus(prange, s, l, trigger); 2351 svm_range_unmap_split(mm, prange, prange, start, last); 2352 2353 if (unmap_parent) 2354 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); 2355 else 2356 svm_range_add_list_work(svms, prange, mm, 2357 SVM_OP_UPDATE_RANGE_NOTIFIER); 2358 schedule_deferred_list_work(svms); 2359 2360 kfd_unref_process(p); 2361 } 2362 2363 /** 2364 * svm_range_cpu_invalidate_pagetables - interval notifier callback 2365 * @mni: mmu_interval_notifier struct 2366 * @range: mmu_notifier_range struct 2367 * @cur_seq: value to pass to mmu_interval_set_seq() 2368 * 2369 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it 2370 * is from migration, or CPU page invalidation callback. 2371 * 2372 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed 2373 * work thread, and split prange if only part of prange is unmapped. 2374 * 2375 * For invalidation event, if GPU retry fault is not enabled, evict the queues, 2376 * then schedule svm_range_restore_work to update GPU mapping and resume queues. 2377 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will 2378 * update GPU mapping to recover. 
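 * For MMU_NOTIFY_RELEASE the callback returns immediately; the mm is going away and svm_range_list_fini tears down the ranges on process exit.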
2379 * 2380 * Context: mmap lock, notifier_invalidate_start lock are held 2381 * for invalidate event, prange lock is held if this is from migration 2382 */ 2383 static bool 2384 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, 2385 const struct mmu_notifier_range *range, 2386 unsigned long cur_seq) 2387 { 2388 struct svm_range *prange; 2389 unsigned long start; 2390 unsigned long last; 2391 2392 if (range->event == MMU_NOTIFY_RELEASE) 2393 return true; 2394 if (!mmget_not_zero(mni->mm)) 2395 return true; 2396 2397 start = mni->interval_tree.start; 2398 last = mni->interval_tree.last; 2399 start = max(start, range->start) >> PAGE_SHIFT; 2400 last = min(last, range->end - 1) >> PAGE_SHIFT; 2401 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n", 2402 start, last, range->start >> PAGE_SHIFT, 2403 (range->end - 1) >> PAGE_SHIFT, 2404 mni->interval_tree.start >> PAGE_SHIFT, 2405 mni->interval_tree.last >> PAGE_SHIFT, range->event); 2406 2407 prange = container_of(mni, struct svm_range, notifier); 2408 2409 svm_range_lock(prange); 2410 mmu_interval_set_seq(mni, cur_seq); 2411 2412 switch (range->event) { 2413 case MMU_NOTIFY_UNMAP: 2414 svm_range_unmap_from_cpu(mni->mm, prange, start, last); 2415 break; 2416 default: 2417 svm_range_evict(prange, mni->mm, start, last, range->event); 2418 break; 2419 } 2420 2421 svm_range_unlock(prange); 2422 mmput(mni->mm); 2423 2424 return true; 2425 } 2426 2427 /** 2428 * svm_range_from_addr - find svm range from fault address 2429 * @svms: svm range list header 2430 * @addr: address to search range interval tree, in pages 2431 * @parent: parent range if range is on child list 2432 * 2433 * Context: The caller must hold svms->lock 2434 * 2435 * Return: the svm_range found or NULL 2436 */ 2437 struct svm_range * 2438 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, 2439 struct svm_range **parent) 2440 { 2441 struct interval_tree_node *node; 2442 struct svm_range *prange; 2443 struct svm_range *pchild; 2444 2445 node = interval_tree_iter_first(&svms->objects, addr, addr); 2446 if (!node) 2447 return NULL; 2448 2449 prange = container_of(node, struct svm_range, it_node); 2450 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n", 2451 addr, prange->start, prange->last, node->start, node->last); 2452 2453 if (addr >= prange->start && addr <= prange->last) { 2454 if (parent) 2455 *parent = prange; 2456 return prange; 2457 } 2458 list_for_each_entry(pchild, &prange->child_list, child_list) 2459 if (addr >= pchild->start && addr <= pchild->last) { 2460 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n", 2461 addr, pchild->start, pchild->last); 2462 if (parent) 2463 *parent = prange; 2464 return pchild; 2465 } 2466 2467 return NULL; 2468 } 2469 2470 /* svm_range_best_restore_location - decide the best fault restore location 2471 * @prange: svm range structure 2472 * @adev: the GPU on which vm fault happened 2473 * 2474 * This is only called when xnack is on, to decide the best location to restore 2475 * the range mapping after GPU vm fault. Caller uses the best location to do 2476 * migration if actual loc is not best location, then update GPU page table 2477 * mapping to the best location. 2478 * 2479 * If the preferred loc is accessible by faulting GPU, use preferred loc. 
2480 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu 2481 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then 2482 * if range actual loc is cpu, best_loc is cpu 2483 * if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is 2484 * range actual loc. 2485 * Otherwise, GPU no access, best_loc is -1. 2486 * 2487 * Return: 2488 * -1 means vm fault GPU no access 2489 * 0 for CPU or GPU id 2490 */ 2491 static int32_t 2492 svm_range_best_restore_location(struct svm_range *prange, 2493 struct amdgpu_device *adev, 2494 int32_t *gpuidx) 2495 { 2496 struct amdgpu_device *bo_adev, *preferred_adev; 2497 struct kfd_process *p; 2498 uint32_t gpuid; 2499 int r; 2500 2501 p = container_of(prange->svms, struct kfd_process, svms); 2502 2503 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); 2504 if (r < 0) { 2505 pr_debug("failed to get gpuid from kgd\n"); 2506 return -1; 2507 } 2508 2509 if (prange->preferred_loc == gpuid || 2510 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { 2511 return prange->preferred_loc; 2512 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { 2513 preferred_adev = svm_range_get_adev_by_id(prange, 2514 prange->preferred_loc); 2515 if (amdgpu_xgmi_same_hive(adev, preferred_adev)) 2516 return prange->preferred_loc; 2517 /* fall through */ 2518 } 2519 2520 if (test_bit(*gpuidx, prange->bitmap_access)) 2521 return gpuid; 2522 2523 if (test_bit(*gpuidx, prange->bitmap_aip)) { 2524 if (!prange->actual_loc) 2525 return 0; 2526 2527 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc); 2528 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 2529 return prange->actual_loc; 2530 else 2531 return 0; 2532 } 2533 2534 return -1; 2535 } 2536 2537 static int 2538 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, 2539 unsigned long *start, unsigned long *last, 2540 bool *is_heap_stack) 2541 { 2542 struct vm_area_struct *vma; 2543 struct interval_tree_node *node; 2544 unsigned long start_limit, end_limit; 2545 2546 vma = find_vma(p->mm, addr << PAGE_SHIFT); 2547 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { 2548 pr_debug("VMA does not exist in address [0x%llx]\n", addr); 2549 return -EFAULT; 2550 } 2551 2552 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk && 2553 vma->vm_end >= vma->vm_mm->start_brk) || 2554 (vma->vm_start <= vma->vm_mm->start_stack && 2555 vma->vm_end >= vma->vm_mm->start_stack); 2556 2557 start_limit = max(vma->vm_start >> PAGE_SHIFT, 2558 (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); 2559 end_limit = min(vma->vm_end >> PAGE_SHIFT, 2560 (unsigned long)ALIGN(addr + 1, 2UL << 8)); 2561 /* First range that starts after the fault address */ 2562 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); 2563 if (node) { 2564 end_limit = min(end_limit, node->start); 2565 /* Last range that ends before the fault address */ 2566 node = container_of(rb_prev(&node->rb), 2567 struct interval_tree_node, rb); 2568 } else { 2569 /* Last range must end before addr because 2570 * there was no range after addr 2571 */ 2572 node = container_of(rb_last(&p->svms.objects.rb_root), 2573 struct interval_tree_node, rb); 2574 } 2575 if (node) { 2576 if (node->last >= addr) { 2577 WARN(1, "Overlap with prev node and page fault addr\n"); 2578 return -EFAULT; 2579 } 2580 start_limit = max(start_limit, node->last + 1); 2581 } 2582 2583 *start = start_limit; 2584 *last = end_limit - 1; 2585 2586 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", 2587 
vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, 2588 *start, *last, *is_heap_stack); 2589 2590 return 0; 2591 } 2592 2593 static int 2594 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, 2595 uint64_t *bo_s, uint64_t *bo_l) 2596 { 2597 struct amdgpu_bo_va_mapping *mapping; 2598 struct interval_tree_node *node; 2599 struct amdgpu_bo *bo = NULL; 2600 unsigned long userptr; 2601 uint32_t i; 2602 int r; 2603 2604 for (i = 0; i < p->n_pdds; i++) { 2605 struct amdgpu_vm *vm; 2606 2607 if (!p->pdds[i]->drm_priv) 2608 continue; 2609 2610 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 2611 r = amdgpu_bo_reserve(vm->root.bo, false); 2612 if (r) 2613 return r; 2614 2615 /* Check userptr by searching entire vm->va interval tree */ 2616 node = interval_tree_iter_first(&vm->va, 0, ~0ULL); 2617 while (node) { 2618 mapping = container_of((struct rb_node *)node, 2619 struct amdgpu_bo_va_mapping, rb); 2620 bo = mapping->bo_va->base.bo; 2621 2622 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, 2623 start << PAGE_SHIFT, 2624 last << PAGE_SHIFT, 2625 &userptr)) { 2626 node = interval_tree_iter_next(node, 0, ~0ULL); 2627 continue; 2628 } 2629 2630 pr_debug("[0x%llx 0x%llx] already userptr mapped\n", 2631 start, last); 2632 if (bo_s && bo_l) { 2633 *bo_s = userptr >> PAGE_SHIFT; 2634 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1; 2635 } 2636 amdgpu_bo_unreserve(vm->root.bo); 2637 return -EADDRINUSE; 2638 } 2639 amdgpu_bo_unreserve(vm->root.bo); 2640 } 2641 return 0; 2642 } 2643 2644 static struct 2645 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, 2646 struct kfd_process *p, 2647 struct mm_struct *mm, 2648 int64_t addr) 2649 { 2650 struct svm_range *prange = NULL; 2651 unsigned long start, last; 2652 uint32_t gpuid, gpuidx; 2653 bool is_heap_stack; 2654 uint64_t bo_s = 0; 2655 uint64_t bo_l = 0; 2656 int r; 2657 2658 if (svm_range_get_range_boundaries(p, addr, &start, &last, 2659 &is_heap_stack)) 2660 return NULL; 2661 2662 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); 2663 if (r != -EADDRINUSE) 2664 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); 2665 2666 if (r == -EADDRINUSE) { 2667 if (addr >= bo_s && addr <= bo_l) 2668 return NULL; 2669 2670 /* Create one page svm range if 2MB range overlapping */ 2671 start = addr; 2672 last = addr; 2673 } 2674 2675 prange = svm_range_new(&p->svms, start, last, true); 2676 if (!prange) { 2677 pr_debug("Failed to create prange in address [0x%llx]\n", addr); 2678 return NULL; 2679 } 2680 if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { 2681 pr_debug("failed to get gpuid from kgd\n"); 2682 svm_range_free(prange, true); 2683 return NULL; 2684 } 2685 2686 if (is_heap_stack) 2687 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; 2688 2689 svm_range_add_to_svms(prange); 2690 svm_range_add_notifier_locked(mm, prange); 2691 2692 return prange; 2693 } 2694 2695 /* svm_range_skip_recover - decide if prange can be recovered 2696 * @prange: svm range structure 2697 * 2698 * GPU vm retry fault handle skip recover the range for cases: 2699 * 1. prange is on deferred list to be removed after unmap, it is stale fault, 2700 * deferred list work will drain the stale fault before free the prange. 2701 * 2. prange is on deferred list to add interval notifier after split, or 2702 * 3. prange is child range, it is split from parent prange, recover later 2703 * after interval notifier is added. 
2704 * 2705 * Return: true to skip recover, false to recover 2706 */ 2707 static bool svm_range_skip_recover(struct svm_range *prange) 2708 { 2709 struct svm_range_list *svms = prange->svms; 2710 2711 spin_lock(&svms->deferred_list_lock); 2712 if (list_empty(&prange->deferred_list) && 2713 list_empty(&prange->child_list)) { 2714 spin_unlock(&svms->deferred_list_lock); 2715 return false; 2716 } 2717 spin_unlock(&svms->deferred_list_lock); 2718 2719 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2720 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n", 2721 svms, prange, prange->start, prange->last); 2722 return true; 2723 } 2724 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP || 2725 prange->work_item.op == SVM_OP_ADD_RANGE) { 2726 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n", 2727 svms, prange, prange->start, prange->last); 2728 return true; 2729 } 2730 return false; 2731 } 2732 2733 static void 2734 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, 2735 int32_t gpuidx) 2736 { 2737 struct kfd_process_device *pdd; 2738 2739 /* fault is on different page of same range 2740 * or fault is skipped to recover later 2741 * or fault is on invalid virtual address 2742 */ 2743 if (gpuidx == MAX_GPU_INSTANCE) { 2744 uint32_t gpuid; 2745 int r; 2746 2747 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); 2748 if (r < 0) 2749 return; 2750 } 2751 2752 /* fault is recovered 2753 * or fault cannot recover because GPU no access on the range 2754 */ 2755 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 2756 if (pdd) 2757 WRITE_ONCE(pdd->faults, pdd->faults + 1); 2758 } 2759 2760 static bool 2761 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) 2762 { 2763 unsigned long requested = VM_READ; 2764 2765 if (write_fault) 2766 requested |= VM_WRITE; 2767 2768 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, 2769 vma->vm_flags); 2770 return (vma->vm_flags & requested) == requested; 2771 } 2772 2773 int 2774 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, 2775 uint64_t addr, bool write_fault) 2776 { 2777 struct mm_struct *mm = NULL; 2778 struct svm_range_list *svms; 2779 struct svm_range *prange; 2780 struct kfd_process *p; 2781 ktime_t timestamp = ktime_get_boottime(); 2782 int32_t best_loc; 2783 int32_t gpuidx = MAX_GPU_INSTANCE; 2784 bool write_locked = false; 2785 struct vm_area_struct *vma; 2786 bool migration = false; 2787 int r = 0; 2788 2789 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { 2790 pr_debug("device does not support SVM\n"); 2791 return -EFAULT; 2792 } 2793 2794 p = kfd_lookup_process_by_pasid(pasid); 2795 if (!p) { 2796 pr_debug("kfd process not founded pasid 0x%x\n", pasid); 2797 return 0; 2798 } 2799 svms = &p->svms; 2800 2801 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); 2802 2803 if (atomic_read(&svms->drain_pagefaults)) { 2804 pr_debug("draining retry fault, drop fault 0x%llx\n", addr); 2805 r = 0; 2806 goto out; 2807 } 2808 2809 if (!p->xnack_enabled) { 2810 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); 2811 r = -EFAULT; 2812 goto out; 2813 } 2814 2815 /* p->lead_thread is available as kfd_process_wq_release flush the work 2816 * before releasing task ref. 
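 * get_task_mm() may still return NULL if the mm has already been released; that case is handled below.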
2817 */ 2818 mm = get_task_mm(p->lead_thread); 2819 if (!mm) { 2820 pr_debug("svms 0x%p failed to get mm\n", svms); 2821 r = 0; 2822 goto out; 2823 } 2824 2825 mmap_read_lock(mm); 2826 retry_write_locked: 2827 mutex_lock(&svms->lock); 2828 prange = svm_range_from_addr(svms, addr, NULL); 2829 if (!prange) { 2830 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n", 2831 svms, addr); 2832 if (!write_locked) { 2833 /* Need the write lock to create new range with MMU notifier. 2834 * Also flush pending deferred work to make sure the interval 2835 * tree is up to date before we add a new range 2836 */ 2837 mutex_unlock(&svms->lock); 2838 mmap_read_unlock(mm); 2839 mmap_write_lock(mm); 2840 write_locked = true; 2841 goto retry_write_locked; 2842 } 2843 prange = svm_range_create_unregistered_range(adev, p, mm, addr); 2844 if (!prange) { 2845 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", 2846 svms, addr); 2847 mmap_write_downgrade(mm); 2848 r = -EFAULT; 2849 goto out_unlock_svms; 2850 } 2851 } 2852 if (write_locked) 2853 mmap_write_downgrade(mm); 2854 2855 mutex_lock(&prange->migrate_mutex); 2856 2857 if (svm_range_skip_recover(prange)) { 2858 amdgpu_gmc_filter_faults_remove(adev, addr, pasid); 2859 r = 0; 2860 goto out_unlock_range; 2861 } 2862 2863 /* skip duplicate vm fault on different pages of same range */ 2864 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp, 2865 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) { 2866 pr_debug("svms 0x%p [0x%lx %lx] already restored\n", 2867 svms, prange->start, prange->last); 2868 r = 0; 2869 goto out_unlock_range; 2870 } 2871 2872 /* __do_munmap removed VMA, return success as we are handling stale 2873 * retry fault. 2874 */ 2875 vma = find_vma(mm, addr << PAGE_SHIFT); 2876 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { 2877 pr_debug("address 0x%llx VMA is removed\n", addr); 2878 r = 0; 2879 goto out_unlock_range; 2880 } 2881 2882 if (!svm_fault_allowed(vma, write_fault)) { 2883 pr_debug("fault addr 0x%llx no %s permission\n", addr, 2884 write_fault ? 
"write" : "read"); 2885 r = -EPERM; 2886 goto out_unlock_range; 2887 } 2888 2889 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx); 2890 if (best_loc == -1) { 2891 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 2892 svms, prange->start, prange->last); 2893 r = -EACCES; 2894 goto out_unlock_range; 2895 } 2896 2897 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n", 2898 svms, prange->start, prange->last, best_loc, 2899 prange->actual_loc); 2900 2901 kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr, 2902 write_fault, timestamp); 2903 2904 if (prange->actual_loc != best_loc) { 2905 migration = true; 2906 if (best_loc) { 2907 r = svm_migrate_to_vram(prange, best_loc, mm, 2908 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2909 if (r) { 2910 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", 2911 r, addr); 2912 /* Fallback to system memory if migration to 2913 * VRAM failed 2914 */ 2915 if (prange->actual_loc) 2916 r = svm_migrate_vram_to_ram(prange, mm, 2917 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2918 else 2919 r = 0; 2920 } 2921 } else { 2922 r = svm_migrate_vram_to_ram(prange, mm, 2923 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2924 } 2925 if (r) { 2926 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", 2927 r, svms, prange->start, prange->last); 2928 goto out_unlock_range; 2929 } 2930 } 2931 2932 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false); 2933 if (r) 2934 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", 2935 r, svms, prange->start, prange->last); 2936 2937 kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr, 2938 migration); 2939 2940 out_unlock_range: 2941 mutex_unlock(&prange->migrate_mutex); 2942 out_unlock_svms: 2943 mutex_unlock(&svms->lock); 2944 mmap_read_unlock(mm); 2945 2946 svm_range_count_fault(adev, p, gpuidx); 2947 2948 mmput(mm); 2949 out: 2950 kfd_unref_process(p); 2951 2952 if (r == -EAGAIN) { 2953 pr_debug("recover vm fault later\n"); 2954 amdgpu_gmc_filter_faults_remove(adev, addr, pasid); 2955 r = 0; 2956 } 2957 return r; 2958 } 2959 2960 void svm_range_list_fini(struct kfd_process *p) 2961 { 2962 struct svm_range *prange; 2963 struct svm_range *next; 2964 2965 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms); 2966 2967 cancel_delayed_work_sync(&p->svms.restore_work); 2968 2969 /* Ensure list work is finished before process is destroyed */ 2970 flush_work(&p->svms.deferred_list_work); 2971 2972 /* 2973 * Ensure no retry fault comes in afterwards, as page fault handler will 2974 * not find kfd process and take mm lock to recover fault. 
2975 */ 2976 atomic_inc(&p->svms.drain_pagefaults); 2977 svm_range_drain_retry_fault(&p->svms); 2978 2979 list_for_each_entry_safe(prange, next, &p->svms.list, list) { 2980 svm_range_unlink(prange); 2981 svm_range_remove_notifier(prange); 2982 svm_range_free(prange, true); 2983 } 2984 2985 mutex_destroy(&p->svms.lock); 2986 2987 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms); 2988 } 2989 2990 int svm_range_list_init(struct kfd_process *p) 2991 { 2992 struct svm_range_list *svms = &p->svms; 2993 int i; 2994 2995 svms->objects = RB_ROOT_CACHED; 2996 mutex_init(&svms->lock); 2997 INIT_LIST_HEAD(&svms->list); 2998 atomic_set(&svms->evicted_ranges, 0); 2999 atomic_set(&svms->drain_pagefaults, 0); 3000 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); 3001 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); 3002 INIT_LIST_HEAD(&svms->deferred_range_list); 3003 INIT_LIST_HEAD(&svms->criu_svm_metadata_list); 3004 spin_lock_init(&svms->deferred_list_lock); 3005 3006 for (i = 0; i < p->n_pdds; i++) 3007 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev)) 3008 bitmap_set(svms->bitmap_supported, i, 1); 3009 3010 return 0; 3011 } 3012 3013 /** 3014 * svm_range_check_vm - check if virtual address range mapped already 3015 * @p: current kfd_process 3016 * @start: range start address, in pages 3017 * @last: range last address, in pages 3018 * @bo_s: mapping start address in pages if address range already mapped 3019 * @bo_l: mapping last address in pages if address range already mapped 3020 * 3021 * The purpose is to avoid virtual address ranges already allocated by 3022 * kfd_ioctl_alloc_memory_of_gpu ioctl. 3023 * It looks for each pdd in the kfd_process. 3024 * 3025 * Context: Process context 3026 * 3027 * Return 0 - OK, if the range is not mapped. 3028 * Otherwise error code: 3029 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu 3030 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by 3031 * a signal. Release all buffer reservations and return to user-space. 
3032 */ 3033 static int 3034 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, 3035 uint64_t *bo_s, uint64_t *bo_l) 3036 { 3037 struct amdgpu_bo_va_mapping *mapping; 3038 struct interval_tree_node *node; 3039 uint32_t i; 3040 int r; 3041 3042 for (i = 0; i < p->n_pdds; i++) { 3043 struct amdgpu_vm *vm; 3044 3045 if (!p->pdds[i]->drm_priv) 3046 continue; 3047 3048 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 3049 r = amdgpu_bo_reserve(vm->root.bo, false); 3050 if (r) 3051 return r; 3052 3053 node = interval_tree_iter_first(&vm->va, start, last); 3054 if (node) { 3055 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n", 3056 start, last); 3057 mapping = container_of((struct rb_node *)node, 3058 struct amdgpu_bo_va_mapping, rb); 3059 if (bo_s && bo_l) { 3060 *bo_s = mapping->start; 3061 *bo_l = mapping->last; 3062 } 3063 amdgpu_bo_unreserve(vm->root.bo); 3064 return -EADDRINUSE; 3065 } 3066 amdgpu_bo_unreserve(vm->root.bo); 3067 } 3068 3069 return 0; 3070 } 3071 3072 /** 3073 * svm_range_is_valid - check if virtual address range is valid 3074 * @p: current kfd_process 3075 * @start: range start address, in pages 3076 * @size: range size, in pages 3077 * 3078 * A virtual address range is valid if it belongs to one or more VMAs 3079 * 3080 * Context: Process context 3081 * 3082 * Return: 3083 * 0 - OK, otherwise error code 3084 */ 3085 static int 3086 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) 3087 { 3088 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; 3089 struct vm_area_struct *vma; 3090 unsigned long end; 3091 unsigned long start_unchg = start; 3092 3093 start <<= PAGE_SHIFT; 3094 end = start + (size << PAGE_SHIFT); 3095 do { 3096 vma = find_vma(p->mm, start); 3097 if (!vma || start < vma->vm_start || 3098 (vma->vm_flags & device_vma)) 3099 return -EFAULT; 3100 start = min(end, vma->vm_end); 3101 } while (start < end); 3102 3103 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL, 3104 NULL); 3105 } 3106 3107 /** 3108 * svm_range_best_prefetch_location - decide the best prefetch location 3109 * @prange: svm range structure 3110 * 3111 * For xnack off: 3112 * If the range maps to a single GPU, the best prefetch location is prefetch_loc, 3113 * which can be CPU or GPU. 3114 * 3115 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch location 3116 * is the prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive; 3117 * otherwise the best prefetch location is always CPU, because a GPU cannot 3118 * coherently map the VRAM of other GPUs even with a large-BAR PCIe connection. 3119 * 3120 * For xnack on: 3121 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is 3122 * prefetch_loc; access by another GPU will generate a vm fault and trigger migration. 3123 * 3124 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the 3125 * prefetch_loc GPU only if the mGPUs are connected on the same XGMI hive, otherwise 3126 * the best prefetch location is always CPU.
3127 * 3128 * Context: Process context 3129 * 3130 * Return: 3131 * 0 for CPU or GPU id 3132 */ 3133 static uint32_t 3134 svm_range_best_prefetch_location(struct svm_range *prange) 3135 { 3136 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 3137 uint32_t best_loc = prange->prefetch_loc; 3138 struct kfd_process_device *pdd; 3139 struct amdgpu_device *bo_adev; 3140 struct kfd_process *p; 3141 uint32_t gpuidx; 3142 3143 p = container_of(prange->svms, struct kfd_process, svms); 3144 3145 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) 3146 goto out; 3147 3148 bo_adev = svm_range_get_adev_by_id(prange, best_loc); 3149 if (!bo_adev) { 3150 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc); 3151 best_loc = 0; 3152 goto out; 3153 } 3154 3155 if (p->xnack_enabled) 3156 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); 3157 else 3158 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip, 3159 MAX_GPU_INSTANCE); 3160 3161 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 3162 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 3163 if (!pdd) { 3164 pr_debug("failed to get device by idx 0x%x\n", gpuidx); 3165 continue; 3166 } 3167 3168 if (pdd->dev->adev == bo_adev) 3169 continue; 3170 3171 if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { 3172 best_loc = 0; 3173 break; 3174 } 3175 } 3176 3177 out: 3178 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n", 3179 p->xnack_enabled, &p->svms, prange->start, prange->last, 3180 best_loc); 3181 3182 return best_loc; 3183 } 3184 3185 /* FIXME: This is a workaround for a page locking bug when some pages are 3186 * invalid during migration to VRAM 3187 */ 3188 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm, 3189 void *owner) 3190 { 3191 struct hmm_range *hmm_range; 3192 int r; 3193 3194 if (prange->validated_once) 3195 return; 3196 3197 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, 3198 prange->start << PAGE_SHIFT, 3199 prange->npages, &hmm_range, 3200 false, true, owner); 3201 if (!r) { 3202 amdgpu_hmm_range_get_pages_done(hmm_range); 3203 prange->validated_once = true; 3204 } 3205 } 3206 3207 /* svm_range_trigger_migration - start page migration if prefetch loc changed 3208 * @mm: current process mm_struct 3209 * @prange: svm range structure 3210 * @migrated: output, true if migration is triggered 3211 * 3212 * If the range's prefetch_loc is a GPU and the actual location is CPU (0), migrate 3213 * the range from RAM to VRAM. 3214 * If prefetch_loc is CPU (0) and the actual location is a GPU, migrate the range 3215 * from VRAM to RAM. 3216 * 3217 * If GPU vm fault retry is not enabled, migration interacts with the MMU notifier 3218 * and the restore work: 3219 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback svm_range_evict 3220 * stops all queues and schedules the restore work 3221 * 2. svm_range_restore_work waits for the migration to finish: 3222 * a. svm_range_validate_vram takes prange->migrate_mutex 3223 * b. svm_range_validate_ram HMM get pages waits until the CPU fault handler returns 3224 * 3. the restore work updates the GPU mappings and resumes all queues.
3225 * 3226 * Context: Process context 3227 * 3228 * Return: 3229 * 0 - OK, otherwise - error code of migration 3230 */ 3231 static int 3232 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, 3233 bool *migrated) 3234 { 3235 uint32_t best_loc; 3236 int r = 0; 3237 3238 *migrated = false; 3239 best_loc = svm_range_best_prefetch_location(prange); 3240 3241 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3242 best_loc == prange->actual_loc) 3243 return 0; 3244 3245 if (!best_loc) { 3246 r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH); 3247 *migrated = !r; 3248 return r; 3249 } 3250 3251 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); 3252 *migrated = !r; 3253 3254 return r; 3255 } 3256 3257 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence) 3258 { 3259 if (!fence) 3260 return -EINVAL; 3261 3262 if (dma_fence_is_signaled(&fence->base)) 3263 return 0; 3264 3265 if (fence->svm_bo) { 3266 WRITE_ONCE(fence->svm_bo->evicting, 1); 3267 schedule_work(&fence->svm_bo->eviction_work); 3268 } 3269 3270 return 0; 3271 } 3272 3273 static void svm_range_evict_svm_bo_worker(struct work_struct *work) 3274 { 3275 struct svm_range_bo *svm_bo; 3276 struct kfd_process *p; 3277 struct mm_struct *mm; 3278 int r = 0; 3279 3280 svm_bo = container_of(work, struct svm_range_bo, eviction_work); 3281 if (!svm_bo_ref_unless_zero(svm_bo)) 3282 return; /* svm_bo was freed while eviction was pending */ 3283 3284 /* svm_range_bo_release destroys this worker thread. So during 3285 * the lifetime of this thread, kfd_process and mm will be valid. 3286 */ 3287 p = container_of(svm_bo->svms, struct kfd_process, svms); 3288 mm = p->mm; 3289 if (!mm) 3290 return; 3291 3292 mmap_read_lock(mm); 3293 spin_lock(&svm_bo->list_lock); 3294 while (!list_empty(&svm_bo->range_list) && !r) { 3295 struct svm_range *prange = 3296 list_first_entry(&svm_bo->range_list, 3297 struct svm_range, svm_bo_list); 3298 int retries = 3; 3299 3300 list_del_init(&prange->svm_bo_list); 3301 spin_unlock(&svm_bo->list_lock); 3302 3303 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, 3304 prange->start, prange->last); 3305 3306 mutex_lock(&prange->migrate_mutex); 3307 do { 3308 r = svm_migrate_vram_to_ram(prange, 3309 svm_bo->eviction_fence->mm, 3310 KFD_MIGRATE_TRIGGER_TTM_EVICTION); 3311 } while (!r && prange->actual_loc && --retries); 3312 3313 if (!r && prange->actual_loc) 3314 pr_info_once("Migration failed during eviction"); 3315 3316 if (!prange->actual_loc) { 3317 mutex_lock(&prange->lock); 3318 prange->svm_bo = NULL; 3319 mutex_unlock(&prange->lock); 3320 } 3321 mutex_unlock(&prange->migrate_mutex); 3322 3323 spin_lock(&svm_bo->list_lock); 3324 } 3325 spin_unlock(&svm_bo->list_lock); 3326 mmap_read_unlock(mm); 3327 3328 dma_fence_signal(&svm_bo->eviction_fence->base); 3329 3330 /* This is the last reference to svm_bo, after svm_range_vram_node_free 3331 * has been called in svm_migrate_vram_to_ram 3332 */ 3333 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n"); 3334 svm_range_bo_unref(svm_bo); 3335 } 3336 3337 static int 3338 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, 3339 uint64_t start, uint64_t size, uint32_t nattr, 3340 struct kfd_ioctl_svm_attribute *attrs) 3341 { 3342 struct amdkfd_process_info *process_info = p->kgd_process_info; 3343 struct list_head update_list; 3344 struct list_head insert_list; 3345 struct list_head remove_list; 3346 struct svm_range_list *svms; 3347 struct svm_range 
*prange; 3348 struct svm_range *next; 3349 bool update_mapping = false; 3350 bool flush_tlb; 3351 int r = 0; 3352 3353 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n", 3354 p->pasid, &p->svms, start, start + size - 1, size); 3355 3356 r = svm_range_check_attr(p, nattr, attrs); 3357 if (r) 3358 return r; 3359 3360 svms = &p->svms; 3361 3362 mutex_lock(&process_info->lock); 3363 3364 svm_range_list_lock_and_flush_work(svms, mm); 3365 3366 r = svm_range_is_valid(p, start, size); 3367 if (r) { 3368 pr_debug("invalid range r=%d\n", r); 3369 mmap_write_unlock(mm); 3370 goto out; 3371 } 3372 3373 mutex_lock(&svms->lock); 3374 3375 /* Add new range and split existing ranges as needed */ 3376 r = svm_range_add(p, start, size, nattr, attrs, &update_list, 3377 &insert_list, &remove_list); 3378 if (r) { 3379 mutex_unlock(&svms->lock); 3380 mmap_write_unlock(mm); 3381 goto out; 3382 } 3383 /* Apply changes as a transaction */ 3384 list_for_each_entry_safe(prange, next, &insert_list, list) { 3385 svm_range_add_to_svms(prange); 3386 svm_range_add_notifier_locked(mm, prange); 3387 } 3388 list_for_each_entry(prange, &update_list, update_list) { 3389 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping); 3390 /* TODO: unmap ranges from GPU that lost access */ 3391 } 3392 list_for_each_entry_safe(prange, next, &remove_list, update_list) { 3393 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", 3394 prange->svms, prange, prange->start, 3395 prange->last); 3396 svm_range_unlink(prange); 3397 svm_range_remove_notifier(prange); 3398 svm_range_free(prange, false); 3399 } 3400 3401 mmap_write_downgrade(mm); 3402 /* Trigger migrations and revalidate and map to GPUs as needed. If 3403 * this fails we may be left with partially completed actions. There 3404 * is no clean way of rolling back to the previous state in such a 3405 * case because the rollback wouldn't be guaranteed to work either. 
3406 */ 3407 list_for_each_entry(prange, &update_list, update_list) { 3408 bool migrated; 3409 3410 mutex_lock(&prange->migrate_mutex); 3411 3412 r = svm_range_trigger_migration(mm, prange, &migrated); 3413 if (r) 3414 goto out_unlock_range; 3415 3416 if (migrated && (!p->xnack_enabled || 3417 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) && 3418 prange->mapped_to_gpu) { 3419 pr_debug("restore_work will update mappings of GPUs\n"); 3420 mutex_unlock(&prange->migrate_mutex); 3421 continue; 3422 } 3423 3424 if (!migrated && !update_mapping) { 3425 mutex_unlock(&prange->migrate_mutex); 3426 continue; 3427 } 3428 3429 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu; 3430 3431 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, 3432 true, true, flush_tlb); 3433 if (r) 3434 pr_debug("failed %d to map svm range\n", r); 3435 3436 out_unlock_range: 3437 mutex_unlock(&prange->migrate_mutex); 3438 if (r) 3439 break; 3440 } 3441 3442 svm_range_debug_dump(svms); 3443 3444 mutex_unlock(&svms->lock); 3445 mmap_read_unlock(mm); 3446 out: 3447 mutex_unlock(&process_info->lock); 3448 3449 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, 3450 &p->svms, start, start + size - 1, r); 3451 3452 return r; 3453 } 3454 3455 static int 3456 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm, 3457 uint64_t start, uint64_t size, uint32_t nattr, 3458 struct kfd_ioctl_svm_attribute *attrs) 3459 { 3460 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); 3461 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); 3462 bool get_preferred_loc = false; 3463 bool get_prefetch_loc = false; 3464 bool get_granularity = false; 3465 bool get_accessible = false; 3466 bool get_flags = false; 3467 uint64_t last = start + size - 1UL; 3468 uint8_t granularity = 0xff; 3469 struct interval_tree_node *node; 3470 struct svm_range_list *svms; 3471 struct svm_range *prange; 3472 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3473 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3474 uint32_t flags_and = 0xffffffff; 3475 uint32_t flags_or = 0; 3476 int gpuidx; 3477 uint32_t i; 3478 int r = 0; 3479 3480 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, 3481 start + size - 1, nattr); 3482 3483 /* Flush pending deferred work to avoid racing with deferred actions from 3484 * previous memory map changes (e.g. munmap). Concurrent memory map changes 3485 * can still race with get_attr because we don't hold the mmap lock. But that 3486 * would be a race condition in the application anyway, and undefined 3487 * behaviour is acceptable in that case. 
3488 */ 3489 flush_work(&p->svms.deferred_list_work); 3490 3491 mmap_read_lock(mm); 3492 r = svm_range_is_valid(p, start, size); 3493 mmap_read_unlock(mm); 3494 if (r) { 3495 pr_debug("invalid range r=%d\n", r); 3496 return r; 3497 } 3498 3499 for (i = 0; i < nattr; i++) { 3500 switch (attrs[i].type) { 3501 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 3502 get_preferred_loc = true; 3503 break; 3504 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3505 get_prefetch_loc = true; 3506 break; 3507 case KFD_IOCTL_SVM_ATTR_ACCESS: 3508 get_accessible = true; 3509 break; 3510 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3511 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 3512 get_flags = true; 3513 break; 3514 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 3515 get_granularity = true; 3516 break; 3517 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 3518 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 3519 fallthrough; 3520 default: 3521 pr_debug("get invalid attr type 0x%x\n", attrs[i].type); 3522 return -EINVAL; 3523 } 3524 } 3525 3526 svms = &p->svms; 3527 3528 mutex_lock(&svms->lock); 3529 3530 node = interval_tree_iter_first(&svms->objects, start, last); 3531 if (!node) { 3532 pr_debug("range attrs not found return default values\n"); 3533 svm_range_set_default_attributes(&location, &prefetch_loc, 3534 &granularity, &flags_and); 3535 flags_or = flags_and; 3536 if (p->xnack_enabled) 3537 bitmap_copy(bitmap_access, svms->bitmap_supported, 3538 MAX_GPU_INSTANCE); 3539 else 3540 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE); 3541 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE); 3542 goto fill_values; 3543 } 3544 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); 3545 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE); 3546 3547 while (node) { 3548 struct interval_tree_node *next; 3549 3550 prange = container_of(node, struct svm_range, it_node); 3551 next = interval_tree_iter_next(node, start, last); 3552 3553 if (get_preferred_loc) { 3554 if (prange->preferred_loc == 3555 KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3556 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED && 3557 location != prange->preferred_loc)) { 3558 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3559 get_preferred_loc = false; 3560 } else { 3561 location = prange->preferred_loc; 3562 } 3563 } 3564 if (get_prefetch_loc) { 3565 if (prange->prefetch_loc == 3566 KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3567 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED && 3568 prefetch_loc != prange->prefetch_loc)) { 3569 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3570 get_prefetch_loc = false; 3571 } else { 3572 prefetch_loc = prange->prefetch_loc; 3573 } 3574 } 3575 if (get_accessible) { 3576 bitmap_and(bitmap_access, bitmap_access, 3577 prange->bitmap_access, MAX_GPU_INSTANCE); 3578 bitmap_and(bitmap_aip, bitmap_aip, 3579 prange->bitmap_aip, MAX_GPU_INSTANCE); 3580 } 3581 if (get_flags) { 3582 flags_and &= prange->flags; 3583 flags_or |= prange->flags; 3584 } 3585 3586 if (get_granularity && prange->granularity < granularity) 3587 granularity = prange->granularity; 3588 3589 node = next; 3590 } 3591 fill_values: 3592 mutex_unlock(&svms->lock); 3593 3594 for (i = 0; i < nattr; i++) { 3595 switch (attrs[i].type) { 3596 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 3597 attrs[i].value = location; 3598 break; 3599 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3600 attrs[i].value = prefetch_loc; 3601 break; 3602 case KFD_IOCTL_SVM_ATTR_ACCESS: 3603 gpuidx = kfd_process_gpuidx_from_gpuid(p, 3604 attrs[i].value); 3605 if (gpuidx < 0) { 3606 pr_debug("invalid gpuid %x\n", attrs[i].value); 3607 return -EINVAL; 3608 } 
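/* Rewrite the attribute type to report the effective access for this GPU */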
3609 if (test_bit(gpuidx, bitmap_access)) 3610 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS; 3611 else if (test_bit(gpuidx, bitmap_aip)) 3612 attrs[i].type = 3613 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE; 3614 else 3615 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS; 3616 break; 3617 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3618 attrs[i].value = flags_and; 3619 break; 3620 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 3621 attrs[i].value = ~flags_or; 3622 break; 3623 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 3624 attrs[i].value = (uint32_t)granularity; 3625 break; 3626 } 3627 } 3628 3629 return 0; 3630 } 3631 3632 int kfd_criu_resume_svm(struct kfd_process *p) 3633 { 3634 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL; 3635 int nattr_common = 4, nattr_accessibility = 1; 3636 struct criu_svm_metadata *criu_svm_md = NULL; 3637 struct svm_range_list *svms = &p->svms; 3638 struct criu_svm_metadata *next = NULL; 3639 uint32_t set_flags = 0xffffffff; 3640 int i, j, num_attrs, ret = 0; 3641 uint64_t set_attr_size; 3642 struct mm_struct *mm; 3643 3644 if (list_empty(&svms->criu_svm_metadata_list)) { 3645 pr_debug("No SVM data from CRIU restore stage 2\n"); 3646 return ret; 3647 } 3648 3649 mm = get_task_mm(p->lead_thread); 3650 if (!mm) { 3651 pr_err("failed to get mm for the target process\n"); 3652 return -ESRCH; 3653 } 3654 3655 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds); 3656 3657 i = j = 0; 3658 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) { 3659 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n", 3660 i, criu_svm_md->data.start_addr, criu_svm_md->data.size); 3661 3662 for (j = 0; j < num_attrs; j++) { 3663 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n", 3664 i, j, criu_svm_md->data.attrs[j].type, 3665 i, j, criu_svm_md->data.attrs[j].value); 3666 switch (criu_svm_md->data.attrs[j].type) { 3667 /* During Checkpoint operation, the query for 3668 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might 3669 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were 3670 * not used by the range which was checkpointed. Care 3671 * must be taken to not restore with an invalid value 3672 * otherwise the gpuidx value will be invalid and 3673 * set_attr would eventually fail so just replace those 3674 * with another dummy attribute such as 3675 * KFD_IOCTL_SVM_ATTR_SET_FLAGS. 
3676 */ 3677 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3678 if (criu_svm_md->data.attrs[j].value == 3679 KFD_IOCTL_SVM_LOCATION_UNDEFINED) { 3680 criu_svm_md->data.attrs[j].type = 3681 KFD_IOCTL_SVM_ATTR_SET_FLAGS; 3682 criu_svm_md->data.attrs[j].value = 0; 3683 } 3684 break; 3685 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3686 set_flags = criu_svm_md->data.attrs[j].value; 3687 break; 3688 default: 3689 break; 3690 } 3691 } 3692 3693 /* CLR_FLAGS is not available via get_attr during checkpoint but 3694 * it needs to be inserted before restoring the ranges so 3695 * allocate extra space for it before calling set_attr 3696 */ 3697 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) * 3698 (num_attrs + 1); 3699 set_attr_new = krealloc(set_attr, set_attr_size, 3700 GFP_KERNEL); 3701 if (!set_attr_new) { 3702 ret = -ENOMEM; 3703 goto exit; 3704 } 3705 set_attr = set_attr_new; 3706 3707 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs * 3708 sizeof(struct kfd_ioctl_svm_attribute)); 3709 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS; 3710 set_attr[num_attrs].value = ~set_flags; 3711 3712 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr, 3713 criu_svm_md->data.size, num_attrs + 1, 3714 set_attr); 3715 if (ret) { 3716 pr_err("CRIU: failed to set range attributes\n"); 3717 goto exit; 3718 } 3719 3720 i++; 3721 } 3722 exit: 3723 kfree(set_attr); 3724 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) { 3725 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n", 3726 criu_svm_md->data.start_addr); 3727 kfree(criu_svm_md); 3728 } 3729 3730 mmput(mm); 3731 return ret; 3732 3733 } 3734 3735 int kfd_criu_restore_svm(struct kfd_process *p, 3736 uint8_t __user *user_priv_ptr, 3737 uint64_t *priv_data_offset, 3738 uint64_t max_priv_data_size) 3739 { 3740 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size; 3741 int nattr_common = 4, nattr_accessibility = 1; 3742 struct criu_svm_metadata *criu_svm_md = NULL; 3743 struct svm_range_list *svms = &p->svms; 3744 uint32_t num_devices; 3745 int ret = 0; 3746 3747 num_devices = p->n_pdds; 3748 /* Handle one SVM range object at a time, also the number of gpus are 3749 * assumed to be same on the restore node, checking must be done while 3750 * evaluating the topology earlier 3751 */ 3752 3753 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) * 3754 (nattr_common + nattr_accessibility * num_devices); 3755 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size; 3756 3757 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) + 3758 svm_attrs_size; 3759 3760 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL); 3761 if (!criu_svm_md) { 3762 pr_err("failed to allocate memory to store svm metadata\n"); 3763 return -ENOMEM; 3764 } 3765 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) { 3766 ret = -EINVAL; 3767 goto exit; 3768 } 3769 3770 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset, 3771 svm_priv_data_size); 3772 if (ret) { 3773 ret = -EFAULT; 3774 goto exit; 3775 } 3776 *priv_data_offset += svm_priv_data_size; 3777 3778 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list); 3779 3780 return 0; 3781 3782 3783 exit: 3784 kfree(criu_svm_md); 3785 return ret; 3786 } 3787 3788 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 3789 uint64_t *svm_priv_data_size) 3790 { 3791 uint64_t total_size, accessibility_size, common_attr_size; 3792 int nattr_common = 4, nattr_accessibility = 1; 3793 int num_devices = 
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size)
{
	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	uint32_t num_devices;
	int ret = 0;

	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; this must have been
	 * verified earlier, when the topology was evaluated.
	 */

	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
		(nattr_common + nattr_accessibility * num_devices);
	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;

	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
								svm_attrs_size;

	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
	if (!criu_svm_md) {
		pr_err("failed to allocate memory to store svm metadata\n");
		return -ENOMEM;
	}
	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
			     svm_priv_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += svm_priv_data_size;

	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);

	return 0;

exit:
	kfree(criu_svm_md);
	return ret;
}

int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size)
{
	uint64_t total_size, accessibility_size, common_attr_size;
	int nattr_common = 4, nattr_accessibility = 1;
	int num_devices = p->n_pdds;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t count = 0;

	*svm_priv_data_size = 0;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mutex_lock(&svms->lock);
	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1);
		count++;
	}
	mutex_unlock(&svms->lock);

	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for each GPU
	 * individually; the remaining ones apply to the entire process
	 * regardless of the GPU nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
	 *
	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
	 * (Considered as one; the type is altered during the query and the
	 * value is the gpuid.)
	 * KFD_IOCTL_SVM_ATTR_ACCESS
	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
	 */
	if (*num_svm_ranges > 0) {
		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_common;
		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_accessibility * num_devices;

		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			common_attr_size + accessibility_size;

		*svm_priv_data_size = *num_svm_ranges * total_size;
	}

	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
		 *svm_priv_data_size);
	return 0;
}

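/**
 * kfd_criu_checkpoint_svm - serialize all SVM ranges into the CRIU blob
 * @p: target kfd_process
 * @user_priv_data: user pointer to the CRIU private data blob
 * @priv_data_offset: offset into the blob, advanced past each written object
 *
 * For every svm_range of the process, query the four common attributes plus
 * one accessibility attribute per GPU via svm_range_get_attr() and copy the
 * resulting kfd_criu_svm_range_priv_data object to user space. The required
 * buffer size is reported beforehand by svm_range_get_info().
 *
 * Return: 0 on success, negative errno on failure.
 */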
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_data_offset)
{
	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
	struct kfd_ioctl_svm_attribute *query_attr = NULL;
	uint64_t svm_priv_data_size, query_attr_size = 0;
	int index, nattr_common = 4, ret = 0;
	struct svm_range_list *svms;
	int num_devices = p->n_pdds;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
				(nattr_common + num_devices);

	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
	if (!query_attr) {
		ret = -ENOMEM;
		goto exit;
	}

	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;

	for (index = 0; index < num_devices; index++) {
		struct kfd_process_device *pdd = p->pdds[index];

		query_attr[index + nattr_common].type =
			KFD_IOCTL_SVM_ATTR_ACCESS;
		query_attr[index + nattr_common].value = pdd->user_gpu_id;
	}

	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;

	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
	if (!svm_priv) {
		ret = -ENOMEM;
		goto exit_query;
	}

	index = 0;
	list_for_each_entry(prange, &svms->list, list) {
		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
		svm_priv->start_addr = prange->start;
		svm_priv->size = prange->npages;
		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->npages * PAGE_SIZE);

		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
					 svm_priv->size,
					 (nattr_common + num_devices),
					 svm_priv->attrs);
		if (ret) {
			pr_err("CRIU: failed to obtain range attributes\n");
			goto exit_priv;
		}

		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
				 svm_priv_data_size)) {
			pr_err("Failed to copy svm priv to user\n");
			ret = -EFAULT;
			goto exit_priv;
		}

		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
	kfree(svm_priv);
exit_query:
	kfree(query_attr);
exit:
	mmput(mm);
	return ret;
}

int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
	struct mm_struct *mm = current->mm;
	int r;

	start >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
		break;
	case KFD_IOCTL_SVM_OP_GET_ATTR:
		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
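
/*
 * Usage sketch (illustrative only, not part of the driver): user space
 * reaches svm_ioctl() through the AMDKFD_IOC_SVM ioctl on /dev/kfd. The
 * structure and attribute names below are assumed to follow
 * include/uapi/linux/kfd_ioctl.h; verify against the headers actually in
 * use. Setting one attribute on a page-aligned buffer might look roughly
 * like this:
 *
 *	size_t nattr = 1;
 *	struct kfd_ioctl_svm_args *args;
 *
 *	args = calloc(1, sizeof(*args) + nattr * sizeof(args->attrs[0]));
 *	args->start_addr = (uint64_t)(uintptr_t)buf;
 *	args->size = buf_size;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = nattr;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *	args->attrs[0].value = KFD_IOCTL_SVM_FLAG_COHERENT;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SVM, args))
 *		perror("AMDKFD_IOC_SVM");
 *
 * svm_ioctl() converts start_addr and size from bytes to page units before
 * dispatching to svm_range_set_attr() or svm_range_get_attr().
 */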