1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * Copyright 2020-2021 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/types.h> 25 #include <linux/sched/task.h> 26 #include <drm/ttm/ttm_tt.h> 27 #include "amdgpu_sync.h" 28 #include "amdgpu_object.h" 29 #include "amdgpu_vm.h" 30 #include "amdgpu_hmm.h" 31 #include "amdgpu.h" 32 #include "amdgpu_xgmi.h" 33 #include "kfd_priv.h" 34 #include "kfd_svm.h" 35 #include "kfd_migrate.h" 36 #include "kfd_smi_events.h" 37 38 #ifdef dev_fmt 39 #undef dev_fmt 40 #endif 41 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__ 42 43 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1 44 45 /* Long enough to ensure no retry fault comes after svm range is restored and 46 * page table is updated. 47 */ 48 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) 49 50 /* Giant svm range split into smaller ranges based on this, it is decided using 51 * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to 52 * power of 2MB. 53 */ 54 static uint64_t max_svm_range_pages; 55 56 struct criu_svm_metadata { 57 struct list_head list; 58 struct kfd_criu_svm_range_priv_data data; 59 }; 60 61 static void svm_range_evict_svm_bo_worker(struct work_struct *work); 62 static bool 63 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, 64 const struct mmu_notifier_range *range, 65 unsigned long cur_seq); 66 static int 67 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, 68 uint64_t *bo_s, uint64_t *bo_l); 69 static const struct mmu_interval_notifier_ops svm_range_mn_ops = { 70 .invalidate = svm_range_cpu_invalidate_pagetables, 71 }; 72 73 /** 74 * svm_range_unlink - unlink svm_range from lists and interval tree 75 * @prange: svm range structure to be removed 76 * 77 * Remove the svm_range from the svms and svm_bo lists and the svms 78 * interval tree. 79 * 80 * Context: The caller must hold svms->lock 81 */ 82 static void svm_range_unlink(struct svm_range *prange) 83 { 84 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 85 prange, prange->start, prange->last); 86 87 if (prange->svm_bo) { 88 spin_lock(&prange->svm_bo->list_lock); 89 list_del(&prange->svm_bo_list); 90 spin_unlock(&prange->svm_bo->list_lock); 91 } 92 93 list_del(&prange->list); 94 if (prange->it_node.start != 0 && prange->it_node.last != 0) 95 interval_tree_remove(&prange->it_node, &prange->svms->objects); 96 } 97 98 static void 99 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange) 100 { 101 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 102 prange, prange->start, prange->last); 103 104 mmu_interval_notifier_insert_locked(&prange->notifier, mm, 105 prange->start << PAGE_SHIFT, 106 prange->npages << PAGE_SHIFT, 107 &svm_range_mn_ops); 108 } 109 110 /** 111 * svm_range_add_to_svms - add svm range to svms 112 * @prange: svm range structure to be added 113 * 114 * Add the svm range to svms interval tree and link list 115 * 116 * Context: The caller must hold svms->lock 117 */ 118 static void svm_range_add_to_svms(struct svm_range *prange) 119 { 120 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, 121 prange, prange->start, prange->last); 122 123 list_move_tail(&prange->list, &prange->svms->list); 124 prange->it_node.start = prange->start; 125 prange->it_node.last = prange->last; 126 interval_tree_insert(&prange->it_node, &prange->svms->objects); 127 } 128 129 static void svm_range_remove_notifier(struct svm_range *prange) 130 { 131 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", 132 prange->svms, prange, 133 prange->notifier.interval_tree.start >> PAGE_SHIFT, 134 prange->notifier.interval_tree.last >> PAGE_SHIFT); 135 136 if (prange->notifier.interval_tree.start != 0 && 137 prange->notifier.interval_tree.last != 0) 138 mmu_interval_notifier_remove(&prange->notifier); 139 } 140 141 static bool 142 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) 143 { 144 return dma_addr && !dma_mapping_error(dev, dma_addr) && 145 !(dma_addr & SVM_RANGE_VRAM_DOMAIN); 146 } 147 148 static int 149 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, 150 unsigned long offset, unsigned long npages, 151 unsigned long *hmm_pfns, uint32_t gpuidx) 152 { 153 enum dma_data_direction dir = DMA_BIDIRECTIONAL; 154 dma_addr_t *addr = prange->dma_addr[gpuidx]; 155 struct device *dev = adev->dev; 156 struct page *page; 157 int i, r; 158 159 if (!addr) { 160 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL); 161 if (!addr) 162 return -ENOMEM; 163 prange->dma_addr[gpuidx] = addr; 164 } 165 166 addr += offset; 167 for (i = 0; i < npages; i++) { 168 if (svm_is_valid_dma_mapping_addr(dev, addr[i])) 169 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); 170 171 page = hmm_pfn_to_page(hmm_pfns[i]); 172 if (is_zone_device_page(page)) { 173 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; 174 175 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + 176 bo_adev->vm_manager.vram_base_offset - 177 bo_adev->kfd.pgmap.range.start; 178 addr[i] |= SVM_RANGE_VRAM_DOMAIN; 179 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); 180 continue; 181 } 182 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); 183 r = dma_mapping_error(dev, addr[i]); 184 if (r) { 185 dev_err(dev, "failed %d dma_map_page\n", r); 186 return r; 187 } 188 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", 189 addr[i] >> PAGE_SHIFT, page_to_pfn(page)); 190 } 191 return 0; 192 } 193 194 static int 195 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, 196 unsigned long offset, unsigned long npages, 197 unsigned long *hmm_pfns) 198 { 199 struct kfd_process *p; 200 uint32_t gpuidx; 201 int r; 202 203 p = container_of(prange->svms, struct kfd_process, svms); 204 205 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 206 struct kfd_process_device *pdd; 207 208 pr_debug("mapping to gpu idx 0x%x\n", gpuidx); 209 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 210 if (!pdd) { 211 pr_debug("failed to find device idx %d\n", gpuidx); 212 return -EINVAL; 213 } 214 215 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, 216 hmm_pfns, gpuidx); 217 if (r) 218 break; 219 } 220 221 return r; 222 } 223 224 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, 225 unsigned long offset, unsigned long npages) 226 { 227 enum dma_data_direction dir = DMA_BIDIRECTIONAL; 228 int i; 229 230 if (!dma_addr) 231 return; 232 233 for (i = offset; i < offset + npages; i++) { 234 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) 235 continue; 236 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); 237 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); 238 dma_addr[i] = 0; 239 } 240 } 241 242 void svm_range_free_dma_mappings(struct svm_range *prange) 243 { 244 struct kfd_process_device *pdd; 245 dma_addr_t *dma_addr; 246 struct device *dev; 247 struct kfd_process *p; 248 uint32_t gpuidx; 249 250 p = container_of(prange->svms, struct kfd_process, svms); 251 252 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) { 253 dma_addr = prange->dma_addr[gpuidx]; 254 if (!dma_addr) 255 continue; 256 257 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 258 if (!pdd) { 259 pr_debug("failed to find device idx %d\n", gpuidx); 260 continue; 261 } 262 dev = &pdd->dev->adev->pdev->dev; 263 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); 264 kvfree(dma_addr); 265 prange->dma_addr[gpuidx] = NULL; 266 } 267 } 268 269 static void svm_range_free(struct svm_range *prange, bool update_mem_usage) 270 { 271 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT; 272 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); 273 274 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, 275 prange->start, prange->last); 276 277 svm_range_vram_node_free(prange); 278 svm_range_free_dma_mappings(prange); 279 280 if (update_mem_usage && !p->xnack_enabled) { 281 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); 282 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 283 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 284 } 285 mutex_destroy(&prange->lock); 286 mutex_destroy(&prange->migrate_mutex); 287 kfree(prange); 288 } 289 290 static void 291 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc, 292 uint8_t *granularity, uint32_t *flags) 293 { 294 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 295 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 296 *granularity = 9; 297 *flags = 298 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT; 299 } 300 301 static struct 302 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, 303 uint64_t last, bool update_mem_usage) 304 { 305 uint64_t size = last - start + 1; 306 struct svm_range *prange; 307 struct kfd_process *p; 308 309 prange = kzalloc(sizeof(*prange), GFP_KERNEL); 310 if (!prange) 311 return NULL; 312 313 p = container_of(svms, struct kfd_process, svms); 314 if (!p->xnack_enabled && update_mem_usage && 315 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT, 316 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) { 317 pr_info("SVM mapping failed, exceeds resident system memory limit\n"); 318 kfree(prange); 319 return NULL; 320 } 321 prange->npages = size; 322 prange->svms = svms; 323 prange->start = start; 324 prange->last = last; 325 INIT_LIST_HEAD(&prange->list); 326 INIT_LIST_HEAD(&prange->update_list); 327 INIT_LIST_HEAD(&prange->svm_bo_list); 328 INIT_LIST_HEAD(&prange->deferred_list); 329 INIT_LIST_HEAD(&prange->child_list); 330 atomic_set(&prange->invalid, 0); 331 prange->validate_timestamp = 0; 332 mutex_init(&prange->migrate_mutex); 333 mutex_init(&prange->lock); 334 335 if (p->xnack_enabled) 336 bitmap_copy(prange->bitmap_access, svms->bitmap_supported, 337 MAX_GPU_INSTANCE); 338 339 svm_range_set_default_attributes(&prange->preferred_loc, 340 &prange->prefetch_loc, 341 &prange->granularity, &prange->flags); 342 343 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last); 344 345 return prange; 346 } 347 348 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo) 349 { 350 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref)) 351 return false; 352 353 return true; 354 } 355 356 static void svm_range_bo_release(struct kref *kref) 357 { 358 struct svm_range_bo *svm_bo; 359 360 svm_bo = container_of(kref, struct svm_range_bo, kref); 361 pr_debug("svm_bo 0x%p\n", svm_bo); 362 363 spin_lock(&svm_bo->list_lock); 364 while (!list_empty(&svm_bo->range_list)) { 365 struct svm_range *prange = 366 list_first_entry(&svm_bo->range_list, 367 struct svm_range, svm_bo_list); 368 /* list_del_init tells a concurrent svm_range_vram_node_new when 369 * it's safe to reuse the svm_bo pointer and svm_bo_list head. 370 */ 371 list_del_init(&prange->svm_bo_list); 372 spin_unlock(&svm_bo->list_lock); 373 374 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, 375 prange->start, prange->last); 376 mutex_lock(&prange->lock); 377 prange->svm_bo = NULL; 378 mutex_unlock(&prange->lock); 379 380 spin_lock(&svm_bo->list_lock); 381 } 382 spin_unlock(&svm_bo->list_lock); 383 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) { 384 /* We're not in the eviction worker. 385 * Signal the fence and synchronize with any 386 * pending eviction work. 387 */ 388 dma_fence_signal(&svm_bo->eviction_fence->base); 389 cancel_work_sync(&svm_bo->eviction_work); 390 } 391 dma_fence_put(&svm_bo->eviction_fence->base); 392 amdgpu_bo_unref(&svm_bo->bo); 393 kfree(svm_bo); 394 } 395 396 static void svm_range_bo_wq_release(struct work_struct *work) 397 { 398 struct svm_range_bo *svm_bo; 399 400 svm_bo = container_of(work, struct svm_range_bo, release_work); 401 svm_range_bo_release(&svm_bo->kref); 402 } 403 404 static void svm_range_bo_release_async(struct kref *kref) 405 { 406 struct svm_range_bo *svm_bo; 407 408 svm_bo = container_of(kref, struct svm_range_bo, kref); 409 pr_debug("svm_bo 0x%p\n", svm_bo); 410 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release); 411 schedule_work(&svm_bo->release_work); 412 } 413 414 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo) 415 { 416 kref_put(&svm_bo->kref, svm_range_bo_release_async); 417 } 418 419 static void svm_range_bo_unref(struct svm_range_bo *svm_bo) 420 { 421 if (svm_bo) 422 kref_put(&svm_bo->kref, svm_range_bo_release); 423 } 424 425 static bool 426 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange) 427 { 428 mutex_lock(&prange->lock); 429 if (!prange->svm_bo) { 430 mutex_unlock(&prange->lock); 431 return false; 432 } 433 if (prange->ttm_res) { 434 /* We still have a reference, all is well */ 435 mutex_unlock(&prange->lock); 436 return true; 437 } 438 if (svm_bo_ref_unless_zero(prange->svm_bo)) { 439 /* 440 * Migrate from GPU to GPU, remove range from source svm_bo->node 441 * range list, and return false to allocate svm_bo from destination 442 * node. 443 */ 444 if (prange->svm_bo->node != node) { 445 mutex_unlock(&prange->lock); 446 447 spin_lock(&prange->svm_bo->list_lock); 448 list_del_init(&prange->svm_bo_list); 449 spin_unlock(&prange->svm_bo->list_lock); 450 451 svm_range_bo_unref(prange->svm_bo); 452 return false; 453 } 454 if (READ_ONCE(prange->svm_bo->evicting)) { 455 struct dma_fence *f; 456 struct svm_range_bo *svm_bo; 457 /* The BO is getting evicted, 458 * we need to get a new one 459 */ 460 mutex_unlock(&prange->lock); 461 svm_bo = prange->svm_bo; 462 f = dma_fence_get(&svm_bo->eviction_fence->base); 463 svm_range_bo_unref(prange->svm_bo); 464 /* wait for the fence to avoid long spin-loop 465 * at list_empty_careful 466 */ 467 dma_fence_wait(f, false); 468 dma_fence_put(f); 469 } else { 470 /* The BO was still around and we got 471 * a new reference to it 472 */ 473 mutex_unlock(&prange->lock); 474 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n", 475 prange->svms, prange->start, prange->last); 476 477 prange->ttm_res = prange->svm_bo->bo->tbo.resource; 478 return true; 479 } 480 481 } else { 482 mutex_unlock(&prange->lock); 483 } 484 485 /* We need a new svm_bo. Spin-loop to wait for concurrent 486 * svm_range_bo_release to finish removing this range from 487 * its range list. After this, it is safe to reuse the 488 * svm_bo pointer and svm_bo_list head. 489 */ 490 while (!list_empty_careful(&prange->svm_bo_list)) 491 ; 492 493 return false; 494 } 495 496 static struct svm_range_bo *svm_range_bo_new(void) 497 { 498 struct svm_range_bo *svm_bo; 499 500 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL); 501 if (!svm_bo) 502 return NULL; 503 504 kref_init(&svm_bo->kref); 505 INIT_LIST_HEAD(&svm_bo->range_list); 506 spin_lock_init(&svm_bo->list_lock); 507 508 return svm_bo; 509 } 510 511 int 512 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, 513 bool clear) 514 { 515 struct amdgpu_bo_param bp; 516 struct svm_range_bo *svm_bo; 517 struct amdgpu_bo_user *ubo; 518 struct amdgpu_bo *bo; 519 struct kfd_process *p; 520 struct mm_struct *mm; 521 int r; 522 523 p = container_of(prange->svms, struct kfd_process, svms); 524 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, 525 prange->start, prange->last); 526 527 if (svm_range_validate_svm_bo(node, prange)) 528 return 0; 529 530 svm_bo = svm_range_bo_new(); 531 if (!svm_bo) { 532 pr_debug("failed to alloc svm bo\n"); 533 return -ENOMEM; 534 } 535 mm = get_task_mm(p->lead_thread); 536 if (!mm) { 537 pr_debug("failed to get mm\n"); 538 kfree(svm_bo); 539 return -ESRCH; 540 } 541 svm_bo->node = node; 542 svm_bo->eviction_fence = 543 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 544 mm, 545 svm_bo); 546 mmput(mm); 547 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker); 548 svm_bo->evicting = 0; 549 memset(&bp, 0, sizeof(bp)); 550 bp.size = prange->npages * PAGE_SIZE; 551 bp.byte_align = PAGE_SIZE; 552 bp.domain = AMDGPU_GEM_DOMAIN_VRAM; 553 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; 554 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0; 555 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE; 556 bp.type = ttm_bo_type_device; 557 bp.resv = NULL; 558 if (node->xcp) 559 bp.xcp_id_plus1 = node->xcp->id + 1; 560 561 r = amdgpu_bo_create_user(node->adev, &bp, &ubo); 562 if (r) { 563 pr_debug("failed %d to create bo\n", r); 564 goto create_bo_failed; 565 } 566 bo = &ubo->bo; 567 568 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", 569 bo->tbo.resource->start << PAGE_SHIFT, bp.size, 570 bp.xcp_id_plus1 - 1); 571 572 r = amdgpu_bo_reserve(bo, true); 573 if (r) { 574 pr_debug("failed %d to reserve bo\n", r); 575 goto reserve_bo_failed; 576 } 577 578 if (clear) { 579 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 580 if (r) { 581 pr_debug("failed %d to sync bo\n", r); 582 amdgpu_bo_unreserve(bo); 583 goto reserve_bo_failed; 584 } 585 } 586 587 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 588 if (r) { 589 pr_debug("failed %d to reserve bo\n", r); 590 amdgpu_bo_unreserve(bo); 591 goto reserve_bo_failed; 592 } 593 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true); 594 595 amdgpu_bo_unreserve(bo); 596 597 svm_bo->bo = bo; 598 prange->svm_bo = svm_bo; 599 prange->ttm_res = bo->tbo.resource; 600 prange->offset = 0; 601 602 spin_lock(&svm_bo->list_lock); 603 list_add(&prange->svm_bo_list, &svm_bo->range_list); 604 spin_unlock(&svm_bo->list_lock); 605 606 return 0; 607 608 reserve_bo_failed: 609 amdgpu_bo_unref(&bo); 610 create_bo_failed: 611 dma_fence_put(&svm_bo->eviction_fence->base); 612 kfree(svm_bo); 613 prange->ttm_res = NULL; 614 615 return r; 616 } 617 618 void svm_range_vram_node_free(struct svm_range *prange) 619 { 620 svm_range_bo_unref(prange->svm_bo); 621 prange->ttm_res = NULL; 622 } 623 624 struct kfd_node * 625 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id) 626 { 627 struct kfd_process *p; 628 struct kfd_process_device *pdd; 629 630 p = container_of(prange->svms, struct kfd_process, svms); 631 pdd = kfd_process_device_data_by_id(p, gpu_id); 632 if (!pdd) { 633 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id); 634 return NULL; 635 } 636 637 return pdd->dev; 638 } 639 640 struct kfd_process_device * 641 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node) 642 { 643 struct kfd_process *p; 644 645 p = container_of(prange->svms, struct kfd_process, svms); 646 647 return kfd_get_process_device_data(node, p); 648 } 649 650 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) 651 { 652 struct ttm_operation_ctx ctx = { false, false }; 653 654 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); 655 656 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 657 } 658 659 static int 660 svm_range_check_attr(struct kfd_process *p, 661 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) 662 { 663 uint32_t i; 664 665 for (i = 0; i < nattr; i++) { 666 uint32_t val = attrs[i].value; 667 int gpuidx = MAX_GPU_INSTANCE; 668 669 switch (attrs[i].type) { 670 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 671 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM && 672 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED) 673 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 674 break; 675 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 676 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM) 677 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 678 break; 679 case KFD_IOCTL_SVM_ATTR_ACCESS: 680 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 681 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 682 gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 683 break; 684 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 685 break; 686 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 687 break; 688 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 689 break; 690 default: 691 pr_debug("unknown attr type 0x%x\n", attrs[i].type); 692 return -EINVAL; 693 } 694 695 if (gpuidx < 0) { 696 pr_debug("no GPU 0x%x found\n", val); 697 return -EINVAL; 698 } else if (gpuidx < MAX_GPU_INSTANCE && 699 !test_bit(gpuidx, p->svms.bitmap_supported)) { 700 pr_debug("GPU 0x%x not supported\n", val); 701 return -EINVAL; 702 } 703 } 704 705 return 0; 706 } 707 708 static void 709 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, 710 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, 711 bool *update_mapping) 712 { 713 uint32_t i; 714 int gpuidx; 715 716 for (i = 0; i < nattr; i++) { 717 switch (attrs[i].type) { 718 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 719 prange->preferred_loc = attrs[i].value; 720 break; 721 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 722 prange->prefetch_loc = attrs[i].value; 723 break; 724 case KFD_IOCTL_SVM_ATTR_ACCESS: 725 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 726 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 727 if (!p->xnack_enabled) 728 *update_mapping = true; 729 730 gpuidx = kfd_process_gpuidx_from_gpuid(p, 731 attrs[i].value); 732 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { 733 bitmap_clear(prange->bitmap_access, gpuidx, 1); 734 bitmap_clear(prange->bitmap_aip, gpuidx, 1); 735 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { 736 bitmap_set(prange->bitmap_access, gpuidx, 1); 737 bitmap_clear(prange->bitmap_aip, gpuidx, 1); 738 } else { 739 bitmap_clear(prange->bitmap_access, gpuidx, 1); 740 bitmap_set(prange->bitmap_aip, gpuidx, 1); 741 } 742 break; 743 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 744 *update_mapping = true; 745 prange->flags |= attrs[i].value; 746 break; 747 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 748 *update_mapping = true; 749 prange->flags &= ~attrs[i].value; 750 break; 751 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 752 prange->granularity = attrs[i].value; 753 break; 754 default: 755 WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); 756 } 757 } 758 } 759 760 static bool 761 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, 762 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) 763 { 764 uint32_t i; 765 int gpuidx; 766 767 for (i = 0; i < nattr; i++) { 768 switch (attrs[i].type) { 769 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 770 if (prange->preferred_loc != attrs[i].value) 771 return false; 772 break; 773 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 774 /* Prefetch should always trigger a migration even 775 * if the value of the attribute didn't change. 776 */ 777 return false; 778 case KFD_IOCTL_SVM_ATTR_ACCESS: 779 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 780 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 781 gpuidx = kfd_process_gpuidx_from_gpuid(p, 782 attrs[i].value); 783 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { 784 if (test_bit(gpuidx, prange->bitmap_access) || 785 test_bit(gpuidx, prange->bitmap_aip)) 786 return false; 787 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { 788 if (!test_bit(gpuidx, prange->bitmap_access)) 789 return false; 790 } else { 791 if (!test_bit(gpuidx, prange->bitmap_aip)) 792 return false; 793 } 794 break; 795 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 796 if ((prange->flags & attrs[i].value) != attrs[i].value) 797 return false; 798 break; 799 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 800 if ((prange->flags & attrs[i].value) != 0) 801 return false; 802 break; 803 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 804 if (prange->granularity != attrs[i].value) 805 return false; 806 break; 807 default: 808 WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); 809 } 810 } 811 812 return !prange->is_error_flag; 813 } 814 815 /** 816 * svm_range_debug_dump - print all range information from svms 817 * @svms: svm range list header 818 * 819 * debug output svm range start, end, prefetch location from svms 820 * interval tree and link list 821 * 822 * Context: The caller must hold svms->lock 823 */ 824 static void svm_range_debug_dump(struct svm_range_list *svms) 825 { 826 struct interval_tree_node *node; 827 struct svm_range *prange; 828 829 pr_debug("dump svms 0x%p list\n", svms); 830 pr_debug("range\tstart\tpage\tend\t\tlocation\n"); 831 832 list_for_each_entry(prange, &svms->list, list) { 833 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n", 834 prange, prange->start, prange->npages, 835 prange->start + prange->npages - 1, 836 prange->actual_loc); 837 } 838 839 pr_debug("dump svms 0x%p interval tree\n", svms); 840 pr_debug("range\tstart\tpage\tend\t\tlocation\n"); 841 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL); 842 while (node) { 843 prange = container_of(node, struct svm_range, it_node); 844 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n", 845 prange, prange->start, prange->npages, 846 prange->start + prange->npages - 1, 847 prange->actual_loc); 848 node = interval_tree_iter_next(node, 0, ~0ULL); 849 } 850 } 851 852 static int 853 svm_range_split_array(void *ppnew, void *ppold, size_t size, 854 uint64_t old_start, uint64_t old_n, 855 uint64_t new_start, uint64_t new_n) 856 { 857 unsigned char *new, *old, *pold; 858 uint64_t d; 859 860 if (!ppold) 861 return 0; 862 pold = *(unsigned char **)ppold; 863 if (!pold) 864 return 0; 865 866 new = kvmalloc_array(new_n, size, GFP_KERNEL); 867 if (!new) 868 return -ENOMEM; 869 870 d = (new_start - old_start) * size; 871 memcpy(new, pold + d, new_n * size); 872 873 old = kvmalloc_array(old_n, size, GFP_KERNEL); 874 if (!old) { 875 kvfree(new); 876 return -ENOMEM; 877 } 878 879 d = (new_start == old_start) ? new_n * size : 0; 880 memcpy(old, pold + d, old_n * size); 881 882 kvfree(pold); 883 *(void **)ppold = old; 884 *(void **)ppnew = new; 885 886 return 0; 887 } 888 889 static int 890 svm_range_split_pages(struct svm_range *new, struct svm_range *old, 891 uint64_t start, uint64_t last) 892 { 893 uint64_t npages = last - start + 1; 894 int i, r; 895 896 for (i = 0; i < MAX_GPU_INSTANCE; i++) { 897 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i], 898 sizeof(*old->dma_addr[i]), old->start, 899 npages, new->start, new->npages); 900 if (r) 901 return r; 902 } 903 904 return 0; 905 } 906 907 static int 908 svm_range_split_nodes(struct svm_range *new, struct svm_range *old, 909 uint64_t start, uint64_t last) 910 { 911 uint64_t npages = last - start + 1; 912 913 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n", 914 new->svms, new, new->start, start, last); 915 916 if (new->start == old->start) { 917 new->offset = old->offset; 918 old->offset += new->npages; 919 } else { 920 new->offset = old->offset + npages; 921 } 922 923 new->svm_bo = svm_range_bo_ref(old->svm_bo); 924 new->ttm_res = old->ttm_res; 925 926 spin_lock(&new->svm_bo->list_lock); 927 list_add(&new->svm_bo_list, &new->svm_bo->range_list); 928 spin_unlock(&new->svm_bo->list_lock); 929 930 return 0; 931 } 932 933 /** 934 * svm_range_split_adjust - split range and adjust 935 * 936 * @new: new range 937 * @old: the old range 938 * @start: the old range adjust to start address in pages 939 * @last: the old range adjust to last address in pages 940 * 941 * Copy system memory dma_addr or vram ttm_res in old range to new 942 * range from new_start up to size new->npages, the remaining old range is from 943 * start to last 944 * 945 * Return: 946 * 0 - OK, -ENOMEM - out of memory 947 */ 948 static int 949 svm_range_split_adjust(struct svm_range *new, struct svm_range *old, 950 uint64_t start, uint64_t last) 951 { 952 int r; 953 954 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n", 955 new->svms, new->start, old->start, old->last, start, last); 956 957 if (new->start < old->start || 958 new->last > old->last) { 959 WARN_ONCE(1, "invalid new range start or last\n"); 960 return -EINVAL; 961 } 962 963 r = svm_range_split_pages(new, old, start, last); 964 if (r) 965 return r; 966 967 if (old->actual_loc && old->ttm_res) { 968 r = svm_range_split_nodes(new, old, start, last); 969 if (r) 970 return r; 971 } 972 973 old->npages = last - start + 1; 974 old->start = start; 975 old->last = last; 976 new->flags = old->flags; 977 new->preferred_loc = old->preferred_loc; 978 new->prefetch_loc = old->prefetch_loc; 979 new->actual_loc = old->actual_loc; 980 new->granularity = old->granularity; 981 new->mapped_to_gpu = old->mapped_to_gpu; 982 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); 983 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); 984 985 return 0; 986 } 987 988 /** 989 * svm_range_split - split a range in 2 ranges 990 * 991 * @prange: the svm range to split 992 * @start: the remaining range start address in pages 993 * @last: the remaining range last address in pages 994 * @new: the result new range generated 995 * 996 * Two cases only: 997 * case 1: if start == prange->start 998 * prange ==> prange[start, last] 999 * new range [last + 1, prange->last] 1000 * 1001 * case 2: if last == prange->last 1002 * prange ==> prange[start, last] 1003 * new range [prange->start, start - 1] 1004 * 1005 * Return: 1006 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last 1007 */ 1008 static int 1009 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, 1010 struct svm_range **new) 1011 { 1012 uint64_t old_start = prange->start; 1013 uint64_t old_last = prange->last; 1014 struct svm_range_list *svms; 1015 int r = 0; 1016 1017 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms, 1018 old_start, old_last, start, last); 1019 1020 if (old_start != start && old_last != last) 1021 return -EINVAL; 1022 if (start < old_start || last > old_last) 1023 return -EINVAL; 1024 1025 svms = prange->svms; 1026 if (old_start == start) 1027 *new = svm_range_new(svms, last + 1, old_last, false); 1028 else 1029 *new = svm_range_new(svms, old_start, start - 1, false); 1030 if (!*new) 1031 return -ENOMEM; 1032 1033 r = svm_range_split_adjust(*new, prange, start, last); 1034 if (r) { 1035 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", 1036 r, old_start, old_last, start, last); 1037 svm_range_free(*new, false); 1038 *new = NULL; 1039 } 1040 1041 return r; 1042 } 1043 1044 static int 1045 svm_range_split_tail(struct svm_range *prange, 1046 uint64_t new_last, struct list_head *insert_list) 1047 { 1048 struct svm_range *tail; 1049 int r = svm_range_split(prange, prange->start, new_last, &tail); 1050 1051 if (!r) 1052 list_add(&tail->list, insert_list); 1053 return r; 1054 } 1055 1056 static int 1057 svm_range_split_head(struct svm_range *prange, 1058 uint64_t new_start, struct list_head *insert_list) 1059 { 1060 struct svm_range *head; 1061 int r = svm_range_split(prange, new_start, prange->last, &head); 1062 1063 if (!r) 1064 list_add(&head->list, insert_list); 1065 return r; 1066 } 1067 1068 static void 1069 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, 1070 struct svm_range *pchild, enum svm_work_list_ops op) 1071 { 1072 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n", 1073 pchild, pchild->start, pchild->last, prange, op); 1074 1075 pchild->work_item.mm = mm; 1076 pchild->work_item.op = op; 1077 list_add_tail(&pchild->child_list, &prange->child_list); 1078 } 1079 1080 /** 1081 * svm_range_split_by_granularity - collect ranges within granularity boundary 1082 * 1083 * @p: the process with svms list 1084 * @mm: mm structure 1085 * @addr: the vm fault address in pages, to split the prange 1086 * @parent: parent range if prange is from child list 1087 * @prange: prange to split 1088 * 1089 * Trims @prange to be a single aligned block of prange->granularity if 1090 * possible. The head and tail are added to the child_list in @parent. 1091 * 1092 * Context: caller must hold mmap_read_lock and prange->lock 1093 * 1094 * Return: 1095 * 0 - OK, otherwise error code 1096 */ 1097 int 1098 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, 1099 unsigned long addr, struct svm_range *parent, 1100 struct svm_range *prange) 1101 { 1102 struct svm_range *head, *tail; 1103 unsigned long start, last, size; 1104 int r; 1105 1106 /* Align splited range start and size to granularity size, then a single 1107 * PTE will be used for whole range, this reduces the number of PTE 1108 * updated and the L1 TLB space used for translation. 1109 */ 1110 size = 1UL << prange->granularity; 1111 start = ALIGN_DOWN(addr, size); 1112 last = ALIGN(addr + 1, size) - 1; 1113 1114 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", 1115 prange->svms, prange->start, prange->last, start, last, size); 1116 1117 if (start > prange->start) { 1118 r = svm_range_split(prange, start, prange->last, &head); 1119 if (r) 1120 return r; 1121 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); 1122 } 1123 1124 if (last < prange->last) { 1125 r = svm_range_split(prange, prange->start, last, &tail); 1126 if (r) 1127 return r; 1128 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); 1129 } 1130 1131 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ 1132 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { 1133 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; 1134 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", 1135 prange, prange->start, prange->last, 1136 SVM_OP_ADD_RANGE_AND_MAP); 1137 } 1138 return 0; 1139 } 1140 static bool 1141 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) 1142 { 1143 return (node_a->adev == node_b->adev || 1144 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev)); 1145 } 1146 1147 static uint64_t 1148 svm_range_get_pte_flags(struct kfd_node *node, 1149 struct svm_range *prange, int domain) 1150 { 1151 struct kfd_node *bo_node; 1152 uint32_t flags = prange->flags; 1153 uint32_t mapping_flags = 0; 1154 uint64_t pte_flags; 1155 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); 1156 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; 1157 bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/ 1158 unsigned int mtype_local; 1159 1160 if (domain == SVM_RANGE_VRAM_DOMAIN) 1161 bo_node = prange->svm_bo->node; 1162 1163 switch (node->adev->ip_versions[GC_HWIP][0]) { 1164 case IP_VERSION(9, 4, 1): 1165 if (domain == SVM_RANGE_VRAM_DOMAIN) { 1166 if (bo_node == node) { 1167 mapping_flags |= coherent ? 1168 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 1169 } else { 1170 mapping_flags |= coherent ? 1171 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1172 if (svm_nodes_in_same_hive(node, bo_node)) 1173 snoop = true; 1174 } 1175 } else { 1176 mapping_flags |= coherent ? 1177 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1178 } 1179 break; 1180 case IP_VERSION(9, 4, 2): 1181 if (domain == SVM_RANGE_VRAM_DOMAIN) { 1182 if (bo_node == node) { 1183 mapping_flags |= coherent ? 1184 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 1185 if (node->adev->gmc.xgmi.connected_to_cpu) 1186 snoop = true; 1187 } else { 1188 mapping_flags |= coherent ? 1189 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1190 if (svm_nodes_in_same_hive(node, bo_node)) 1191 snoop = true; 1192 } 1193 } else { 1194 mapping_flags |= coherent ? 1195 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1196 } 1197 break; 1198 case IP_VERSION(9, 4, 3): 1199 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : 1200 (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW); 1201 snoop = true; 1202 if (uncached) { 1203 mapping_flags |= AMDGPU_VM_MTYPE_UC; 1204 } else if (domain == SVM_RANGE_VRAM_DOMAIN) { 1205 /* local HBM region close to partition */ 1206 if (bo_node->adev == node->adev && 1207 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id)) 1208 mapping_flags |= mtype_local; 1209 /* local HBM region far from partition or remote XGMI GPU */ 1210 else if (svm_nodes_in_same_hive(bo_node, node)) 1211 mapping_flags |= AMDGPU_VM_MTYPE_NC; 1212 /* PCIe P2P */ 1213 else 1214 mapping_flags |= AMDGPU_VM_MTYPE_UC; 1215 /* system memory accessed by the APU */ 1216 } else if (node->adev->flags & AMD_IS_APU) { 1217 /* On NUMA systems, locality is determined per-page 1218 * in amdgpu_gmc_override_vm_pte_flags 1219 */ 1220 if (num_possible_nodes() <= 1) 1221 mapping_flags |= mtype_local; 1222 else 1223 mapping_flags |= AMDGPU_VM_MTYPE_NC; 1224 /* system memory accessed by the dGPU */ 1225 } else { 1226 mapping_flags |= AMDGPU_VM_MTYPE_UC; 1227 } 1228 break; 1229 default: 1230 mapping_flags |= coherent ? 1231 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 1232 } 1233 1234 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE; 1235 1236 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO) 1237 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE; 1238 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC) 1239 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 1240 1241 pte_flags = AMDGPU_PTE_VALID; 1242 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM; 1243 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; 1244 1245 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags); 1246 return pte_flags; 1247 } 1248 1249 static int 1250 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1251 uint64_t start, uint64_t last, 1252 struct dma_fence **fence) 1253 { 1254 uint64_t init_pte_value = 0; 1255 1256 pr_debug("[0x%llx 0x%llx]\n", start, last); 1257 1258 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start, 1259 last, init_pte_value, 0, 0, NULL, NULL, 1260 fence); 1261 } 1262 1263 static int 1264 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, 1265 unsigned long last, uint32_t trigger) 1266 { 1267 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 1268 struct kfd_process_device *pdd; 1269 struct dma_fence *fence = NULL; 1270 struct kfd_process *p; 1271 uint32_t gpuidx; 1272 int r = 0; 1273 1274 if (!prange->mapped_to_gpu) { 1275 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n", 1276 prange, prange->start, prange->last); 1277 return 0; 1278 } 1279 1280 if (prange->start == start && prange->last == last) { 1281 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange); 1282 prange->mapped_to_gpu = false; 1283 } 1284 1285 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip, 1286 MAX_GPU_INSTANCE); 1287 p = container_of(prange->svms, struct kfd_process, svms); 1288 1289 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 1290 pr_debug("unmap from gpu idx 0x%x\n", gpuidx); 1291 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1292 if (!pdd) { 1293 pr_debug("failed to find device idx %d\n", gpuidx); 1294 return -EINVAL; 1295 } 1296 1297 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, 1298 start, last, trigger); 1299 1300 r = svm_range_unmap_from_gpu(pdd->dev->adev, 1301 drm_priv_to_vm(pdd->drm_priv), 1302 start, last, &fence); 1303 if (r) 1304 break; 1305 1306 if (fence) { 1307 r = dma_fence_wait(fence, false); 1308 dma_fence_put(fence); 1309 fence = NULL; 1310 if (r) 1311 break; 1312 } 1313 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT); 1314 } 1315 1316 return r; 1317 } 1318 1319 static int 1320 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, 1321 unsigned long offset, unsigned long npages, bool readonly, 1322 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev, 1323 struct dma_fence **fence, bool flush_tlb) 1324 { 1325 struct amdgpu_device *adev = pdd->dev->adev; 1326 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); 1327 uint64_t pte_flags; 1328 unsigned long last_start; 1329 int last_domain; 1330 int r = 0; 1331 int64_t i, j; 1332 1333 last_start = prange->start + offset; 1334 1335 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, 1336 last_start, last_start + npages - 1, readonly); 1337 1338 for (i = offset; i < offset + npages; i++) { 1339 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; 1340 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; 1341 1342 /* Collect all pages in the same address range and memory domain 1343 * that can be mapped with a single call to update mapping. 1344 */ 1345 if (i < offset + npages - 1 && 1346 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) 1347 continue; 1348 1349 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", 1350 last_start, prange->start + i, last_domain ? "GPU" : "CPU"); 1351 1352 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); 1353 if (readonly) 1354 pte_flags &= ~AMDGPU_PTE_WRITEABLE; 1355 1356 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n", 1357 prange->svms, last_start, prange->start + i, 1358 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, 1359 pte_flags); 1360 1361 /* For dGPU mode, we use same vm_manager to allocate VRAM for 1362 * different memory partition based on fpfn/lpfn, we should use 1363 * same vm_manager.vram_base_offset regardless memory partition. 1364 */ 1365 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, 1366 last_start, prange->start + i, 1367 pte_flags, 1368 (last_start - prange->start) << PAGE_SHIFT, 1369 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, 1370 NULL, dma_addr, &vm->last_update); 1371 1372 for (j = last_start - prange->start; j <= i; j++) 1373 dma_addr[j] |= last_domain; 1374 1375 if (r) { 1376 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); 1377 goto out; 1378 } 1379 last_start = prange->start + i + 1; 1380 } 1381 1382 r = amdgpu_vm_update_pdes(adev, vm, false); 1383 if (r) { 1384 pr_debug("failed %d to update directories 0x%lx\n", r, 1385 prange->start); 1386 goto out; 1387 } 1388 1389 if (fence) 1390 *fence = dma_fence_get(vm->last_update); 1391 1392 out: 1393 return r; 1394 } 1395 1396 static int 1397 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, 1398 unsigned long npages, bool readonly, 1399 unsigned long *bitmap, bool wait, bool flush_tlb) 1400 { 1401 struct kfd_process_device *pdd; 1402 struct amdgpu_device *bo_adev = NULL; 1403 struct kfd_process *p; 1404 struct dma_fence *fence = NULL; 1405 uint32_t gpuidx; 1406 int r = 0; 1407 1408 if (prange->svm_bo && prange->ttm_res) 1409 bo_adev = prange->svm_bo->node->adev; 1410 1411 p = container_of(prange->svms, struct kfd_process, svms); 1412 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 1413 pr_debug("mapping to gpu idx 0x%x\n", gpuidx); 1414 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1415 if (!pdd) { 1416 pr_debug("failed to find device idx %d\n", gpuidx); 1417 return -EINVAL; 1418 } 1419 1420 pdd = kfd_bind_process_to_device(pdd->dev, p); 1421 if (IS_ERR(pdd)) 1422 return -EINVAL; 1423 1424 if (bo_adev && pdd->dev->adev != bo_adev && 1425 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { 1426 pr_debug("cannot map to device idx %d\n", gpuidx); 1427 continue; 1428 } 1429 1430 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly, 1431 prange->dma_addr[gpuidx], 1432 bo_adev, wait ? &fence : NULL, 1433 flush_tlb); 1434 if (r) 1435 break; 1436 1437 if (fence) { 1438 r = dma_fence_wait(fence, false); 1439 dma_fence_put(fence); 1440 fence = NULL; 1441 if (r) { 1442 pr_debug("failed %d to dma fence wait\n", r); 1443 break; 1444 } 1445 } 1446 1447 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); 1448 } 1449 1450 return r; 1451 } 1452 1453 struct svm_validate_context { 1454 struct kfd_process *process; 1455 struct svm_range *prange; 1456 bool intr; 1457 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 1458 struct ttm_validate_buffer tv[MAX_GPU_INSTANCE]; 1459 struct list_head validate_list; 1460 struct ww_acquire_ctx ticket; 1461 }; 1462 1463 static int svm_range_reserve_bos(struct svm_validate_context *ctx) 1464 { 1465 struct kfd_process_device *pdd; 1466 struct amdgpu_vm *vm; 1467 uint32_t gpuidx; 1468 int r; 1469 1470 INIT_LIST_HEAD(&ctx->validate_list); 1471 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { 1472 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); 1473 if (!pdd) { 1474 pr_debug("failed to find device idx %d\n", gpuidx); 1475 return -EINVAL; 1476 } 1477 vm = drm_priv_to_vm(pdd->drm_priv); 1478 1479 ctx->tv[gpuidx].bo = &vm->root.bo->tbo; 1480 ctx->tv[gpuidx].num_shared = 4; 1481 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); 1482 } 1483 1484 r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, 1485 ctx->intr, NULL); 1486 if (r) { 1487 pr_debug("failed %d to reserve bo\n", r); 1488 return r; 1489 } 1490 1491 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { 1492 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); 1493 if (!pdd) { 1494 pr_debug("failed to find device idx %d\n", gpuidx); 1495 r = -EINVAL; 1496 goto unreserve_out; 1497 } 1498 1499 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev, 1500 drm_priv_to_vm(pdd->drm_priv), 1501 svm_range_bo_validate, NULL); 1502 if (r) { 1503 pr_debug("failed %d validate pt bos\n", r); 1504 goto unreserve_out; 1505 } 1506 } 1507 1508 return 0; 1509 1510 unreserve_out: 1511 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); 1512 return r; 1513 } 1514 1515 static void svm_range_unreserve_bos(struct svm_validate_context *ctx) 1516 { 1517 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); 1518 } 1519 1520 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) 1521 { 1522 struct kfd_process_device *pdd; 1523 1524 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1525 1526 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); 1527 } 1528 1529 /* 1530 * Validation+GPU mapping with concurrent invalidation (MMU notifiers) 1531 * 1532 * To prevent concurrent destruction or change of range attributes, the 1533 * svm_read_lock must be held. The caller must not hold the svm_write_lock 1534 * because that would block concurrent evictions and lead to deadlocks. To 1535 * serialize concurrent migrations or validations of the same range, the 1536 * prange->migrate_mutex must be held. 1537 * 1538 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its 1539 * eviction fence. 1540 * 1541 * The following sequence ensures race-free validation and GPU mapping: 1542 * 1543 * 1. Reserve page table (and SVM BO if range is in VRAM) 1544 * 2. hmm_range_fault to get page addresses (if system memory) 1545 * 3. DMA-map pages (if system memory) 1546 * 4-a. Take notifier lock 1547 * 4-b. Check that pages still valid (mmu_interval_read_retry) 1548 * 4-c. Check that the range was not split or otherwise invalidated 1549 * 4-d. Update GPU page table 1550 * 4.e. Release notifier lock 1551 * 5. Release page table (and SVM BO) reservation 1552 */ 1553 static int svm_range_validate_and_map(struct mm_struct *mm, 1554 struct svm_range *prange, int32_t gpuidx, 1555 bool intr, bool wait, bool flush_tlb) 1556 { 1557 struct svm_validate_context *ctx; 1558 unsigned long start, end, addr; 1559 struct kfd_process *p; 1560 void *owner; 1561 int32_t idx; 1562 int r = 0; 1563 1564 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL); 1565 if (!ctx) 1566 return -ENOMEM; 1567 ctx->process = container_of(prange->svms, struct kfd_process, svms); 1568 ctx->prange = prange; 1569 ctx->intr = intr; 1570 1571 if (gpuidx < MAX_GPU_INSTANCE) { 1572 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE); 1573 bitmap_set(ctx->bitmap, gpuidx, 1); 1574 } else if (ctx->process->xnack_enabled) { 1575 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); 1576 1577 /* If prefetch range to GPU, or GPU retry fault migrate range to 1578 * GPU, which has ACCESS attribute to the range, create mapping 1579 * on that GPU. 1580 */ 1581 if (prange->actual_loc) { 1582 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process, 1583 prange->actual_loc); 1584 if (gpuidx < 0) { 1585 WARN_ONCE(1, "failed get device by id 0x%x\n", 1586 prange->actual_loc); 1587 r = -EINVAL; 1588 goto free_ctx; 1589 } 1590 if (test_bit(gpuidx, prange->bitmap_access)) 1591 bitmap_set(ctx->bitmap, gpuidx, 1); 1592 } 1593 } else { 1594 bitmap_or(ctx->bitmap, prange->bitmap_access, 1595 prange->bitmap_aip, MAX_GPU_INSTANCE); 1596 } 1597 1598 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { 1599 if (!prange->mapped_to_gpu) { 1600 r = 0; 1601 goto free_ctx; 1602 } 1603 1604 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); 1605 } 1606 1607 if (prange->actual_loc && !prange->ttm_res) { 1608 /* This should never happen. actual_loc gets set by 1609 * svm_migrate_ram_to_vram after allocating a BO. 1610 */ 1611 WARN_ONCE(1, "VRAM BO missing during validation\n"); 1612 r = -EINVAL; 1613 goto free_ctx; 1614 } 1615 1616 svm_range_reserve_bos(ctx); 1617 1618 p = container_of(prange->svms, struct kfd_process, svms); 1619 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, 1620 MAX_GPU_INSTANCE)); 1621 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) { 1622 if (kfd_svm_page_owner(p, idx) != owner) { 1623 owner = NULL; 1624 break; 1625 } 1626 } 1627 1628 start = prange->start << PAGE_SHIFT; 1629 end = (prange->last + 1) << PAGE_SHIFT; 1630 for (addr = start; addr < end && !r; ) { 1631 struct hmm_range *hmm_range; 1632 struct vm_area_struct *vma; 1633 unsigned long next; 1634 unsigned long offset; 1635 unsigned long npages; 1636 bool readonly; 1637 1638 vma = vma_lookup(mm, addr); 1639 if (!vma) { 1640 r = -EFAULT; 1641 goto unreserve_out; 1642 } 1643 readonly = !(vma->vm_flags & VM_WRITE); 1644 1645 next = min(vma->vm_end, end); 1646 npages = (next - addr) >> PAGE_SHIFT; 1647 WRITE_ONCE(p->svms.faulting_task, current); 1648 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, 1649 readonly, owner, NULL, 1650 &hmm_range); 1651 WRITE_ONCE(p->svms.faulting_task, NULL); 1652 if (r) { 1653 pr_debug("failed %d to get svm range pages\n", r); 1654 goto unreserve_out; 1655 } 1656 1657 offset = (addr - start) >> PAGE_SHIFT; 1658 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, 1659 hmm_range->hmm_pfns); 1660 if (r) { 1661 pr_debug("failed %d to dma map range\n", r); 1662 goto unreserve_out; 1663 } 1664 1665 svm_range_lock(prange); 1666 if (amdgpu_hmm_range_get_pages_done(hmm_range)) { 1667 pr_debug("hmm update the range, need validate again\n"); 1668 r = -EAGAIN; 1669 goto unlock_out; 1670 } 1671 if (!list_empty(&prange->child_list)) { 1672 pr_debug("range split by unmap in parallel, validate again\n"); 1673 r = -EAGAIN; 1674 goto unlock_out; 1675 } 1676 1677 r = svm_range_map_to_gpus(prange, offset, npages, readonly, 1678 ctx->bitmap, wait, flush_tlb); 1679 1680 unlock_out: 1681 svm_range_unlock(prange); 1682 1683 addr = next; 1684 } 1685 1686 if (addr == end) { 1687 prange->validated_once = true; 1688 prange->mapped_to_gpu = true; 1689 } 1690 1691 unreserve_out: 1692 svm_range_unreserve_bos(ctx); 1693 1694 prange->is_error_flag = !!r; 1695 if (!r) 1696 prange->validate_timestamp = ktime_get_boottime(); 1697 1698 free_ctx: 1699 kfree(ctx); 1700 1701 return r; 1702 } 1703 1704 /** 1705 * svm_range_list_lock_and_flush_work - flush pending deferred work 1706 * 1707 * @svms: the svm range list 1708 * @mm: the mm structure 1709 * 1710 * Context: Returns with mmap write lock held, pending deferred work flushed 1711 * 1712 */ 1713 void 1714 svm_range_list_lock_and_flush_work(struct svm_range_list *svms, 1715 struct mm_struct *mm) 1716 { 1717 retry_flush_work: 1718 flush_work(&svms->deferred_list_work); 1719 mmap_write_lock(mm); 1720 1721 if (list_empty(&svms->deferred_range_list)) 1722 return; 1723 mmap_write_unlock(mm); 1724 pr_debug("retry flush\n"); 1725 goto retry_flush_work; 1726 } 1727 1728 static void svm_range_restore_work(struct work_struct *work) 1729 { 1730 struct delayed_work *dwork = to_delayed_work(work); 1731 struct amdkfd_process_info *process_info; 1732 struct svm_range_list *svms; 1733 struct svm_range *prange; 1734 struct kfd_process *p; 1735 struct mm_struct *mm; 1736 int evicted_ranges; 1737 int invalid; 1738 int r; 1739 1740 svms = container_of(dwork, struct svm_range_list, restore_work); 1741 evicted_ranges = atomic_read(&svms->evicted_ranges); 1742 if (!evicted_ranges) 1743 return; 1744 1745 pr_debug("restore svm ranges\n"); 1746 1747 p = container_of(svms, struct kfd_process, svms); 1748 process_info = p->kgd_process_info; 1749 1750 /* Keep mm reference when svm_range_validate_and_map ranges */ 1751 mm = get_task_mm(p->lead_thread); 1752 if (!mm) { 1753 pr_debug("svms 0x%p process mm gone\n", svms); 1754 return; 1755 } 1756 1757 mutex_lock(&process_info->lock); 1758 svm_range_list_lock_and_flush_work(svms, mm); 1759 mutex_lock(&svms->lock); 1760 1761 evicted_ranges = atomic_read(&svms->evicted_ranges); 1762 1763 list_for_each_entry(prange, &svms->list, list) { 1764 invalid = atomic_read(&prange->invalid); 1765 if (!invalid) 1766 continue; 1767 1768 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n", 1769 prange->svms, prange, prange->start, prange->last, 1770 invalid); 1771 1772 /* 1773 * If range is migrating, wait for migration is done. 1774 */ 1775 mutex_lock(&prange->migrate_mutex); 1776 1777 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, 1778 false, true, false); 1779 if (r) 1780 pr_debug("failed %d to map 0x%lx to gpus\n", r, 1781 prange->start); 1782 1783 mutex_unlock(&prange->migrate_mutex); 1784 if (r) 1785 goto out_reschedule; 1786 1787 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid) 1788 goto out_reschedule; 1789 } 1790 1791 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) != 1792 evicted_ranges) 1793 goto out_reschedule; 1794 1795 evicted_ranges = 0; 1796 1797 r = kgd2kfd_resume_mm(mm); 1798 if (r) { 1799 /* No recovery from this failure. Probably the CP is 1800 * hanging. No point trying again. 1801 */ 1802 pr_debug("failed %d to resume KFD\n", r); 1803 } 1804 1805 pr_debug("restore svm ranges successfully\n"); 1806 1807 out_reschedule: 1808 mutex_unlock(&svms->lock); 1809 mmap_write_unlock(mm); 1810 mutex_unlock(&process_info->lock); 1811 1812 /* If validation failed, reschedule another attempt */ 1813 if (evicted_ranges) { 1814 pr_debug("reschedule to restore svm range\n"); 1815 schedule_delayed_work(&svms->restore_work, 1816 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); 1817 1818 kfd_smi_event_queue_restore_rescheduled(mm); 1819 } 1820 mmput(mm); 1821 } 1822 1823 /** 1824 * svm_range_evict - evict svm range 1825 * @prange: svm range structure 1826 * @mm: current process mm_struct 1827 * @start: starting process queue number 1828 * @last: last process queue number 1829 * @event: mmu notifier event when range is evicted or migrated 1830 * 1831 * Stop all queues of the process to ensure GPU doesn't access the memory, then 1832 * return to let CPU evict the buffer and proceed CPU pagetable update. 1833 * 1834 * Don't need use lock to sync cpu pagetable invalidation with GPU execution. 1835 * If invalidation happens while restore work is running, restore work will 1836 * restart to ensure to get the latest CPU pages mapping to GPU, then start 1837 * the queues. 1838 */ 1839 static int 1840 svm_range_evict(struct svm_range *prange, struct mm_struct *mm, 1841 unsigned long start, unsigned long last, 1842 enum mmu_notifier_event event) 1843 { 1844 struct svm_range_list *svms = prange->svms; 1845 struct svm_range *pchild; 1846 struct kfd_process *p; 1847 int r = 0; 1848 1849 p = container_of(svms, struct kfd_process, svms); 1850 1851 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 1852 svms, prange->start, prange->last, start, last); 1853 1854 if (!p->xnack_enabled || 1855 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) { 1856 int evicted_ranges; 1857 bool mapped = prange->mapped_to_gpu; 1858 1859 list_for_each_entry(pchild, &prange->child_list, child_list) { 1860 if (!pchild->mapped_to_gpu) 1861 continue; 1862 mapped = true; 1863 mutex_lock_nested(&pchild->lock, 1); 1864 if (pchild->start <= last && pchild->last >= start) { 1865 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n", 1866 pchild->start, pchild->last); 1867 atomic_inc(&pchild->invalid); 1868 } 1869 mutex_unlock(&pchild->lock); 1870 } 1871 1872 if (!mapped) 1873 return r; 1874 1875 if (prange->start <= last && prange->last >= start) 1876 atomic_inc(&prange->invalid); 1877 1878 evicted_ranges = atomic_inc_return(&svms->evicted_ranges); 1879 if (evicted_ranges != 1) 1880 return r; 1881 1882 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n", 1883 prange->svms, prange->start, prange->last); 1884 1885 /* First eviction, stop the queues */ 1886 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM); 1887 if (r) 1888 pr_debug("failed to quiesce KFD\n"); 1889 1890 pr_debug("schedule to restore svm %p ranges\n", svms); 1891 schedule_delayed_work(&svms->restore_work, 1892 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); 1893 } else { 1894 unsigned long s, l; 1895 uint32_t trigger; 1896 1897 if (event == MMU_NOTIFY_MIGRATE) 1898 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE; 1899 else 1900 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY; 1901 1902 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n", 1903 prange->svms, start, last); 1904 list_for_each_entry(pchild, &prange->child_list, child_list) { 1905 mutex_lock_nested(&pchild->lock, 1); 1906 s = max(start, pchild->start); 1907 l = min(last, pchild->last); 1908 if (l >= s) 1909 svm_range_unmap_from_gpus(pchild, s, l, trigger); 1910 mutex_unlock(&pchild->lock); 1911 } 1912 s = max(start, prange->start); 1913 l = min(last, prange->last); 1914 if (l >= s) 1915 svm_range_unmap_from_gpus(prange, s, l, trigger); 1916 } 1917 1918 return r; 1919 } 1920 1921 static struct svm_range *svm_range_clone(struct svm_range *old) 1922 { 1923 struct svm_range *new; 1924 1925 new = svm_range_new(old->svms, old->start, old->last, false); 1926 if (!new) 1927 return NULL; 1928 1929 if (old->svm_bo) { 1930 new->ttm_res = old->ttm_res; 1931 new->offset = old->offset; 1932 new->svm_bo = svm_range_bo_ref(old->svm_bo); 1933 spin_lock(&new->svm_bo->list_lock); 1934 list_add(&new->svm_bo_list, &new->svm_bo->range_list); 1935 spin_unlock(&new->svm_bo->list_lock); 1936 } 1937 new->flags = old->flags; 1938 new->preferred_loc = old->preferred_loc; 1939 new->prefetch_loc = old->prefetch_loc; 1940 new->actual_loc = old->actual_loc; 1941 new->granularity = old->granularity; 1942 new->mapped_to_gpu = old->mapped_to_gpu; 1943 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); 1944 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); 1945 1946 return new; 1947 } 1948 1949 void svm_range_set_max_pages(struct amdgpu_device *adev) 1950 { 1951 uint64_t max_pages; 1952 uint64_t pages, _pages; 1953 uint64_t min_pages = 0; 1954 int i, id; 1955 1956 for (i = 0; i < adev->kfd.dev->num_nodes; i++) { 1957 if (adev->kfd.dev->nodes[i]->xcp) 1958 id = adev->kfd.dev->nodes[i]->xcp->id; 1959 else 1960 id = -1; 1961 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17; 1962 pages = clamp(pages, 1ULL << 9, 1ULL << 18); 1963 pages = rounddown_pow_of_two(pages); 1964 min_pages = min_not_zero(min_pages, pages); 1965 } 1966 1967 do { 1968 max_pages = READ_ONCE(max_svm_range_pages); 1969 _pages = min_not_zero(max_pages, min_pages); 1970 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); 1971 } 1972 1973 static int 1974 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, 1975 uint64_t max_pages, struct list_head *insert_list, 1976 struct list_head *update_list) 1977 { 1978 struct svm_range *prange; 1979 uint64_t l; 1980 1981 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n", 1982 max_pages, start, last); 1983 1984 while (last >= start) { 1985 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1); 1986 1987 prange = svm_range_new(svms, start, l, true); 1988 if (!prange) 1989 return -ENOMEM; 1990 list_add(&prange->list, insert_list); 1991 list_add(&prange->update_list, update_list); 1992 1993 start = l + 1; 1994 } 1995 return 0; 1996 } 1997 1998 /** 1999 * svm_range_add - add svm range and handle overlap 2000 * @p: the range add to this process svms 2001 * @start: page size aligned 2002 * @size: page size aligned 2003 * @nattr: number of attributes 2004 * @attrs: array of attributes 2005 * @update_list: output, the ranges need validate and update GPU mapping 2006 * @insert_list: output, the ranges need insert to svms 2007 * @remove_list: output, the ranges are replaced and need remove from svms 2008 * 2009 * Check if the virtual address range has overlap with any existing ranges, 2010 * split partly overlapping ranges and add new ranges in the gaps. All changes 2011 * should be applied to the range_list and interval tree transactionally. If 2012 * any range split or allocation fails, the entire update fails. Therefore any 2013 * existing overlapping svm_ranges are cloned and the original svm_ranges left 2014 * unchanged. 2015 * 2016 * If the transaction succeeds, the caller can update and insert clones and 2017 * new ranges, then free the originals. 2018 * 2019 * Otherwise the caller can free the clones and new ranges, while the old 2020 * svm_ranges remain unchanged. 2021 * 2022 * Context: Process context, caller must hold svms->lock 2023 * 2024 * Return: 2025 * 0 - OK, otherwise error code 2026 */ 2027 static int 2028 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, 2029 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, 2030 struct list_head *update_list, struct list_head *insert_list, 2031 struct list_head *remove_list) 2032 { 2033 unsigned long last = start + size - 1UL; 2034 struct svm_range_list *svms = &p->svms; 2035 struct interval_tree_node *node; 2036 struct svm_range *prange; 2037 struct svm_range *tmp; 2038 struct list_head new_list; 2039 int r = 0; 2040 2041 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); 2042 2043 INIT_LIST_HEAD(update_list); 2044 INIT_LIST_HEAD(insert_list); 2045 INIT_LIST_HEAD(remove_list); 2046 INIT_LIST_HEAD(&new_list); 2047 2048 node = interval_tree_iter_first(&svms->objects, start, last); 2049 while (node) { 2050 struct interval_tree_node *next; 2051 unsigned long next_start; 2052 2053 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start, 2054 node->last); 2055 2056 prange = container_of(node, struct svm_range, it_node); 2057 next = interval_tree_iter_next(node, start, last); 2058 next_start = min(node->last, last) + 1; 2059 2060 if (svm_range_is_same_attrs(p, prange, nattr, attrs)) { 2061 /* nothing to do */ 2062 } else if (node->start < start || node->last > last) { 2063 /* node intersects the update range and its attributes 2064 * will change. Clone and split it, apply updates only 2065 * to the overlapping part 2066 */ 2067 struct svm_range *old = prange; 2068 2069 prange = svm_range_clone(old); 2070 if (!prange) { 2071 r = -ENOMEM; 2072 goto out; 2073 } 2074 2075 list_add(&old->update_list, remove_list); 2076 list_add(&prange->list, insert_list); 2077 list_add(&prange->update_list, update_list); 2078 2079 if (node->start < start) { 2080 pr_debug("change old range start\n"); 2081 r = svm_range_split_head(prange, start, 2082 insert_list); 2083 if (r) 2084 goto out; 2085 } 2086 if (node->last > last) { 2087 pr_debug("change old range last\n"); 2088 r = svm_range_split_tail(prange, last, 2089 insert_list); 2090 if (r) 2091 goto out; 2092 } 2093 } else { 2094 /* The node is contained within start..last, 2095 * just update it 2096 */ 2097 list_add(&prange->update_list, update_list); 2098 } 2099 2100 /* insert a new node if needed */ 2101 if (node->start > start) { 2102 r = svm_range_split_new(svms, start, node->start - 1, 2103 READ_ONCE(max_svm_range_pages), 2104 &new_list, update_list); 2105 if (r) 2106 goto out; 2107 } 2108 2109 node = next; 2110 start = next_start; 2111 } 2112 2113 /* add a final range at the end if needed */ 2114 if (start <= last) 2115 r = svm_range_split_new(svms, start, last, 2116 READ_ONCE(max_svm_range_pages), 2117 &new_list, update_list); 2118 2119 out: 2120 if (r) { 2121 list_for_each_entry_safe(prange, tmp, insert_list, list) 2122 svm_range_free(prange, false); 2123 list_for_each_entry_safe(prange, tmp, &new_list, list) 2124 svm_range_free(prange, true); 2125 } else { 2126 list_splice(&new_list, insert_list); 2127 } 2128 2129 return r; 2130 } 2131 2132 static void 2133 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm, 2134 struct svm_range *prange) 2135 { 2136 unsigned long start; 2137 unsigned long last; 2138 2139 start = prange->notifier.interval_tree.start >> PAGE_SHIFT; 2140 last = prange->notifier.interval_tree.last >> PAGE_SHIFT; 2141 2142 if (prange->start == start && prange->last == last) 2143 return; 2144 2145 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 2146 prange->svms, prange, start, last, prange->start, 2147 prange->last); 2148 2149 if (start != 0 && last != 0) { 2150 interval_tree_remove(&prange->it_node, &prange->svms->objects); 2151 svm_range_remove_notifier(prange); 2152 } 2153 prange->it_node.start = prange->start; 2154 prange->it_node.last = prange->last; 2155 2156 interval_tree_insert(&prange->it_node, &prange->svms->objects); 2157 svm_range_add_notifier_locked(mm, prange); 2158 } 2159 2160 static void 2161 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, 2162 struct mm_struct *mm) 2163 { 2164 switch (prange->work_item.op) { 2165 case SVM_OP_NULL: 2166 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2167 svms, prange, prange->start, prange->last); 2168 break; 2169 case SVM_OP_UNMAP_RANGE: 2170 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2171 svms, prange, prange->start, prange->last); 2172 svm_range_unlink(prange); 2173 svm_range_remove_notifier(prange); 2174 svm_range_free(prange, true); 2175 break; 2176 case SVM_OP_UPDATE_RANGE_NOTIFIER: 2177 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2178 svms, prange, prange->start, prange->last); 2179 svm_range_update_notifier_and_interval_tree(mm, prange); 2180 break; 2181 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP: 2182 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2183 svms, prange, prange->start, prange->last); 2184 svm_range_update_notifier_and_interval_tree(mm, prange); 2185 /* TODO: implement deferred validation and mapping */ 2186 break; 2187 case SVM_OP_ADD_RANGE: 2188 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange, 2189 prange->start, prange->last); 2190 svm_range_add_to_svms(prange); 2191 svm_range_add_notifier_locked(mm, prange); 2192 break; 2193 case SVM_OP_ADD_RANGE_AND_MAP: 2194 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, 2195 prange, prange->start, prange->last); 2196 svm_range_add_to_svms(prange); 2197 svm_range_add_notifier_locked(mm, prange); 2198 /* TODO: implement deferred validation and mapping */ 2199 break; 2200 default: 2201 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange, 2202 prange->work_item.op); 2203 } 2204 } 2205 2206 static void svm_range_drain_retry_fault(struct svm_range_list *svms) 2207 { 2208 struct kfd_process_device *pdd; 2209 struct kfd_process *p; 2210 int drain; 2211 uint32_t i; 2212 2213 p = container_of(svms, struct kfd_process, svms); 2214 2215 restart: 2216 drain = atomic_read(&svms->drain_pagefaults); 2217 if (!drain) 2218 return; 2219 2220 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { 2221 pdd = p->pdds[i]; 2222 if (!pdd) 2223 continue; 2224 2225 pr_debug("drain retry fault gpu %d svms %p\n", i, svms); 2226 2227 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, 2228 pdd->dev->adev->irq.retry_cam_enabled ? 2229 &pdd->dev->adev->irq.ih : 2230 &pdd->dev->adev->irq.ih1); 2231 2232 if (pdd->dev->adev->irq.retry_cam_enabled) 2233 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, 2234 &pdd->dev->adev->irq.ih_soft); 2235 2236 2237 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); 2238 } 2239 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) 2240 goto restart; 2241 } 2242 2243 static void svm_range_deferred_list_work(struct work_struct *work) 2244 { 2245 struct svm_range_list *svms; 2246 struct svm_range *prange; 2247 struct mm_struct *mm; 2248 2249 svms = container_of(work, struct svm_range_list, deferred_list_work); 2250 pr_debug("enter svms 0x%p\n", svms); 2251 2252 spin_lock(&svms->deferred_list_lock); 2253 while (!list_empty(&svms->deferred_range_list)) { 2254 prange = list_first_entry(&svms->deferred_range_list, 2255 struct svm_range, deferred_list); 2256 spin_unlock(&svms->deferred_list_lock); 2257 2258 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, 2259 prange->start, prange->last, prange->work_item.op); 2260 2261 mm = prange->work_item.mm; 2262 retry: 2263 mmap_write_lock(mm); 2264 2265 /* Checking for the need to drain retry faults must be inside 2266 * mmap write lock to serialize with munmap notifiers. 2267 */ 2268 if (unlikely(atomic_read(&svms->drain_pagefaults))) { 2269 mmap_write_unlock(mm); 2270 svm_range_drain_retry_fault(svms); 2271 goto retry; 2272 } 2273 2274 /* Remove from deferred_list must be inside mmap write lock, for 2275 * two race cases: 2276 * 1. unmap_from_cpu may change work_item.op and add the range 2277 * to deferred_list again, cause use after free bug. 2278 * 2. svm_range_list_lock_and_flush_work may hold mmap write 2279 * lock and continue because deferred_list is empty, but 2280 * deferred_list work is actually waiting for mmap lock. 2281 */ 2282 spin_lock(&svms->deferred_list_lock); 2283 list_del_init(&prange->deferred_list); 2284 spin_unlock(&svms->deferred_list_lock); 2285 2286 mutex_lock(&svms->lock); 2287 mutex_lock(&prange->migrate_mutex); 2288 while (!list_empty(&prange->child_list)) { 2289 struct svm_range *pchild; 2290 2291 pchild = list_first_entry(&prange->child_list, 2292 struct svm_range, child_list); 2293 pr_debug("child prange 0x%p op %d\n", pchild, 2294 pchild->work_item.op); 2295 list_del_init(&pchild->child_list); 2296 svm_range_handle_list_op(svms, pchild, mm); 2297 } 2298 mutex_unlock(&prange->migrate_mutex); 2299 2300 svm_range_handle_list_op(svms, prange, mm); 2301 mutex_unlock(&svms->lock); 2302 mmap_write_unlock(mm); 2303 2304 /* Pairs with mmget in svm_range_add_list_work */ 2305 mmput(mm); 2306 2307 spin_lock(&svms->deferred_list_lock); 2308 } 2309 spin_unlock(&svms->deferred_list_lock); 2310 pr_debug("exit svms 0x%p\n", svms); 2311 } 2312 2313 void 2314 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, 2315 struct mm_struct *mm, enum svm_work_list_ops op) 2316 { 2317 spin_lock(&svms->deferred_list_lock); 2318 /* if prange is on the deferred list */ 2319 if (!list_empty(&prange->deferred_list)) { 2320 pr_debug("update exist prange 0x%p work op %d\n", prange, op); 2321 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n"); 2322 if (op != SVM_OP_NULL && 2323 prange->work_item.op != SVM_OP_UNMAP_RANGE) 2324 prange->work_item.op = op; 2325 } else { 2326 prange->work_item.op = op; 2327 2328 /* Pairs with mmput in deferred_list_work */ 2329 mmget(mm); 2330 prange->work_item.mm = mm; 2331 list_add_tail(&prange->deferred_list, 2332 &prange->svms->deferred_range_list); 2333 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", 2334 prange, prange->start, prange->last, op); 2335 } 2336 spin_unlock(&svms->deferred_list_lock); 2337 } 2338 2339 void schedule_deferred_list_work(struct svm_range_list *svms) 2340 { 2341 spin_lock(&svms->deferred_list_lock); 2342 if (!list_empty(&svms->deferred_range_list)) 2343 schedule_work(&svms->deferred_list_work); 2344 spin_unlock(&svms->deferred_list_lock); 2345 } 2346 2347 static void 2348 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, 2349 struct svm_range *prange, unsigned long start, 2350 unsigned long last) 2351 { 2352 struct svm_range *head; 2353 struct svm_range *tail; 2354 2355 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2356 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange, 2357 prange->start, prange->last); 2358 return; 2359 } 2360 if (start > prange->last || last < prange->start) 2361 return; 2362 2363 head = tail = prange; 2364 if (start > prange->start) 2365 svm_range_split(prange, prange->start, start - 1, &tail); 2366 if (last < tail->last) 2367 svm_range_split(tail, last + 1, tail->last, &head); 2368 2369 if (head != prange && tail != prange) { 2370 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2371 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); 2372 } else if (tail != prange) { 2373 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); 2374 } else if (head != prange) { 2375 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2376 } else if (parent != prange) { 2377 prange->work_item.op = SVM_OP_UNMAP_RANGE; 2378 } 2379 } 2380 2381 static void 2382 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, 2383 unsigned long start, unsigned long last) 2384 { 2385 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU; 2386 struct svm_range_list *svms; 2387 struct svm_range *pchild; 2388 struct kfd_process *p; 2389 unsigned long s, l; 2390 bool unmap_parent; 2391 2392 p = kfd_lookup_process_by_mm(mm); 2393 if (!p) 2394 return; 2395 svms = &p->svms; 2396 2397 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, 2398 prange, prange->start, prange->last, start, last); 2399 2400 /* Make sure pending page faults are drained in the deferred worker 2401 * before the range is freed to avoid straggler interrupts on 2402 * unmapped memory causing "phantom faults". 2403 */ 2404 atomic_inc(&svms->drain_pagefaults); 2405 2406 unmap_parent = start <= prange->start && last >= prange->last; 2407 2408 list_for_each_entry(pchild, &prange->child_list, child_list) { 2409 mutex_lock_nested(&pchild->lock, 1); 2410 s = max(start, pchild->start); 2411 l = min(last, pchild->last); 2412 if (l >= s) 2413 svm_range_unmap_from_gpus(pchild, s, l, trigger); 2414 svm_range_unmap_split(mm, prange, pchild, start, last); 2415 mutex_unlock(&pchild->lock); 2416 } 2417 s = max(start, prange->start); 2418 l = min(last, prange->last); 2419 if (l >= s) 2420 svm_range_unmap_from_gpus(prange, s, l, trigger); 2421 svm_range_unmap_split(mm, prange, prange, start, last); 2422 2423 if (unmap_parent) 2424 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); 2425 else 2426 svm_range_add_list_work(svms, prange, mm, 2427 SVM_OP_UPDATE_RANGE_NOTIFIER); 2428 schedule_deferred_list_work(svms); 2429 2430 kfd_unref_process(p); 2431 } 2432 2433 /** 2434 * svm_range_cpu_invalidate_pagetables - interval notifier callback 2435 * @mni: mmu_interval_notifier struct 2436 * @range: mmu_notifier_range struct 2437 * @cur_seq: value to pass to mmu_interval_set_seq() 2438 * 2439 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it 2440 * is from migration, or CPU page invalidation callback. 2441 * 2442 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed 2443 * work thread, and split prange if only part of prange is unmapped. 2444 * 2445 * For invalidation event, if GPU retry fault is not enabled, evict the queues, 2446 * then schedule svm_range_restore_work to update GPU mapping and resume queues. 2447 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will 2448 * update GPU mapping to recover. 2449 * 2450 * Context: mmap lock, notifier_invalidate_start lock are held 2451 * for invalidate event, prange lock is held if this is from migration 2452 */ 2453 static bool 2454 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, 2455 const struct mmu_notifier_range *range, 2456 unsigned long cur_seq) 2457 { 2458 struct svm_range *prange; 2459 unsigned long start; 2460 unsigned long last; 2461 2462 if (range->event == MMU_NOTIFY_RELEASE) 2463 return true; 2464 if (!mmget_not_zero(mni->mm)) 2465 return true; 2466 2467 start = mni->interval_tree.start; 2468 last = mni->interval_tree.last; 2469 start = max(start, range->start) >> PAGE_SHIFT; 2470 last = min(last, range->end - 1) >> PAGE_SHIFT; 2471 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n", 2472 start, last, range->start >> PAGE_SHIFT, 2473 (range->end - 1) >> PAGE_SHIFT, 2474 mni->interval_tree.start >> PAGE_SHIFT, 2475 mni->interval_tree.last >> PAGE_SHIFT, range->event); 2476 2477 prange = container_of(mni, struct svm_range, notifier); 2478 2479 svm_range_lock(prange); 2480 mmu_interval_set_seq(mni, cur_seq); 2481 2482 switch (range->event) { 2483 case MMU_NOTIFY_UNMAP: 2484 svm_range_unmap_from_cpu(mni->mm, prange, start, last); 2485 break; 2486 default: 2487 svm_range_evict(prange, mni->mm, start, last, range->event); 2488 break; 2489 } 2490 2491 svm_range_unlock(prange); 2492 mmput(mni->mm); 2493 2494 return true; 2495 } 2496 2497 /** 2498 * svm_range_from_addr - find svm range from fault address 2499 * @svms: svm range list header 2500 * @addr: address to search range interval tree, in pages 2501 * @parent: parent range if range is on child list 2502 * 2503 * Context: The caller must hold svms->lock 2504 * 2505 * Return: the svm_range found or NULL 2506 */ 2507 struct svm_range * 2508 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, 2509 struct svm_range **parent) 2510 { 2511 struct interval_tree_node *node; 2512 struct svm_range *prange; 2513 struct svm_range *pchild; 2514 2515 node = interval_tree_iter_first(&svms->objects, addr, addr); 2516 if (!node) 2517 return NULL; 2518 2519 prange = container_of(node, struct svm_range, it_node); 2520 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n", 2521 addr, prange->start, prange->last, node->start, node->last); 2522 2523 if (addr >= prange->start && addr <= prange->last) { 2524 if (parent) 2525 *parent = prange; 2526 return prange; 2527 } 2528 list_for_each_entry(pchild, &prange->child_list, child_list) 2529 if (addr >= pchild->start && addr <= pchild->last) { 2530 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n", 2531 addr, pchild->start, pchild->last); 2532 if (parent) 2533 *parent = prange; 2534 return pchild; 2535 } 2536 2537 return NULL; 2538 } 2539 2540 /* svm_range_best_restore_location - decide the best fault restore location 2541 * @prange: svm range structure 2542 * @adev: the GPU on which vm fault happened 2543 * 2544 * This is only called when xnack is on, to decide the best location to restore 2545 * the range mapping after GPU vm fault. Caller uses the best location to do 2546 * migration if actual loc is not best location, then update GPU page table 2547 * mapping to the best location. 2548 * 2549 * If the preferred loc is accessible by faulting GPU, use preferred loc. 2550 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu 2551 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then 2552 * if range actual loc is cpu, best_loc is cpu 2553 * if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is 2554 * range actual loc. 2555 * Otherwise, GPU no access, best_loc is -1. 2556 * 2557 * Return: 2558 * -1 means vm fault GPU no access 2559 * 0 for CPU or GPU id 2560 */ 2561 static int32_t 2562 svm_range_best_restore_location(struct svm_range *prange, 2563 struct kfd_node *node, 2564 int32_t *gpuidx) 2565 { 2566 struct kfd_node *bo_node, *preferred_node; 2567 struct kfd_process *p; 2568 uint32_t gpuid; 2569 int r; 2570 2571 p = container_of(prange->svms, struct kfd_process, svms); 2572 2573 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx); 2574 if (r < 0) { 2575 pr_debug("failed to get gpuid from kgd\n"); 2576 return -1; 2577 } 2578 2579 if (node->adev->gmc.is_app_apu) 2580 return 0; 2581 2582 if (prange->preferred_loc == gpuid || 2583 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { 2584 return prange->preferred_loc; 2585 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { 2586 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc); 2587 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node)) 2588 return prange->preferred_loc; 2589 /* fall through */ 2590 } 2591 2592 if (test_bit(*gpuidx, prange->bitmap_access)) 2593 return gpuid; 2594 2595 if (test_bit(*gpuidx, prange->bitmap_aip)) { 2596 if (!prange->actual_loc) 2597 return 0; 2598 2599 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc); 2600 if (bo_node && svm_nodes_in_same_hive(node, bo_node)) 2601 return prange->actual_loc; 2602 else 2603 return 0; 2604 } 2605 2606 return -1; 2607 } 2608 2609 static int 2610 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, 2611 unsigned long *start, unsigned long *last, 2612 bool *is_heap_stack) 2613 { 2614 struct vm_area_struct *vma; 2615 struct interval_tree_node *node; 2616 unsigned long start_limit, end_limit; 2617 2618 vma = vma_lookup(p->mm, addr << PAGE_SHIFT); 2619 if (!vma) { 2620 pr_debug("VMA does not exist in address [0x%llx]\n", addr); 2621 return -EFAULT; 2622 } 2623 2624 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma); 2625 2626 start_limit = max(vma->vm_start >> PAGE_SHIFT, 2627 (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); 2628 end_limit = min(vma->vm_end >> PAGE_SHIFT, 2629 (unsigned long)ALIGN(addr + 1, 2UL << 8)); 2630 /* First range that starts after the fault address */ 2631 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); 2632 if (node) { 2633 end_limit = min(end_limit, node->start); 2634 /* Last range that ends before the fault address */ 2635 node = container_of(rb_prev(&node->rb), 2636 struct interval_tree_node, rb); 2637 } else { 2638 /* Last range must end before addr because 2639 * there was no range after addr 2640 */ 2641 node = container_of(rb_last(&p->svms.objects.rb_root), 2642 struct interval_tree_node, rb); 2643 } 2644 if (node) { 2645 if (node->last >= addr) { 2646 WARN(1, "Overlap with prev node and page fault addr\n"); 2647 return -EFAULT; 2648 } 2649 start_limit = max(start_limit, node->last + 1); 2650 } 2651 2652 *start = start_limit; 2653 *last = end_limit - 1; 2654 2655 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", 2656 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, 2657 *start, *last, *is_heap_stack); 2658 2659 return 0; 2660 } 2661 2662 static int 2663 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, 2664 uint64_t *bo_s, uint64_t *bo_l) 2665 { 2666 struct amdgpu_bo_va_mapping *mapping; 2667 struct interval_tree_node *node; 2668 struct amdgpu_bo *bo = NULL; 2669 unsigned long userptr; 2670 uint32_t i; 2671 int r; 2672 2673 for (i = 0; i < p->n_pdds; i++) { 2674 struct amdgpu_vm *vm; 2675 2676 if (!p->pdds[i]->drm_priv) 2677 continue; 2678 2679 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 2680 r = amdgpu_bo_reserve(vm->root.bo, false); 2681 if (r) 2682 return r; 2683 2684 /* Check userptr by searching entire vm->va interval tree */ 2685 node = interval_tree_iter_first(&vm->va, 0, ~0ULL); 2686 while (node) { 2687 mapping = container_of((struct rb_node *)node, 2688 struct amdgpu_bo_va_mapping, rb); 2689 bo = mapping->bo_va->base.bo; 2690 2691 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, 2692 start << PAGE_SHIFT, 2693 last << PAGE_SHIFT, 2694 &userptr)) { 2695 node = interval_tree_iter_next(node, 0, ~0ULL); 2696 continue; 2697 } 2698 2699 pr_debug("[0x%llx 0x%llx] already userptr mapped\n", 2700 start, last); 2701 if (bo_s && bo_l) { 2702 *bo_s = userptr >> PAGE_SHIFT; 2703 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1; 2704 } 2705 amdgpu_bo_unreserve(vm->root.bo); 2706 return -EADDRINUSE; 2707 } 2708 amdgpu_bo_unreserve(vm->root.bo); 2709 } 2710 return 0; 2711 } 2712 2713 static struct 2714 svm_range *svm_range_create_unregistered_range(struct kfd_node *node, 2715 struct kfd_process *p, 2716 struct mm_struct *mm, 2717 int64_t addr) 2718 { 2719 struct svm_range *prange = NULL; 2720 unsigned long start, last; 2721 uint32_t gpuid, gpuidx; 2722 bool is_heap_stack; 2723 uint64_t bo_s = 0; 2724 uint64_t bo_l = 0; 2725 int r; 2726 2727 if (svm_range_get_range_boundaries(p, addr, &start, &last, 2728 &is_heap_stack)) 2729 return NULL; 2730 2731 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); 2732 if (r != -EADDRINUSE) 2733 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); 2734 2735 if (r == -EADDRINUSE) { 2736 if (addr >= bo_s && addr <= bo_l) 2737 return NULL; 2738 2739 /* Create one page svm range if 2MB range overlapping */ 2740 start = addr; 2741 last = addr; 2742 } 2743 2744 prange = svm_range_new(&p->svms, start, last, true); 2745 if (!prange) { 2746 pr_debug("Failed to create prange in address [0x%llx]\n", addr); 2747 return NULL; 2748 } 2749 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { 2750 pr_debug("failed to get gpuid from kgd\n"); 2751 svm_range_free(prange, true); 2752 return NULL; 2753 } 2754 2755 if (is_heap_stack) 2756 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; 2757 2758 svm_range_add_to_svms(prange); 2759 svm_range_add_notifier_locked(mm, prange); 2760 2761 return prange; 2762 } 2763 2764 /* svm_range_skip_recover - decide if prange can be recovered 2765 * @prange: svm range structure 2766 * 2767 * GPU vm retry fault handle skip recover the range for cases: 2768 * 1. prange is on deferred list to be removed after unmap, it is stale fault, 2769 * deferred list work will drain the stale fault before free the prange. 2770 * 2. prange is on deferred list to add interval notifier after split, or 2771 * 3. prange is child range, it is split from parent prange, recover later 2772 * after interval notifier is added. 2773 * 2774 * Return: true to skip recover, false to recover 2775 */ 2776 static bool svm_range_skip_recover(struct svm_range *prange) 2777 { 2778 struct svm_range_list *svms = prange->svms; 2779 2780 spin_lock(&svms->deferred_list_lock); 2781 if (list_empty(&prange->deferred_list) && 2782 list_empty(&prange->child_list)) { 2783 spin_unlock(&svms->deferred_list_lock); 2784 return false; 2785 } 2786 spin_unlock(&svms->deferred_list_lock); 2787 2788 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2789 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n", 2790 svms, prange, prange->start, prange->last); 2791 return true; 2792 } 2793 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP || 2794 prange->work_item.op == SVM_OP_ADD_RANGE) { 2795 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n", 2796 svms, prange, prange->start, prange->last); 2797 return true; 2798 } 2799 return false; 2800 } 2801 2802 static void 2803 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p, 2804 int32_t gpuidx) 2805 { 2806 struct kfd_process_device *pdd; 2807 2808 /* fault is on different page of same range 2809 * or fault is skipped to recover later 2810 * or fault is on invalid virtual address 2811 */ 2812 if (gpuidx == MAX_GPU_INSTANCE) { 2813 uint32_t gpuid; 2814 int r; 2815 2816 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx); 2817 if (r < 0) 2818 return; 2819 } 2820 2821 /* fault is recovered 2822 * or fault cannot recover because GPU no access on the range 2823 */ 2824 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 2825 if (pdd) 2826 WRITE_ONCE(pdd->faults, pdd->faults + 1); 2827 } 2828 2829 static bool 2830 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) 2831 { 2832 unsigned long requested = VM_READ; 2833 2834 if (write_fault) 2835 requested |= VM_WRITE; 2836 2837 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, 2838 vma->vm_flags); 2839 return (vma->vm_flags & requested) == requested; 2840 } 2841 2842 int 2843 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, 2844 uint32_t vmid, uint32_t node_id, 2845 uint64_t addr, bool write_fault) 2846 { 2847 struct mm_struct *mm = NULL; 2848 struct svm_range_list *svms; 2849 struct svm_range *prange; 2850 struct kfd_process *p; 2851 ktime_t timestamp = ktime_get_boottime(); 2852 struct kfd_node *node; 2853 int32_t best_loc; 2854 int32_t gpuidx = MAX_GPU_INSTANCE; 2855 bool write_locked = false; 2856 struct vm_area_struct *vma; 2857 bool migration = false; 2858 int r = 0; 2859 2860 if (!KFD_IS_SVM_API_SUPPORTED(adev)) { 2861 pr_debug("device does not support SVM\n"); 2862 return -EFAULT; 2863 } 2864 2865 p = kfd_lookup_process_by_pasid(pasid); 2866 if (!p) { 2867 pr_debug("kfd process not founded pasid 0x%x\n", pasid); 2868 return 0; 2869 } 2870 svms = &p->svms; 2871 2872 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); 2873 2874 if (atomic_read(&svms->drain_pagefaults)) { 2875 pr_debug("draining retry fault, drop fault 0x%llx\n", addr); 2876 r = 0; 2877 goto out; 2878 } 2879 2880 if (!p->xnack_enabled) { 2881 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); 2882 r = -EFAULT; 2883 goto out; 2884 } 2885 2886 /* p->lead_thread is available as kfd_process_wq_release flush the work 2887 * before releasing task ref. 2888 */ 2889 mm = get_task_mm(p->lead_thread); 2890 if (!mm) { 2891 pr_debug("svms 0x%p failed to get mm\n", svms); 2892 r = 0; 2893 goto out; 2894 } 2895 2896 node = kfd_node_by_irq_ids(adev, node_id, vmid); 2897 if (!node) { 2898 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, 2899 vmid); 2900 r = -EFAULT; 2901 goto out; 2902 } 2903 mmap_read_lock(mm); 2904 retry_write_locked: 2905 mutex_lock(&svms->lock); 2906 prange = svm_range_from_addr(svms, addr, NULL); 2907 if (!prange) { 2908 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n", 2909 svms, addr); 2910 if (!write_locked) { 2911 /* Need the write lock to create new range with MMU notifier. 2912 * Also flush pending deferred work to make sure the interval 2913 * tree is up to date before we add a new range 2914 */ 2915 mutex_unlock(&svms->lock); 2916 mmap_read_unlock(mm); 2917 mmap_write_lock(mm); 2918 write_locked = true; 2919 goto retry_write_locked; 2920 } 2921 prange = svm_range_create_unregistered_range(node, p, mm, addr); 2922 if (!prange) { 2923 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", 2924 svms, addr); 2925 mmap_write_downgrade(mm); 2926 r = -EFAULT; 2927 goto out_unlock_svms; 2928 } 2929 } 2930 if (write_locked) 2931 mmap_write_downgrade(mm); 2932 2933 mutex_lock(&prange->migrate_mutex); 2934 2935 if (svm_range_skip_recover(prange)) { 2936 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); 2937 r = 0; 2938 goto out_unlock_range; 2939 } 2940 2941 /* skip duplicate vm fault on different pages of same range */ 2942 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp, 2943 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) { 2944 pr_debug("svms 0x%p [0x%lx %lx] already restored\n", 2945 svms, prange->start, prange->last); 2946 r = 0; 2947 goto out_unlock_range; 2948 } 2949 2950 /* __do_munmap removed VMA, return success as we are handling stale 2951 * retry fault. 2952 */ 2953 vma = vma_lookup(mm, addr << PAGE_SHIFT); 2954 if (!vma) { 2955 pr_debug("address 0x%llx VMA is removed\n", addr); 2956 r = 0; 2957 goto out_unlock_range; 2958 } 2959 2960 if (!svm_fault_allowed(vma, write_fault)) { 2961 pr_debug("fault addr 0x%llx no %s permission\n", addr, 2962 write_fault ? "write" : "read"); 2963 r = -EPERM; 2964 goto out_unlock_range; 2965 } 2966 2967 best_loc = svm_range_best_restore_location(prange, node, &gpuidx); 2968 if (best_loc == -1) { 2969 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 2970 svms, prange->start, prange->last); 2971 r = -EACCES; 2972 goto out_unlock_range; 2973 } 2974 2975 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n", 2976 svms, prange->start, prange->last, best_loc, 2977 prange->actual_loc); 2978 2979 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, 2980 write_fault, timestamp); 2981 2982 if (prange->actual_loc != best_loc) { 2983 migration = true; 2984 if (best_loc) { 2985 r = svm_migrate_to_vram(prange, best_loc, mm, 2986 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2987 if (r) { 2988 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", 2989 r, addr); 2990 /* Fallback to system memory if migration to 2991 * VRAM failed 2992 */ 2993 if (prange->actual_loc) 2994 r = svm_migrate_vram_to_ram(prange, mm, 2995 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, 2996 NULL); 2997 else 2998 r = 0; 2999 } 3000 } else { 3001 r = svm_migrate_vram_to_ram(prange, mm, 3002 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, 3003 NULL); 3004 } 3005 if (r) { 3006 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", 3007 r, svms, prange->start, prange->last); 3008 goto out_unlock_range; 3009 } 3010 } 3011 3012 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false); 3013 if (r) 3014 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", 3015 r, svms, prange->start, prange->last); 3016 3017 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, 3018 migration); 3019 3020 out_unlock_range: 3021 mutex_unlock(&prange->migrate_mutex); 3022 out_unlock_svms: 3023 mutex_unlock(&svms->lock); 3024 mmap_read_unlock(mm); 3025 3026 svm_range_count_fault(node, p, gpuidx); 3027 3028 mmput(mm); 3029 out: 3030 kfd_unref_process(p); 3031 3032 if (r == -EAGAIN) { 3033 pr_debug("recover vm fault later\n"); 3034 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); 3035 r = 0; 3036 } 3037 return r; 3038 } 3039 3040 int 3041 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) 3042 { 3043 struct svm_range *prange, *pchild; 3044 uint64_t reserved_size = 0; 3045 uint64_t size; 3046 int r = 0; 3047 3048 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled); 3049 3050 mutex_lock(&p->svms.lock); 3051 3052 list_for_each_entry(prange, &p->svms.list, list) { 3053 svm_range_lock(prange); 3054 list_for_each_entry(pchild, &prange->child_list, child_list) { 3055 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; 3056 if (xnack_enabled) { 3057 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 3058 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3059 } else { 3060 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, 3061 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3062 if (r) 3063 goto out_unlock; 3064 reserved_size += size; 3065 } 3066 } 3067 3068 size = (prange->last - prange->start + 1) << PAGE_SHIFT; 3069 if (xnack_enabled) { 3070 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 3071 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3072 } else { 3073 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, 3074 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3075 if (r) 3076 goto out_unlock; 3077 reserved_size += size; 3078 } 3079 out_unlock: 3080 svm_range_unlock(prange); 3081 if (r) 3082 break; 3083 } 3084 3085 if (r) 3086 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size, 3087 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3088 else 3089 /* Change xnack mode must be inside svms lock, to avoid race with 3090 * svm_range_deferred_list_work unreserve memory in parallel. 3091 */ 3092 p->xnack_enabled = xnack_enabled; 3093 3094 mutex_unlock(&p->svms.lock); 3095 return r; 3096 } 3097 3098 void svm_range_list_fini(struct kfd_process *p) 3099 { 3100 struct svm_range *prange; 3101 struct svm_range *next; 3102 3103 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms); 3104 3105 cancel_delayed_work_sync(&p->svms.restore_work); 3106 3107 /* Ensure list work is finished before process is destroyed */ 3108 flush_work(&p->svms.deferred_list_work); 3109 3110 /* 3111 * Ensure no retry fault comes in afterwards, as page fault handler will 3112 * not find kfd process and take mm lock to recover fault. 3113 */ 3114 atomic_inc(&p->svms.drain_pagefaults); 3115 svm_range_drain_retry_fault(&p->svms); 3116 3117 list_for_each_entry_safe(prange, next, &p->svms.list, list) { 3118 svm_range_unlink(prange); 3119 svm_range_remove_notifier(prange); 3120 svm_range_free(prange, true); 3121 } 3122 3123 mutex_destroy(&p->svms.lock); 3124 3125 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms); 3126 } 3127 3128 int svm_range_list_init(struct kfd_process *p) 3129 { 3130 struct svm_range_list *svms = &p->svms; 3131 int i; 3132 3133 svms->objects = RB_ROOT_CACHED; 3134 mutex_init(&svms->lock); 3135 INIT_LIST_HEAD(&svms->list); 3136 atomic_set(&svms->evicted_ranges, 0); 3137 atomic_set(&svms->drain_pagefaults, 0); 3138 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); 3139 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); 3140 INIT_LIST_HEAD(&svms->deferred_range_list); 3141 INIT_LIST_HEAD(&svms->criu_svm_metadata_list); 3142 spin_lock_init(&svms->deferred_list_lock); 3143 3144 for (i = 0; i < p->n_pdds; i++) 3145 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) 3146 bitmap_set(svms->bitmap_supported, i, 1); 3147 3148 return 0; 3149 } 3150 3151 /** 3152 * svm_range_check_vm - check if virtual address range mapped already 3153 * @p: current kfd_process 3154 * @start: range start address, in pages 3155 * @last: range last address, in pages 3156 * @bo_s: mapping start address in pages if address range already mapped 3157 * @bo_l: mapping last address in pages if address range already mapped 3158 * 3159 * The purpose is to avoid virtual address ranges already allocated by 3160 * kfd_ioctl_alloc_memory_of_gpu ioctl. 3161 * It looks for each pdd in the kfd_process. 3162 * 3163 * Context: Process context 3164 * 3165 * Return 0 - OK, if the range is not mapped. 3166 * Otherwise error code: 3167 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu 3168 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by 3169 * a signal. Release all buffer reservations and return to user-space. 3170 */ 3171 static int 3172 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, 3173 uint64_t *bo_s, uint64_t *bo_l) 3174 { 3175 struct amdgpu_bo_va_mapping *mapping; 3176 struct interval_tree_node *node; 3177 uint32_t i; 3178 int r; 3179 3180 for (i = 0; i < p->n_pdds; i++) { 3181 struct amdgpu_vm *vm; 3182 3183 if (!p->pdds[i]->drm_priv) 3184 continue; 3185 3186 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 3187 r = amdgpu_bo_reserve(vm->root.bo, false); 3188 if (r) 3189 return r; 3190 3191 node = interval_tree_iter_first(&vm->va, start, last); 3192 if (node) { 3193 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n", 3194 start, last); 3195 mapping = container_of((struct rb_node *)node, 3196 struct amdgpu_bo_va_mapping, rb); 3197 if (bo_s && bo_l) { 3198 *bo_s = mapping->start; 3199 *bo_l = mapping->last; 3200 } 3201 amdgpu_bo_unreserve(vm->root.bo); 3202 return -EADDRINUSE; 3203 } 3204 amdgpu_bo_unreserve(vm->root.bo); 3205 } 3206 3207 return 0; 3208 } 3209 3210 /** 3211 * svm_range_is_valid - check if virtual address range is valid 3212 * @p: current kfd_process 3213 * @start: range start address, in pages 3214 * @size: range size, in pages 3215 * 3216 * Valid virtual address range means it belongs to one or more VMAs 3217 * 3218 * Context: Process context 3219 * 3220 * Return: 3221 * 0 - OK, otherwise error code 3222 */ 3223 static int 3224 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) 3225 { 3226 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; 3227 struct vm_area_struct *vma; 3228 unsigned long end; 3229 unsigned long start_unchg = start; 3230 3231 start <<= PAGE_SHIFT; 3232 end = start + (size << PAGE_SHIFT); 3233 do { 3234 vma = vma_lookup(p->mm, start); 3235 if (!vma || (vma->vm_flags & device_vma)) 3236 return -EFAULT; 3237 start = min(end, vma->vm_end); 3238 } while (start < end); 3239 3240 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL, 3241 NULL); 3242 } 3243 3244 /** 3245 * svm_range_best_prefetch_location - decide the best prefetch location 3246 * @prange: svm range structure 3247 * 3248 * For xnack off: 3249 * If range map to single GPU, the best prefetch location is prefetch_loc, which 3250 * can be CPU or GPU. 3251 * 3252 * If range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if mGPU connection on 3253 * XGMI same hive, the best prefetch location is prefetch_loc GPU, othervise 3254 * the best prefetch location is always CPU, because GPU can not have coherent 3255 * mapping VRAM of other GPUs even with large-BAR PCIe connection. 3256 * 3257 * For xnack on: 3258 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is 3259 * prefetch_loc, other GPU access will generate vm fault and trigger migration. 3260 * 3261 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same 3262 * hive, the best prefetch location is prefetch_loc GPU, otherwise the best 3263 * prefetch location is always CPU. 3264 * 3265 * Context: Process context 3266 * 3267 * Return: 3268 * 0 for CPU or GPU id 3269 */ 3270 static uint32_t 3271 svm_range_best_prefetch_location(struct svm_range *prange) 3272 { 3273 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 3274 uint32_t best_loc = prange->prefetch_loc; 3275 struct kfd_process_device *pdd; 3276 struct kfd_node *bo_node; 3277 struct kfd_process *p; 3278 uint32_t gpuidx; 3279 3280 p = container_of(prange->svms, struct kfd_process, svms); 3281 3282 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) 3283 goto out; 3284 3285 bo_node = svm_range_get_node_by_id(prange, best_loc); 3286 if (!bo_node) { 3287 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc); 3288 best_loc = 0; 3289 goto out; 3290 } 3291 3292 if (bo_node->adev->gmc.is_app_apu) { 3293 best_loc = 0; 3294 goto out; 3295 } 3296 3297 if (p->xnack_enabled) 3298 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); 3299 else 3300 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip, 3301 MAX_GPU_INSTANCE); 3302 3303 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 3304 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 3305 if (!pdd) { 3306 pr_debug("failed to get device by idx 0x%x\n", gpuidx); 3307 continue; 3308 } 3309 3310 if (pdd->dev->adev == bo_node->adev) 3311 continue; 3312 3313 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) { 3314 best_loc = 0; 3315 break; 3316 } 3317 } 3318 3319 out: 3320 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n", 3321 p->xnack_enabled, &p->svms, prange->start, prange->last, 3322 best_loc); 3323 3324 return best_loc; 3325 } 3326 3327 /* svm_range_trigger_migration - start page migration if prefetch loc changed 3328 * @mm: current process mm_struct 3329 * @prange: svm range structure 3330 * @migrated: output, true if migration is triggered 3331 * 3332 * If range perfetch_loc is GPU, actual loc is cpu 0, then migrate the range 3333 * from ram to vram. 3334 * If range prefetch_loc is cpu 0, actual loc is GPU, then migrate the range 3335 * from vram to ram. 3336 * 3337 * If GPU vm fault retry is not enabled, migration interact with MMU notifier 3338 * and restore work: 3339 * 1. migrate_vma_setup invalidate pages, MMU notifier callback svm_range_evict 3340 * stops all queues, schedule restore work 3341 * 2. svm_range_restore_work wait for migration is done by 3342 * a. svm_range_validate_vram takes prange->migrate_mutex 3343 * b. svm_range_validate_ram HMM get pages wait for CPU fault handle returns 3344 * 3. restore work update mappings of GPU, resume all queues. 3345 * 3346 * Context: Process context 3347 * 3348 * Return: 3349 * 0 - OK, otherwise - error code of migration 3350 */ 3351 static int 3352 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, 3353 bool *migrated) 3354 { 3355 uint32_t best_loc; 3356 int r = 0; 3357 3358 *migrated = false; 3359 best_loc = svm_range_best_prefetch_location(prange); 3360 3361 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3362 best_loc == prange->actual_loc) 3363 return 0; 3364 3365 if (!best_loc) { 3366 r = svm_migrate_vram_to_ram(prange, mm, 3367 KFD_MIGRATE_TRIGGER_PREFETCH, NULL); 3368 *migrated = !r; 3369 return r; 3370 } 3371 3372 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); 3373 *migrated = !r; 3374 3375 return r; 3376 } 3377 3378 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence) 3379 { 3380 if (!fence) 3381 return -EINVAL; 3382 3383 if (dma_fence_is_signaled(&fence->base)) 3384 return 0; 3385 3386 if (fence->svm_bo) { 3387 WRITE_ONCE(fence->svm_bo->evicting, 1); 3388 schedule_work(&fence->svm_bo->eviction_work); 3389 } 3390 3391 return 0; 3392 } 3393 3394 static void svm_range_evict_svm_bo_worker(struct work_struct *work) 3395 { 3396 struct svm_range_bo *svm_bo; 3397 struct mm_struct *mm; 3398 int r = 0; 3399 3400 svm_bo = container_of(work, struct svm_range_bo, eviction_work); 3401 if (!svm_bo_ref_unless_zero(svm_bo)) 3402 return; /* svm_bo was freed while eviction was pending */ 3403 3404 if (mmget_not_zero(svm_bo->eviction_fence->mm)) { 3405 mm = svm_bo->eviction_fence->mm; 3406 } else { 3407 svm_range_bo_unref(svm_bo); 3408 return; 3409 } 3410 3411 mmap_read_lock(mm); 3412 spin_lock(&svm_bo->list_lock); 3413 while (!list_empty(&svm_bo->range_list) && !r) { 3414 struct svm_range *prange = 3415 list_first_entry(&svm_bo->range_list, 3416 struct svm_range, svm_bo_list); 3417 int retries = 3; 3418 3419 list_del_init(&prange->svm_bo_list); 3420 spin_unlock(&svm_bo->list_lock); 3421 3422 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, 3423 prange->start, prange->last); 3424 3425 mutex_lock(&prange->migrate_mutex); 3426 do { 3427 r = svm_migrate_vram_to_ram(prange, mm, 3428 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); 3429 } while (!r && prange->actual_loc && --retries); 3430 3431 if (!r && prange->actual_loc) 3432 pr_info_once("Migration failed during eviction"); 3433 3434 if (!prange->actual_loc) { 3435 mutex_lock(&prange->lock); 3436 prange->svm_bo = NULL; 3437 mutex_unlock(&prange->lock); 3438 } 3439 mutex_unlock(&prange->migrate_mutex); 3440 3441 spin_lock(&svm_bo->list_lock); 3442 } 3443 spin_unlock(&svm_bo->list_lock); 3444 mmap_read_unlock(mm); 3445 mmput(mm); 3446 3447 dma_fence_signal(&svm_bo->eviction_fence->base); 3448 3449 /* This is the last reference to svm_bo, after svm_range_vram_node_free 3450 * has been called in svm_migrate_vram_to_ram 3451 */ 3452 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n"); 3453 svm_range_bo_unref(svm_bo); 3454 } 3455 3456 static int 3457 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, 3458 uint64_t start, uint64_t size, uint32_t nattr, 3459 struct kfd_ioctl_svm_attribute *attrs) 3460 { 3461 struct amdkfd_process_info *process_info = p->kgd_process_info; 3462 struct list_head update_list; 3463 struct list_head insert_list; 3464 struct list_head remove_list; 3465 struct svm_range_list *svms; 3466 struct svm_range *prange; 3467 struct svm_range *next; 3468 bool update_mapping = false; 3469 bool flush_tlb; 3470 int r = 0; 3471 3472 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n", 3473 p->pasid, &p->svms, start, start + size - 1, size); 3474 3475 r = svm_range_check_attr(p, nattr, attrs); 3476 if (r) 3477 return r; 3478 3479 svms = &p->svms; 3480 3481 mutex_lock(&process_info->lock); 3482 3483 svm_range_list_lock_and_flush_work(svms, mm); 3484 3485 r = svm_range_is_valid(p, start, size); 3486 if (r) { 3487 pr_debug("invalid range r=%d\n", r); 3488 mmap_write_unlock(mm); 3489 goto out; 3490 } 3491 3492 mutex_lock(&svms->lock); 3493 3494 /* Add new range and split existing ranges as needed */ 3495 r = svm_range_add(p, start, size, nattr, attrs, &update_list, 3496 &insert_list, &remove_list); 3497 if (r) { 3498 mutex_unlock(&svms->lock); 3499 mmap_write_unlock(mm); 3500 goto out; 3501 } 3502 /* Apply changes as a transaction */ 3503 list_for_each_entry_safe(prange, next, &insert_list, list) { 3504 svm_range_add_to_svms(prange); 3505 svm_range_add_notifier_locked(mm, prange); 3506 } 3507 list_for_each_entry(prange, &update_list, update_list) { 3508 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping); 3509 /* TODO: unmap ranges from GPU that lost access */ 3510 } 3511 list_for_each_entry_safe(prange, next, &remove_list, update_list) { 3512 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", 3513 prange->svms, prange, prange->start, 3514 prange->last); 3515 svm_range_unlink(prange); 3516 svm_range_remove_notifier(prange); 3517 svm_range_free(prange, false); 3518 } 3519 3520 mmap_write_downgrade(mm); 3521 /* Trigger migrations and revalidate and map to GPUs as needed. If 3522 * this fails we may be left with partially completed actions. There 3523 * is no clean way of rolling back to the previous state in such a 3524 * case because the rollback wouldn't be guaranteed to work either. 3525 */ 3526 list_for_each_entry(prange, &update_list, update_list) { 3527 bool migrated; 3528 3529 mutex_lock(&prange->migrate_mutex); 3530 3531 r = svm_range_trigger_migration(mm, prange, &migrated); 3532 if (r) 3533 goto out_unlock_range; 3534 3535 if (migrated && (!p->xnack_enabled || 3536 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) && 3537 prange->mapped_to_gpu) { 3538 pr_debug("restore_work will update mappings of GPUs\n"); 3539 mutex_unlock(&prange->migrate_mutex); 3540 continue; 3541 } 3542 3543 if (!migrated && !update_mapping) { 3544 mutex_unlock(&prange->migrate_mutex); 3545 continue; 3546 } 3547 3548 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu; 3549 3550 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, 3551 true, true, flush_tlb); 3552 if (r) 3553 pr_debug("failed %d to map svm range\n", r); 3554 3555 out_unlock_range: 3556 mutex_unlock(&prange->migrate_mutex); 3557 if (r) 3558 break; 3559 } 3560 3561 svm_range_debug_dump(svms); 3562 3563 mutex_unlock(&svms->lock); 3564 mmap_read_unlock(mm); 3565 out: 3566 mutex_unlock(&process_info->lock); 3567 3568 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, 3569 &p->svms, start, start + size - 1, r); 3570 3571 return r; 3572 } 3573 3574 static int 3575 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm, 3576 uint64_t start, uint64_t size, uint32_t nattr, 3577 struct kfd_ioctl_svm_attribute *attrs) 3578 { 3579 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); 3580 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); 3581 bool get_preferred_loc = false; 3582 bool get_prefetch_loc = false; 3583 bool get_granularity = false; 3584 bool get_accessible = false; 3585 bool get_flags = false; 3586 uint64_t last = start + size - 1UL; 3587 uint8_t granularity = 0xff; 3588 struct interval_tree_node *node; 3589 struct svm_range_list *svms; 3590 struct svm_range *prange; 3591 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3592 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3593 uint32_t flags_and = 0xffffffff; 3594 uint32_t flags_or = 0; 3595 int gpuidx; 3596 uint32_t i; 3597 int r = 0; 3598 3599 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, 3600 start + size - 1, nattr); 3601 3602 /* Flush pending deferred work to avoid racing with deferred actions from 3603 * previous memory map changes (e.g. munmap). Concurrent memory map changes 3604 * can still race with get_attr because we don't hold the mmap lock. But that 3605 * would be a race condition in the application anyway, and undefined 3606 * behaviour is acceptable in that case. 3607 */ 3608 flush_work(&p->svms.deferred_list_work); 3609 3610 mmap_read_lock(mm); 3611 r = svm_range_is_valid(p, start, size); 3612 mmap_read_unlock(mm); 3613 if (r) { 3614 pr_debug("invalid range r=%d\n", r); 3615 return r; 3616 } 3617 3618 for (i = 0; i < nattr; i++) { 3619 switch (attrs[i].type) { 3620 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 3621 get_preferred_loc = true; 3622 break; 3623 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3624 get_prefetch_loc = true; 3625 break; 3626 case KFD_IOCTL_SVM_ATTR_ACCESS: 3627 get_accessible = true; 3628 break; 3629 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3630 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 3631 get_flags = true; 3632 break; 3633 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 3634 get_granularity = true; 3635 break; 3636 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 3637 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 3638 fallthrough; 3639 default: 3640 pr_debug("get invalid attr type 0x%x\n", attrs[i].type); 3641 return -EINVAL; 3642 } 3643 } 3644 3645 svms = &p->svms; 3646 3647 mutex_lock(&svms->lock); 3648 3649 node = interval_tree_iter_first(&svms->objects, start, last); 3650 if (!node) { 3651 pr_debug("range attrs not found return default values\n"); 3652 svm_range_set_default_attributes(&location, &prefetch_loc, 3653 &granularity, &flags_and); 3654 flags_or = flags_and; 3655 if (p->xnack_enabled) 3656 bitmap_copy(bitmap_access, svms->bitmap_supported, 3657 MAX_GPU_INSTANCE); 3658 else 3659 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE); 3660 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE); 3661 goto fill_values; 3662 } 3663 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); 3664 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE); 3665 3666 while (node) { 3667 struct interval_tree_node *next; 3668 3669 prange = container_of(node, struct svm_range, it_node); 3670 next = interval_tree_iter_next(node, start, last); 3671 3672 if (get_preferred_loc) { 3673 if (prange->preferred_loc == 3674 KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3675 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED && 3676 location != prange->preferred_loc)) { 3677 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3678 get_preferred_loc = false; 3679 } else { 3680 location = prange->preferred_loc; 3681 } 3682 } 3683 if (get_prefetch_loc) { 3684 if (prange->prefetch_loc == 3685 KFD_IOCTL_SVM_LOCATION_UNDEFINED || 3686 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED && 3687 prefetch_loc != prange->prefetch_loc)) { 3688 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3689 get_prefetch_loc = false; 3690 } else { 3691 prefetch_loc = prange->prefetch_loc; 3692 } 3693 } 3694 if (get_accessible) { 3695 bitmap_and(bitmap_access, bitmap_access, 3696 prange->bitmap_access, MAX_GPU_INSTANCE); 3697 bitmap_and(bitmap_aip, bitmap_aip, 3698 prange->bitmap_aip, MAX_GPU_INSTANCE); 3699 } 3700 if (get_flags) { 3701 flags_and &= prange->flags; 3702 flags_or |= prange->flags; 3703 } 3704 3705 if (get_granularity && prange->granularity < granularity) 3706 granularity = prange->granularity; 3707 3708 node = next; 3709 } 3710 fill_values: 3711 mutex_unlock(&svms->lock); 3712 3713 for (i = 0; i < nattr; i++) { 3714 switch (attrs[i].type) { 3715 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 3716 attrs[i].value = location; 3717 break; 3718 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3719 attrs[i].value = prefetch_loc; 3720 break; 3721 case KFD_IOCTL_SVM_ATTR_ACCESS: 3722 gpuidx = kfd_process_gpuidx_from_gpuid(p, 3723 attrs[i].value); 3724 if (gpuidx < 0) { 3725 pr_debug("invalid gpuid %x\n", attrs[i].value); 3726 return -EINVAL; 3727 } 3728 if (test_bit(gpuidx, bitmap_access)) 3729 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS; 3730 else if (test_bit(gpuidx, bitmap_aip)) 3731 attrs[i].type = 3732 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE; 3733 else 3734 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS; 3735 break; 3736 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3737 attrs[i].value = flags_and; 3738 break; 3739 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: 3740 attrs[i].value = ~flags_or; 3741 break; 3742 case KFD_IOCTL_SVM_ATTR_GRANULARITY: 3743 attrs[i].value = (uint32_t)granularity; 3744 break; 3745 } 3746 } 3747 3748 return 0; 3749 } 3750 3751 int kfd_criu_resume_svm(struct kfd_process *p) 3752 { 3753 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL; 3754 int nattr_common = 4, nattr_accessibility = 1; 3755 struct criu_svm_metadata *criu_svm_md = NULL; 3756 struct svm_range_list *svms = &p->svms; 3757 struct criu_svm_metadata *next = NULL; 3758 uint32_t set_flags = 0xffffffff; 3759 int i, j, num_attrs, ret = 0; 3760 uint64_t set_attr_size; 3761 struct mm_struct *mm; 3762 3763 if (list_empty(&svms->criu_svm_metadata_list)) { 3764 pr_debug("No SVM data from CRIU restore stage 2\n"); 3765 return ret; 3766 } 3767 3768 mm = get_task_mm(p->lead_thread); 3769 if (!mm) { 3770 pr_err("failed to get mm for the target process\n"); 3771 return -ESRCH; 3772 } 3773 3774 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds); 3775 3776 i = j = 0; 3777 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) { 3778 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n", 3779 i, criu_svm_md->data.start_addr, criu_svm_md->data.size); 3780 3781 for (j = 0; j < num_attrs; j++) { 3782 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n", 3783 i, j, criu_svm_md->data.attrs[j].type, 3784 i, j, criu_svm_md->data.attrs[j].value); 3785 switch (criu_svm_md->data.attrs[j].type) { 3786 /* During Checkpoint operation, the query for 3787 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might 3788 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were 3789 * not used by the range which was checkpointed. Care 3790 * must be taken to not restore with an invalid value 3791 * otherwise the gpuidx value will be invalid and 3792 * set_attr would eventually fail so just replace those 3793 * with another dummy attribute such as 3794 * KFD_IOCTL_SVM_ATTR_SET_FLAGS. 3795 */ 3796 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 3797 if (criu_svm_md->data.attrs[j].value == 3798 KFD_IOCTL_SVM_LOCATION_UNDEFINED) { 3799 criu_svm_md->data.attrs[j].type = 3800 KFD_IOCTL_SVM_ATTR_SET_FLAGS; 3801 criu_svm_md->data.attrs[j].value = 0; 3802 } 3803 break; 3804 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 3805 set_flags = criu_svm_md->data.attrs[j].value; 3806 break; 3807 default: 3808 break; 3809 } 3810 } 3811 3812 /* CLR_FLAGS is not available via get_attr during checkpoint but 3813 * it needs to be inserted before restoring the ranges so 3814 * allocate extra space for it before calling set_attr 3815 */ 3816 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) * 3817 (num_attrs + 1); 3818 set_attr_new = krealloc(set_attr, set_attr_size, 3819 GFP_KERNEL); 3820 if (!set_attr_new) { 3821 ret = -ENOMEM; 3822 goto exit; 3823 } 3824 set_attr = set_attr_new; 3825 3826 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs * 3827 sizeof(struct kfd_ioctl_svm_attribute)); 3828 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS; 3829 set_attr[num_attrs].value = ~set_flags; 3830 3831 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr, 3832 criu_svm_md->data.size, num_attrs + 1, 3833 set_attr); 3834 if (ret) { 3835 pr_err("CRIU: failed to set range attributes\n"); 3836 goto exit; 3837 } 3838 3839 i++; 3840 } 3841 exit: 3842 kfree(set_attr); 3843 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) { 3844 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n", 3845 criu_svm_md->data.start_addr); 3846 kfree(criu_svm_md); 3847 } 3848 3849 mmput(mm); 3850 return ret; 3851 3852 } 3853 3854 int kfd_criu_restore_svm(struct kfd_process *p, 3855 uint8_t __user *user_priv_ptr, 3856 uint64_t *priv_data_offset, 3857 uint64_t max_priv_data_size) 3858 { 3859 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size; 3860 int nattr_common = 4, nattr_accessibility = 1; 3861 struct criu_svm_metadata *criu_svm_md = NULL; 3862 struct svm_range_list *svms = &p->svms; 3863 uint32_t num_devices; 3864 int ret = 0; 3865 3866 num_devices = p->n_pdds; 3867 /* Handle one SVM range object at a time, also the number of gpus are 3868 * assumed to be same on the restore node, checking must be done while 3869 * evaluating the topology earlier 3870 */ 3871 3872 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) * 3873 (nattr_common + nattr_accessibility * num_devices); 3874 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size; 3875 3876 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) + 3877 svm_attrs_size; 3878 3879 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL); 3880 if (!criu_svm_md) { 3881 pr_err("failed to allocate memory to store svm metadata\n"); 3882 return -ENOMEM; 3883 } 3884 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) { 3885 ret = -EINVAL; 3886 goto exit; 3887 } 3888 3889 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset, 3890 svm_priv_data_size); 3891 if (ret) { 3892 ret = -EFAULT; 3893 goto exit; 3894 } 3895 *priv_data_offset += svm_priv_data_size; 3896 3897 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list); 3898 3899 return 0; 3900 3901 3902 exit: 3903 kfree(criu_svm_md); 3904 return ret; 3905 } 3906 3907 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 3908 uint64_t *svm_priv_data_size) 3909 { 3910 uint64_t total_size, accessibility_size, common_attr_size; 3911 int nattr_common = 4, nattr_accessibility = 1; 3912 int num_devices = p->n_pdds; 3913 struct svm_range_list *svms; 3914 struct svm_range *prange; 3915 uint32_t count = 0; 3916 3917 *svm_priv_data_size = 0; 3918 3919 svms = &p->svms; 3920 if (!svms) 3921 return -EINVAL; 3922 3923 mutex_lock(&svms->lock); 3924 list_for_each_entry(prange, &svms->list, list) { 3925 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n", 3926 prange, prange->start, prange->npages, 3927 prange->start + prange->npages - 1); 3928 count++; 3929 } 3930 mutex_unlock(&svms->lock); 3931 3932 *num_svm_ranges = count; 3933 /* Only the accessbility attributes need to be queried for all the gpus 3934 * individually, remaining ones are spanned across the entire process 3935 * regardless of the various gpu nodes. Of the remaining attributes, 3936 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved. 3937 * 3938 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC 3939 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC 3940 * KFD_IOCTL_SVM_ATTR_SET_FLAGS 3941 * KFD_IOCTL_SVM_ATTR_GRANULARITY 3942 * 3943 * ** ACCESSBILITY ATTRIBUTES ** 3944 * (Considered as one, type is altered during query, value is gpuid) 3945 * KFD_IOCTL_SVM_ATTR_ACCESS 3946 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE 3947 * KFD_IOCTL_SVM_ATTR_NO_ACCESS 3948 */ 3949 if (*num_svm_ranges > 0) { 3950 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) * 3951 nattr_common; 3952 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) * 3953 nattr_accessibility * num_devices; 3954 3955 total_size = sizeof(struct kfd_criu_svm_range_priv_data) + 3956 common_attr_size + accessibility_size; 3957 3958 *svm_priv_data_size = *num_svm_ranges * total_size; 3959 } 3960 3961 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges, 3962 *svm_priv_data_size); 3963 return 0; 3964 } 3965 3966 int kfd_criu_checkpoint_svm(struct kfd_process *p, 3967 uint8_t __user *user_priv_data, 3968 uint64_t *priv_data_offset) 3969 { 3970 struct kfd_criu_svm_range_priv_data *svm_priv = NULL; 3971 struct kfd_ioctl_svm_attribute *query_attr = NULL; 3972 uint64_t svm_priv_data_size, query_attr_size = 0; 3973 int index, nattr_common = 4, ret = 0; 3974 struct svm_range_list *svms; 3975 int num_devices = p->n_pdds; 3976 struct svm_range *prange; 3977 struct mm_struct *mm; 3978 3979 svms = &p->svms; 3980 if (!svms) 3981 return -EINVAL; 3982 3983 mm = get_task_mm(p->lead_thread); 3984 if (!mm) { 3985 pr_err("failed to get mm for the target process\n"); 3986 return -ESRCH; 3987 } 3988 3989 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) * 3990 (nattr_common + num_devices); 3991 3992 query_attr = kzalloc(query_attr_size, GFP_KERNEL); 3993 if (!query_attr) { 3994 ret = -ENOMEM; 3995 goto exit; 3996 } 3997 3998 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC; 3999 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC; 4000 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS; 4001 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY; 4002 4003 for (index = 0; index < num_devices; index++) { 4004 struct kfd_process_device *pdd = p->pdds[index]; 4005 4006 query_attr[index + nattr_common].type = 4007 KFD_IOCTL_SVM_ATTR_ACCESS; 4008 query_attr[index + nattr_common].value = pdd->user_gpu_id; 4009 } 4010 4011 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size; 4012 4013 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL); 4014 if (!svm_priv) { 4015 ret = -ENOMEM; 4016 goto exit_query; 4017 } 4018 4019 index = 0; 4020 list_for_each_entry(prange, &svms->list, list) { 4021 4022 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE; 4023 svm_priv->start_addr = prange->start; 4024 svm_priv->size = prange->npages; 4025 memcpy(&svm_priv->attrs, query_attr, query_attr_size); 4026 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n", 4027 prange, prange->start, prange->npages, 4028 prange->start + prange->npages - 1, 4029 prange->npages * PAGE_SIZE); 4030 4031 ret = svm_range_get_attr(p, mm, svm_priv->start_addr, 4032 svm_priv->size, 4033 (nattr_common + num_devices), 4034 svm_priv->attrs); 4035 if (ret) { 4036 pr_err("CRIU: failed to obtain range attributes\n"); 4037 goto exit_priv; 4038 } 4039 4040 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv, 4041 svm_priv_data_size)) { 4042 pr_err("Failed to copy svm priv to user\n"); 4043 ret = -EFAULT; 4044 goto exit_priv; 4045 } 4046 4047 *priv_data_offset += svm_priv_data_size; 4048 4049 } 4050 4051 4052 exit_priv: 4053 kfree(svm_priv); 4054 exit_query: 4055 kfree(query_attr); 4056 exit: 4057 mmput(mm); 4058 return ret; 4059 } 4060 4061 int 4062 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, 4063 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs) 4064 { 4065 struct mm_struct *mm = current->mm; 4066 int r; 4067 4068 start >>= PAGE_SHIFT; 4069 size >>= PAGE_SHIFT; 4070 4071 switch (op) { 4072 case KFD_IOCTL_SVM_OP_SET_ATTR: 4073 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs); 4074 break; 4075 case KFD_IOCTL_SVM_OP_GET_ATTR: 4076 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs); 4077 break; 4078 default: 4079 r = EINVAL; 4080 break; 4081 } 4082 4083 return r; 4084 } 4085