// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
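/*
 * IB layout produced by svm_migrate_gart_map above (a sketch for
 * orientation, inferred from the code, not authoritative):
 *
 *	job->ibs[0]: | copy packet, num_dw dwords | GART PTEs, npages * 8 B |
 *	                                            ^ cpu_addr points here
 *
 * The PTEs are written into the tail of the IB itself, then the copy packet
 * moves them into the GART table backing gart window 0 (dst_addr). Because
 * the mapping update and the following buffer copy run on the same ring,
 * they stay ordered without any CPU-side synchronization.
 */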
/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is addressed through contiguous GART table entries mapped to
 * the ram pages, the vram side uses the direct mapping of the vram pages,
 * which must be npages contiguous pages.
 * GART updates and the copy use the same buffer-copy ring. The copy is split
 * into transfers of at most GTT_MAX_PAGES pages; all sdma operations are
 * serialized, so it is enough to wait for the fence of the last sdma
 * operation, which is returned through @mfence, to know the whole copy is
 * done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}
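/*
 * Typical call pattern (a sketch mirroring the callers below): the fence
 * pointer starts out NULL, each chunk replaces it, and the caller waits
 * exactly once at the end.
 *
 *	struct dma_fence *mfence = NULL;
 *	int r;
 *
 *	r = svm_migrate_copy_memory_gart(adev, sys, vram, npages,
 *					 FROM_RAM_TO_VRAM, &mfence);
 *	if (!r)
 *		r = svm_migrate_copy_done(adev, mfence);
 */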
/**
 * svm_migrate_copy_done - wait for memory copy sdma is done
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	get_page(page);
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}
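/*
 * Scratch layout used by svm_migrate_copy_to_vram below (allocated in
 * svm_migrate_vma_to_vram): the first npages entries hold the DMA addresses
 * of the mapped system pages (src); the following npages entries, viewed as
 * uint64_t, hold the VRAM destination addresses (dst).
 */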
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->cpages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}
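/*
 * One VMA-granular migration follows the core migrate_vma protocol:
 * migrate_vma_setup() collects and isolates the source pages, the driver
 * copies the data (svm_migrate_copy_to_vram() here), migrate_vma_pages()
 * installs the destination pages, and migrate_vma_finalize() releases the
 * source pages and restores any page that was not migrated.
 */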
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}
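/*
 * VRAM pages backing one range are not guaranteed to be physically
 * contiguous, so svm_migrate_copy_to_ram below batches runs of j contiguous
 * VRAM addresses and flushes an sdma copy whenever the run breaks or a
 * non-device page (data already in system memory) is hit.
 */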
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}
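/*
 * Unlike svm_migrate_vma_to_vram(), which returns the number of migrated
 * pages, this helper returns the number of pages that could NOT be migrated
 * (upages) on success; svm_migrate_vram_to_ram() frees the VRAM node only
 * once no page of the range is left on the device.
 */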
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);
	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);

		return upages;
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (!upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}
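/*
 * Device-to-device migration below takes two hops through system memory:
 * first VRAM to RAM, retried while pages remain on the source device, then
 * RAM to the destination VRAM. There is no direct device-to-device copy path
 * here yet (see the TODO inside the function).
 */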
/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm);
}
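/*
 * Note on the faulting_task check in svm_migrate_to_ram() below:
 * svms.faulting_task is set while a task is faulting pages in the SVM
 * validation path; if that page walk touches a device-private page, it
 * re-enters this fault handler on the same task. Skipping the migration in
 * that case avoids recursive locking of svms.lock.
 */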
/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		kfd_unref_process(p);
		return 0;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
	if (IS_ERR(res))
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");

		/* Disable SVM support capability */
		pgmap->type = 0;
		devm_release_mem_region(adev->dev, res->start, resource_size(res));
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}
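/*
 * Rough cost of the SVM_HMM_PAGE_STRUCT_SIZE reservation made by
 * svm_migrate_init() (a worked example, assuming a 64-byte struct page and
 * 4 KiB pages): registering 16 GiB of VRAM consumes
 * 16 GiB / 4 KiB * 64 B = 256 MiB of system memory, about 1.6% of the
 * registered VRAM size.
 */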