// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
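
/*
 * Rough layout of the indirect buffer built by svm_migrate_gart_map(),
 * an illustrative sketch only:
 *
 *   job->ibs[0]: [ copy command, at most num_dw dwords ][ num_bytes of GART PTEs ]
 *                                    src_addr = gpu_addr + num_dw * 4 --^
 *
 * The emitted copy writes those PTEs into adev->gart.bo, i.e. into GART
 * window 0, so that the window maps the system pages described by @addr
 * before the actual migration copy is submitted on the same ring.
 */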

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system page dma address array (source for ram to vram, destination
 *       for vram to ram)
 * @vram: vram page address array (destination for ram to vram, source for
 *        vram to ram)
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * ram addresses are mapped through contiguous GART table entries to the ram
 * pages; vram addresses use the direct mapping of the vram pages, which must
 * therefore be npages contiguous pages.
 * GART update and sdma copy use the same buffer-copy ring. The copy is split
 * into chunks of at most GTT_MAX_PAGES pages; since all sdma operations on
 * the ring are serialized, waiting for the last sdma finish fence, which is
 * returned in @mfence, is enough to check that the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}
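
/*
 * Chunking example, assuming AMDGPU_GTT_MAX_TRANSFER_SIZE is 512 pages:
 * copying 1200 pages issues three serialized sdma copies of 512, 512 and
 * 176 pages. Each iteration drops the previous fence and keeps only the
 * newest one in *mfence, so waiting on that last fence in
 * svm_migrate_copy_done() covers the whole transfer.
 */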

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory_gart
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	get_page(page);
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->cpages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_err(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "fail %d dma_map_page\n", r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
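				/*
				 * A run of j contiguous pages is still
				 * pending; flush it to vram with one sdma
				 * copy so the vram cursor can be advanced
				 * past it before skipping this page.
				 */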
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}
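
/*
 * Rough sketch of the single kvmalloc'ed buffer used by
 * svm_migrate_vma_to_vram() below (and likewise by svm_migrate_vma_to_ram()),
 * inferred from the pointer setup and shown here for reference only:
 *
 *   buf:     [ migrate.src: npages pfn entries ][ migrate.dst: npages pfn entries ]
 *   scratch: [ npages dma_addr_t system addresses ][ npages uint64_t vram addresses ]
 *
 * scratch starts right after migrate.dst; svm_migrate_copy_to_vram() and
 * svm_migrate_copy_to_ram() use its first half for dma-mapped system page
 * addresses and its second half for vram page addresses.
 */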

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref(svm_bo);
	}
}
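
/*
 * In svm_migrate_copy_to_ram() below the scratch buffer is used the other way
 * around from svm_migrate_copy_to_vram(): dst (== scratch) holds the
 * dma-mapped addresses of the newly allocated system pages, while
 * src (== scratch + npages) holds the vram source addresses. Runs of
 * contiguous vram pages are batched into single sdma copies; a gap in the
 * vram addresses flushes the pending run first.
 */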

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "fail %d dma_map_page\n", r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);
	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long cpages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm)
{
	int r;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		return r;

	return svm_migrate_ram_to_vram(prange, best_loc, mm);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm);
}
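
/*
 * svm_migrate_to_ram() below is installed as the migrate_to_ram callback in
 * svm_migrate_pgmap_ops, so a CPU access to a page that currently lives in
 * vram faults into it and the data is moved back to system memory through
 * svm_migrate_vram_to_ram().
 */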

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault, carries the faulting vma and address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of the fault with SIGBUS
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
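
/*
 * Example, assuming a 64-byte struct page and 4 KiB pages: registering 16 GiB
 * of VRAM costs 16 GiB / 4 KiB * 64 bytes = 256 MiB of system memory for the
 * page structs, which svm_migrate_init() reserves below.
 */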

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (kfddev->device_info->asic_family < CHIP_VEGA10)
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
	if (IS_ERR(res))
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");

		/* Disable SVM support capability */
		pgmap->type = 0;
		devm_release_mem_region(adev->dev, res->start, resource_size(res));
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}