// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

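/*
 * svm_migrate_gart_map() builds a single IB that carries both the copy
 * command and its payload:
 *
 *   job->ibs[0]: [ copy commands, num_dw dwords | npages * 8 bytes of PTEs ]
 *
 * amdgpu_gart_map() writes the PTEs CPU-side into the IB tail (cpu_addr),
 * and the leading copy command moves them into the GART table at window 0,
 * so the window mapping is in place for any copy queued behind this job on
 * the same ring.
 */
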
/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: DMA addresses of the system memory pages
 * @vram: physical addresses of the vram pages
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * ram pages are accessed through contiguous GART table entries, while vram
 * pages are accessed through the direct mapping and must therefore be npages
 * contiguous pages. GART updates and sdma copies share the same buffer-copy
 * ring: the copy is split into transfers of at most GTT_MAX_PAGES pages, all
 * sdma operations are serialized, and the fence of the last transfer is
 * returned through @mfence so the caller can wait for the whole copy to
 * finish.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

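/*
 * Each loop iteration above remaps GART window 0 and then queues the
 * dependent copy on the same buffer-funcs ring, so the window is never
 * overwritten before the copy that uses it has executed. Keeping only the
 * last chunk's fence in *mfence is therefore sufficient:
 * svm_migrate_copy_done() waits on it once all chunks have been submitted.
 */
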
/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory_gart
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

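/*
 * Layout of the scratch area handed to the copy helpers below (allocated by
 * the svm_migrate_vma_* callers):
 *
 *   scratch[0 .. npages-1]         dma_addr_t - system page DMA addresses
 *   scratch[npages .. 2*npages-1]  uint64_t   - vram physical offsets
 *
 * svm_migrate_copy_to_vram() treats the first half as the source array and
 * the second half as the destination; svm_migrate_copy_to_ram() swaps the
 * two roles.
 */
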
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);

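	/*
	 * Walk the range page by page, batching contiguous runs: j counts
	 * system pages whose dma addresses have been collected but not yet
	 * copied. A hole in the source (no page, or a page already in vram)
	 * or the end of the current vram cursor segment flushes the batch
	 * with one svm_migrate_copy_memory_gart() call.
	 */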
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

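	/* one allocation backs the two migrate pfn arrays and the scratch
	 * area: [ migrate.src | migrate.dst | scratch (see layout above) ]
	 */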
	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	uint64_t ttm_res_offset;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;
	ttm_res_offset = prange->offset << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
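		/* vram is read through its direct mapping, which must be
		 * physically contiguous per copy; flush the pending batch
		 * whenever this page does not follow the previous one
		 */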
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address
 * @end: range end virtual address (exclusive)
 * @trigger: reason for the migration, reported in SMI events
 * @fault_page: the device page that raised the CPU fault, if the migration
 *		was triggered by one, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
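	/* the source selection must match the pgmap type this vram was
	 * registered with in svm_migrate_init(): coherent pages when xgmi
	 * connects the device to the cpu, private device pages otherwise
	 */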
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason for the migration
 * @fault_page: the device page that raised the CPU fault, if the migration
 *		was triggered by one, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger, struct page *fault_page)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault information (vma, address)
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

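	/* lock order: svms.lock, then the parent range's migrate_mutex, then
	 * the child's (nested, since parent and child share a lock class),
	 * then the range locks while splitting; released in reverse order
	 */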
	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);

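	/* vram of a device with xgmi connected to the cpu is CPU-addressable
	 * through its aperture, so register it as coherent device memory at
	 * the aperture bus address; otherwise reserve an unused physical
	 * address range for private device pages, which have no
	 * CPU-accessible backing
	 */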
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}