/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is the MMU functionality provided on the GPU.
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there can be multiple GPUVM page tables active
 * at any given time.  The GPUVM page tables can contain a mix of
 * VRAM pages and system pages (both memory and MMIO) and system pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each active GPUVM has an ID associated with it and there is a page table
 * linked with each VMID.  When executing a command buffer,
 * the kernel tells the engine what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * The hardware supports up to 16 active GPUVMs at any given time.
 *
 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
 * as other features such as encryption and caching attributes.
 *
 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
 * addition to an aperture managed by a page table, VMID 0 also has
 * several other apertures.  There is an aperture for direct access to VRAM
 * and there is a legacy AGP aperture which just forwards accesses directly
 * to the matching system physical addresses (or IOVAs when an IOMMU is
 * present).  These apertures provide direct access to these memories without
 * incurring the overhead of a page table.  VMID 0 is used by the kernel
 * driver for tasks like memory management.
 *
 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
 * For user applications, each application can have their own unique GPUVM
 * address space.  The application manages the address space and the kernel
 * driver manages the GPUVM page tables for each process.  If a GPU client
 * accesses an invalid page, it will generate a GPU page fault, similar to
 * accessing an invalid page on a CPU.
 */
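
/*
 * Illustrative sketch (not part of the driver): with the common 4 KiB GPU
 * page size and 9 bits of translation per page table level, a GPU virtual
 * address is decoded roughly as shown below.  The exact split depends on
 * adev->vm_manager.num_level and block_size as computed by
 * amdgpu_vm_adjust_size() later in this file, so treat the constants here
 * only as an example.
 *
 *	#define EXAMPLE_GPU_PAGE_SHIFT	12	// 4 KiB GPU pages
 *	#define EXAMPLE_BITS_PER_LEVEL	9	// 512 entries per PD/PT
 *
 *	static unsigned int example_pt_index(u64 va, unsigned int level)
 *	{
 *		// level 0 is the page table, higher levels are directories
 *		unsigned int shift = EXAMPLE_GPU_PAGE_SHIFT +
 *				     level * EXAMPLE_BITS_PER_LEVEL;
 *
 *		return (va >> shift) & ((1u << EXAMPLE_BITS_PER_LEVEL) - 1);
 *	}
 */
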
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 */
struct amdgpu_vm_tlb_seq_struct {
	/**
	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
	 */
	struct amdgpu_vm *vm;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU, can also be used to remove the
 * pasid by passing in zero.
 *
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	if (vm->pasid) {
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;

		vm->pasid = 0;
	}

	if (pasid) {
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;

		vm->pasid = pasid;
	}

	return 0;
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	spin_lock(&vm_bo->vm->status_lock);
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	spin_unlock(&vm_bo->vm->status_lock);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and that change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent) {
		spin_lock(&vm_bo->vm->status_lock);
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
		spin_unlock(&vm_bo->vm->status_lock);
	} else {
		amdgpu_vm_bo_idle(vm_bo);
	}
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 * @vm: the VM which state machine to reset
 *
 * Move all vm_bo objects in the VM into a state where they will be updated
 * again during validation.
 */
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *vm_bo, *tmp;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->done, &vm->invalidated);
	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
		vm_bo->moved = true;
	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = vm_bo->bo;

		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
		else if (bo->parent)
			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	}
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return;

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);

	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
		return;

	/*
	 * we checked all the prerequisites, but it looks like this per vm bo
	 * is currently evicted. add the bo to the evicted list to make sure it
	 * is validated on next vm use to avoid fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_lock_pd - lock PD in drm_exec
 *
 * @vm: vm providing the BOs
 * @exec: drm execution context
 * @num_fences: number of extra fences to reserve
 *
 * Lock the VM root PD in the DRM execution context.
 */
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences)
{
	/* We need at least two fences for the VM PD/PT updates */
	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
				    2 + num_fences);
}
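
/*
 * Minimal usage sketch for amdgpu_vm_lock_pd() (illustrative only; real
 * callers such as the CS ioctl lock additional BOs inside the same retry
 * loop, and the drm_exec_init() arguments differ between kernel versions):
 *
 *	struct drm_exec exec;
 *	int r = 0;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERACTIVE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (r)
 *			break;
 *	}
 *	if (!r) {
 *		// ... queue page table updates while the PD is locked ...
 *	}
 *	drm_exec_fini(&exec);
 */
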
/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	spin_lock(&adev->mman.bdev.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&adev->mman.bdev.lru_lock);
}

/* Create scheduler entities for page table updates */
static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm)
{
	int r;

	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error;

	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				     adev->vm_manager.vm_pte_scheds,
				     adev->vm_manager.vm_pte_num_scheds, NULL);

error:
	drm_sched_entity_destroy(&vm->immediate);
	return r;
}

/* Destroy the entities for page table updates again */
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
{
	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);
}

/**
 * amdgpu_vm_generation - return the page table re-generation counter
 * @adev: the amdgpu_device
 * @vm: optional VM to check, might be NULL
 *
 * Returns a page table re-generation token to allow checking if submissions
 * are still valid to use this VM. The VM parameter might be NULL in which case
 * just the VRAM lost counter will be used.
 */
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;

	if (!vm)
		return result;

	result += vm->generation;
	/* Add one if the page tables will be re-generated on next CS */
	if (drm_sched_entity_error(&vm->delayed))
		++result;

	return result;
}
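
/*
 * Illustrative sketch of how the generation token is meant to be used
 * (hypothetical caller, not part of this file): sample the token when a
 * submission is prepared and compare it again before relying on the page
 * tables still being valid.  The job handling code uses a similar check
 * and typically fails the submission with -ECANCELED on mismatch.
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *
 *	// ... build the command submission ...
 *
 *	if (amdgpu_vm_generation(adev, vm) != gen) {
 *		// VRAM was lost or the page tables must be re-generated,
 *		// so the prepared submission must not be used as-is.
 *		return -ECANCELED;
 *	}
 */
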
/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base;
	struct amdgpu_bo *shadow;
	struct amdgpu_bo *bo;
	int r;

	if (drm_sched_entity_error(&vm->delayed)) {
		++vm->generation;
		amdgpu_vm_bo_reset_state_machine(vm);
		amdgpu_vm_fini_entities(vm);
		r = amdgpu_vm_init_entities(adev, vm);
		if (r)
			return r;
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;
		shadow = amdgpu_bo_shadowed(bo);

		r = validate(param, bo);
		if (r)
			return r;
		if (shadow) {
			r = validate(param, shadow);
			if (r)
				return r;
		}

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
			amdgpu_vm_bo_relocated(bo_base);
		}
		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;
	amdgpu_vm_eviction_unlock(vm);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if VM is not evicting.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool empty;
	bool ret;

	amdgpu_vm_eviction_lock(vm);
	ret = !vm->evicting;
	amdgpu_vm_eviction_unlock(vm);

	spin_lock(&vm->status_lock);
	empty = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ret && empty;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	if (job->vmid == 0)
		return false;

	if (job->vm_needs_flush || ring->has_compute_vm_bug)
		return true;

	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
		return true;

	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
		return true;

	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool spm_update_needed = job->spm_update_needed;
	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
		job->gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
		spm_update_needed = true;
	}

	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	amdgpu_ring_ib_begin(ring);
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
	    gds_switch_needed) {
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
	dma_fence_put(fence);

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	amdgpu_ring_ib_end(ring);
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
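
/*
 * Worked example (illustrative): with 64 KiB CPU pages and 4 KiB GPU pages,
 * for addr = 0x123456000:
 *
 *	pages_addr[addr >> PAGE_SHIFT]	selects the DMA address of the
 *					64 KiB CPU page,
 *	addr & ~PAGE_MASK		= 0x6000, the offset inside it,
 *	result &= 0xFFFFFFFFFFFFF000ULL	keeps that offset at 4 KiB
 *					granularity, i.e. it selects the GPU
 *					page within the CPU page.
 *
 * With 4 KiB CPU pages the OR and the mask cancel out and the result is
 * simply the DMA address of the page.
 */
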
/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_vm_bo_base *entry;
	bool flush_tlb_needed = false;
	LIST_HEAD(relocated);
	int r, idx;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->relocated, &relocated);
	spin_unlock(&vm->status_lock);

	if (list_empty(&relocated))
		return 0;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		goto error;

	list_for_each_entry(entry, &relocated, vm_status) {
		/* vm_flush_needed after updating moved PDEs */
		flush_tlb_needed |= entry->moved;

		r = amdgpu_vm_pde_update(&params, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;

	if (flush_tlb_needed)
		atomic64_inc(&vm->tlb_seq);

	while (!list_empty(&relocated)) {
		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);
	}

error:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @fence: unused
 * @cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
				 struct dma_fence_cb *cb)
{
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;

	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
	atomic64_inc(&tlb_cb->vm->tlb_seq);
	kfree(tlb_cb);
}

/**
 * amdgpu_vm_update_range - update a range in the vm page table
 *
 * @adev: amdgpu_device pointer to use for commands
 * @vm: the VM to update the range
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @flush_tlb: trigger tlb invalidation after update completed
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @vram_base: base for vram mappings
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code for failure.
 */
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
	struct amdgpu_res_cursor cursor;
	enum amdgpu_sync_mode sync_mode;
	int r, idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
	if (!tlb_cb) {
		r = -ENOMEM;
		goto error_unlock;
	}

	/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
	 * heavy-weight flush the TLB unconditionally.
	 */
	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);

	/*
	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
	 */
	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;
	params.pages_addr = pages_addr;
	params.unlocked = unlocked;

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping. Sync to moving fences before mapping.
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_free;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

	r = vm->update_funcs->prepare(&params, resv, sync_mode);
	if (r)
		goto error_free;

	amdgpu_res_first(pages_addr ? NULL : res, offset,
			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
	while (cursor.remaining) {
		uint64_t tmp, num_entries, addr;

		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
		if (pages_addr) {
			bool contiguous = true;

			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
				uint64_t pfn = cursor.start >> PAGE_SHIFT;
				uint64_t count;

				contiguous = pages_addr[pfn + 1] ==
					pages_addr[pfn] + PAGE_SIZE;

				tmp = num_entries /
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
				for (count = 2; count < tmp; ++count) {
					uint64_t idx = pfn + count;

					if (contiguous != (pages_addr[idx] ==
					    pages_addr[idx - 1] + PAGE_SIZE))
						break;
				}
				if (!contiguous)
					count--;
				num_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

			if (!contiguous) {
				addr = cursor.start;
				params.pages_addr = pages_addr;
			} else {
				addr = pages_addr[cursor.start >> PAGE_SHIFT];
				params.pages_addr = NULL;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
			addr = vram_base + cursor.start;
		} else {
			addr = 0;
		}

		tmp = start + num_entries;
		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
		if (r)
			goto error_free;

		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
		start = tmp;
	}

	r = vm->update_funcs->commit(&params, fence);

	if (flush_tlb || params.table_freed) {
		tlb_cb->vm = vm;
		if (fence && *fence &&
		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
					    amdgpu_vm_tlb_seq_cb)) {
			dma_fence_put(vm->last_tlb_flush);
			vm->last_tlb_flush = dma_fence_get(*fence);
		} else {
			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
		}
		tlb_cb = NULL;
	}

error_free:
	kfree(tlb_cb);

error_unlock:
	amdgpu_vm_eviction_unlock(vm);
	drm_dev_exit(idx);
	return r;
}
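
/*
 * Worked example of the contiguity scan above (illustrative): with 4 KiB
 * CPU and GPU pages and pages_addr = { 0x1000, 0x2000, 0x3000, 0x9000 },
 * the first three DMA addresses are contiguous and the fourth is not, so
 * the range is split: the first three PTEs are written as one linear run
 * starting at 0x1000 with params.pages_addr == NULL, and the remainder
 * falls back to per-page translation through params.pages_addr.
 */
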
static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
				    struct amdgpu_mem_stats *stats)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	if (!bo)
		return;

	/*
	 * For now ignore BOs which are currently locked and potentially
	 * changing their location.
	 */
	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
	    !dma_resv_trylock(bo->tbo.base.resv))
		return;

	amdgpu_bo_get_memory(bo, stats);
	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		dma_resv_unlock(bo->tbo.base.resv);
}

void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats)
{
	struct amdgpu_bo_va *bo_va, *tmp;

	spin_lock(&vm->status_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_resource *mem;
	struct dma_fence **last_update;
	bool flush_tlb = clear;
	struct dma_resv *resv;
	uint64_t vram_base;
	uint64_t flags;
	int r;

	if (clear || !bo) {
		mem = NULL;
		resv = vm->root.bo->tbo.base.resv;
	} else {
		struct drm_gem_object *obj = &bo->tbo.base;

		resv = bo->tbo.base.resv;
		if (obj->import_attach && bo_va->is_xgmi) {
			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
			struct drm_gem_object *gobj = dma_buf->priv;
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
				bo = gem_to_amdgpu_bo(gobj);
		}
		mem = bo->tbo.resource;
		if (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)
			pages_addr = bo->tbo.ttm->dma_address;
	}

	if (bo) {
		struct amdgpu_device *bo_adev;

		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

		if (amdgpu_bo_encrypted(bo))
			flags |= AMDGPU_PTE_TMZ;

		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
		vram_base = bo_adev->vm_manager.vram_base_offset;
	} else {
		flags = 0x0;
		vram_base = 0;
	}

	if (clear || (bo && bo->tbo.base.resv ==
		      vm->root.bo->tbo.base.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		flush_tlb = true;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		uint64_t update_flags = flags;

		/* Normally bo_va->flags only contains the READABLE and
		 * WRITEABLE bits here, but filter them again just in case.
		 */
		if (!(mapping->flags & AMDGPU_PTE_READABLE))
			update_flags &= ~AMDGPU_PTE_READABLE;
		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
			update_flags &= ~AMDGPU_PTE_WRITEABLE;

		/* Apply ASIC specific mapping flags */
		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

		trace_amdgpu_vm_bo_update(mapping);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
					   resv, mapping->start, mapping->last,
					   update_flags, mapping->offset,
					   vram_base, mem, pages_addr,
					   last_update);
		if (r)
			return r;
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
		uint32_t mem_type = bo->tbo.resource->mem_type;

		if (!(bo->preferred_domains &
		      amdgpu_mem_type_to_domain(mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;
	bo_va->base.moved = false;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		/* Add a callback for each fence in the reservation object */
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, fence);
	}
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 *
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
					   struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats &&
		    mapping->start < AMDGPU_GMC_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
					   mapping->start, mapping->last,
					   init_pte_value, 0, 0, NULL, NULL,
					   &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;

}
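
/*
 * Illustrative ordering sketch (hypothetical caller, simplified from what
 * the CS and KFD paths actually do): after the VM and its BOs are locked
 * and validated, the page tables are typically brought up to date in this
 * order, with per-BO amdgpu_vm_bo_update() calls in between as needed:
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, NULL);	// unmap freed ranges
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);	// remap moved BOs
 *	if (!r)
 *		r = amdgpu_vm_update_pdes(adev, vm, false); // fix up PDs
 *
 * Errors must be propagated so the submission is not run on stale
 * translations.
 */
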
/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va;
	struct dma_resv *resv;
	bool clear;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
		spin_lock(&vm->status_lock);
	}

	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
		if (r)
			return r;

		if (!clear)
			dma_resv_unlock(resv);
		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	bo_va->last_pt_update = dma_fence_get_stub();
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (!bo)
		return bo_va;

	dma_resv_assert_held(bo->tbo.base.resv);
	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
		bo_va->is_xgmi = true;
		/* Power up XGMI if it can be potentially used */
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
	}

	return bo_va;
}


/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
	    !bo_va->base.moved) {
		amdgpu_vm_bo_moved(&bo_va->base);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
		return -EINVAL;
	if (saddr + size <= saddr || offset + size <= offset)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
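
/*
 * Minimal usage sketch (illustrative only; error handling, flag conversion
 * from the UAPI and locking of the BO and VM are omitted, and the chosen
 * address and size are made up): map the first 1 MiB of a BO read/write at
 * a fixed GPU virtual address.
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000000ULL, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			     AMDGPU_PTE_VALID);
 *
 * The mapping only becomes visible to the GPU once amdgpu_vm_bo_update()
 * and the directory updates have been committed.
 */
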
/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
		return -EINVAL;
	if (saddr + size <= saddr || offset + size <= offset)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		struct amdgpu_bo *bo = before->bo_va->base.bo;

		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);

		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
		    !before->bo_va->base.moved)
			amdgpu_vm_bo_moved(&before->bo_va->base);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		struct amdgpu_bo *bo = after->bo_va->base.bo;

		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);

		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
		    !after->bo_va->base.moved)
			amdgpu_vm_bo_moved(&after->bo_va->base);
	} else {
		kfree(after);
	}

	return 0;
}
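
/*
 * Worked example (illustrative): assume a single mapping covering GPU pages
 * [0x100, 0x1ff] and a clear request for [0x140, 0x17f]. The code above
 * remembers two remainders before unmapping the overlapping part:
 *
 *	before: start 0x100, last 0x13f, offset unchanged
 *	after:  start 0x180, last 0x1ff, offset increased by
 *	        (0x180 - 0x100) << PAGE_SHIFT bytes
 *
 * The original mapping is trimmed to [0x140, 0x17f] and moved to the freed
 * list so its PTEs get cleared, while "before" and "after" are re-inserted
 * as independent mappings of the same bo_va.
 */
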
/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 *
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							  uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
			    ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}

/**
 * amdgpu_vm_bo_del - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_vm_bo_base **base;

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);

	if (bo) {
		dma_resv_assert_held(bo->tbo.base.resv);
		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			ttm_bo_set_bulk_move(&bo->tbo, NULL);

		for (base = &bo_va->base.bo->vm_bo; *base;
		     base = &(*base)->next) {
			if (*base != &bo_va->base)
				continue;

			*base = bo_va->base.next;
			break;
		}
	}

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);

	if (bo && bo_va->is_xgmi)
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);

	kfree(bo_va);
}

/**
 * amdgpu_vm_evictable - check if we can evict a VM
 *
 * @bo: A page table of the VM.
 *
 * Check if it is possible to evict a VM.
 */
bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;

	/* Page tables of a destroyed VM can go away immediately */
	if (!bo_base || !bo_base->vm)
		return true;

	/* Don't evict VM page tables while they are busy */
	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
		return false;

	/* Try to block ongoing updates */
	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
		return false;

	/* Don't evict VM page tables while they are updated */
	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
		amdgpu_vm_eviction_unlock(bo_base->vm);
		return false;
	}

	bo_base->vm->evicting = true;
	amdgpu_vm_eviction_unlock(bo_base->vm);
	return true;
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
		bo = bo->parent;

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

		if (bo_base->moved)
			continue;
		bo_base->moved = true;

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);
	}
}

/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size
 *
 * Returns:
 * VM page table as power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split it equally between PD and PTs.
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
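
/*
 * Worked example (illustrative): for a single level VM covering 64 GB,
 * ilog2(64) + 18 = 24 bits of address space have to be split between the
 * page directory and the page tables. Since 64 > 8 the block size becomes
 * (24 + 3) / 2 = 13, i.e. 8K PTEs per page table, with the remaining bits
 * handled by the page directory.
 */
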
                 * Underlying requirements and assumptions:
                 *
                 * - Need to map system memory and VRAM from all GPUs
                 * - VRAM from other GPUs not known here
                 * - Assume VRAM <= system memory
                 * - On GFX8 and older, VM space can be segmented for
                 *   different MTYPEs
                 * - Need to allow room for fragmentation, guard pages etc.
                 *
                 * This adds up to a rough guess of system memory x3.
                 * Round up to power of two to maximize the available
                 * VM size with the given page table size.
                 */
                si_meminfo(&si);
                phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
                               (1 << 30) - 1) >> 30;
                vm_size = roundup_pow_of_two(
                        min(max(phys_ram_gb * 3, min_vm_size), max_size));
        }

        /* vm_size is in GB, max_pfn counts 4 KiB GPU pages */
        adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

        tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
        if (amdgpu_vm_block_size != -1)
                tmp >>= amdgpu_vm_block_size - 9;
        tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
        adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
        switch (adev->vm_manager.num_level) {
        case 3:
                adev->vm_manager.root_level = AMDGPU_VM_PDB2;
                break;
        case 2:
                adev->vm_manager.root_level = AMDGPU_VM_PDB1;
                break;
        case 1:
                adev->vm_manager.root_level = AMDGPU_VM_PDB0;
                break;
        default:
                dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
        }
        /* block size depends on vm size and hw setup */
        if (amdgpu_vm_block_size != -1)
                adev->vm_manager.block_size =
                        min((unsigned)amdgpu_vm_block_size, max_bits
                            - AMDGPU_GPU_PAGE_SHIFT
                            - 9 * adev->vm_manager.num_level);
        else if (adev->vm_manager.num_level > 1)
                adev->vm_manager.block_size = 9;
        else
                adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

        if (amdgpu_vm_fragment_size == -1)
                adev->vm_manager.fragment_size = fragment_size_default;
        else
                adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

        DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
                 vm_size, adev->vm_manager.num_level + 1,
                 adev->vm_manager.block_size,
                 adev->vm_manager.fragment_size);
}

/**
 * amdgpu_vm_wait_idle - wait for the VM to become idle
 *
 * @vm: VM object to wait for
 * @timeout: timeout to wait for VM to become idle
 */
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
        timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
                                        DMA_RESV_USAGE_BOOKKEEP,
                                        true, timeout);
        if (timeout <= 0)
                return timeout;

        return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @xcp_id: GPU partition selection id
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
{
        struct amdgpu_bo *root_bo;
        struct amdgpu_bo_vm *root;
        int r, i;

        vm->va = RB_ROOT_CACHED;
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = false;
        INIT_LIST_HEAD(&vm->evicted);
        INIT_LIST_HEAD(&vm->relocated);
        INIT_LIST_HEAD(&vm->moved);
        INIT_LIST_HEAD(&vm->idle);
        INIT_LIST_HEAD(&vm->invalidated);
        spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->freed);
        INIT_LIST_HEAD(&vm->done);
        INIT_LIST_HEAD(&vm->pt_freed);
        INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);

        r = amdgpu_vm_init_entities(adev, vm);
        if (r)
                return r;

        vm->pte_support_ats = false;
        vm->is_compute_context = false;

        vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                    AMDGPU_VM_USE_CPU_FOR_GFX);

        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
        WARN_ONCE((vm->use_cpu_for_update &&
                   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");

        if (vm->use_cpu_for_update)
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
        else
                vm->update_funcs = &amdgpu_vm_sdma_funcs;

        vm->last_update = dma_fence_get_stub();
        vm->last_unlocked = dma_fence_get_stub();
        vm->last_tlb_flush = dma_fence_get_stub();
        vm->generation = 0;

        mutex_init(&vm->eviction_lock);
        vm->evicting = false;

        r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
                                false, &root, xcp_id);
        if (r)
                goto error_free_delayed;
        root_bo = &root->bo;
        r = amdgpu_bo_reserve(root_bo, true);
        if (r)
                goto error_free_root;

        r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
        if (r)
                goto error_unreserve;

        amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);

        r = amdgpu_vm_pt_clear(adev, vm, root, false);
        if (r)
                goto error_unreserve;

        amdgpu_bo_unreserve(vm->root.bo);

        INIT_KFIFO(vm->faults);

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(vm->root.bo);

error_free_root:
        amdgpu_bo_unref(&root->shadow);
        amdgpu_bo_unref(&root_bo);
        vm->root.bo = NULL;

error_free_delayed:
        dma_fence_put(vm->last_tlb_flush);
        dma_fence_put(vm->last_unlocked);
        amdgpu_vm_fini_entities(vm);

        return r;
}

/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
        int r;

        r = amdgpu_bo_reserve(vm->root.bo, true);
        if (r)
                return r;

        /* Check if PD needs to be reinitialized and do it before
         * changing any other state, in case it fails.
         */
        if (pte_support_ats != vm->pte_support_ats) {
                /* Sanity checks */
                if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
                        r = -EINVAL;
                        goto unreserve_bo;
                }

                vm->pte_support_ats = pte_support_ats;
                r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
                                       false);
                if (r)
                        goto unreserve_bo;
        }

        /* Update VM state */
        vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
        WARN_ONCE((vm->use_cpu_for_update &&
                   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");

        if (vm->use_cpu_for_update) {
                /* Sync with last SDMA update/clear before switching to CPU */
                r = amdgpu_bo_sync_wait(vm->root.bo,
                                        AMDGPU_FENCE_OWNER_UNDEFINED, true);
                if (r)
                        goto unreserve_bo;

                vm->update_funcs = &amdgpu_vm_cpu_funcs;
                r = amdgpu_vm_pt_map_tables(adev, vm);
                if (r)
                        goto unreserve_bo;
        } else {
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        }

        dma_fence_put(vm->last_update);
        vm->last_update = dma_fence_get_stub();
        vm->is_compute_context = true;

        /* Free the shadow bo for compute VM */
        amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);

unreserve_bo:
        amdgpu_bo_unreserve(vm->root.bo);
        return r;
}

/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 * pasid from the vm. Compute should stop using the vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        amdgpu_vm_set_pasid(adev, vm, 0);
        vm->is_compute_context = false;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all BOs from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
        struct amdgpu_bo *root;
        unsigned long flags;
        int i;

        amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

        flush_work(&vm->pt_free_work);

        root = amdgpu_bo_ref(vm->root.bo);
        amdgpu_bo_reserve(root, true);
        amdgpu_vm_set_pasid(adev, vm, 0);
        dma_fence_wait(vm->last_unlocked, false);
        dma_fence_put(vm->last_unlocked);
        dma_fence_wait(vm->last_tlb_flush, false);
        /* Make sure that all fence callbacks have completed */
        spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
        spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
        dma_fence_put(vm->last_tlb_flush);

        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
                        amdgpu_vm_prt_fini(adev, vm);
                        prt_fini_needed = false;
                }

                list_del(&mapping->list);
                amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
        }

        amdgpu_vm_pt_free_root(adev, vm);
        amdgpu_bo_unreserve(root);
        amdgpu_bo_unref(&root);
        WARN_ON(vm->root.bo);

        amdgpu_vm_fini_entities(vm);

        if (!RB_EMPTY_ROOT(&vm->va.rb_root))
                dev_err(adev->dev, "still active bo inside vm\n");

        rbtree_postorder_for_each_entry_safe(mapping, tmp,
                                             &vm->va.rb_root, rb) {
                /* Don't remove the mapping here, we don't want to trigger a
                 * rebalance and the tree is about to be destroyed anyway.
                 */
                list_del(&mapping->list);
                kfree(mapping);
        }

        dma_fence_put(vm->last_update);

        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
                if (vm->reserved_vmid[i]) {
                        amdgpu_vmid_free_reserved(adev, i);
                        vm->reserved_vmid[i] = false;
                }
        }
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
        unsigned i;

        /* Concurrent flushes are only possible starting with Vega10 and
         * are broken on Navi10 and Navi14.
         */
        adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
                                              adev->asic_type == CHIP_NAVI10 ||
                                              adev->asic_type == CHIP_NAVI14);
        amdgpu_vmid_mgr_init(adev);

        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;

        spin_lock_init(&adev->vm_manager.prt_lock);
        atomic_set(&adev->vm_manager.num_prt_users, 0);

        /* Unless overridden by the user, compute VM page tables are only
         * updated by the CPU on large BAR systems by default.
         */
#ifdef CONFIG_X86_64
        if (amdgpu_vm_update_mode == -1) {
                /* For ASICs with VF MMIO access protection,
                 * avoid using the CPU for VM table updates.
                 */
                if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    !amdgpu_sriov_vf_mmio_access_protection(adev))
                        adev->vm_manager.vm_update_mode =
                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
                else
                        adev->vm_manager.vm_update_mode = 0;
        } else
                adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
        adev->vm_manager.vm_update_mode = 0;
#endif

        xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
        WARN_ON(!xa_empty(&adev->vm_manager.pasids));
        xa_destroy(&adev->vm_manager.pasids);

        amdgpu_vmid_mgr_fini(adev);
}

/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        union drm_amdgpu_vm *args = data;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        /* No valid flags defined yet */
        if (args->in.flags)
                return -EINVAL;

        switch (args->in.op) {
        case AMDGPU_VM_OP_RESERVE_VMID:
                /* We only need to reserve a VMID from the gfxhub */
                if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
                        amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
                        fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
                }
                break;
        case AMDGPU_VM_OP_UNRESERVE_VMID:
                if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
                        amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
                        fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: amdgpu device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                             struct amdgpu_task_info *task_info)
{
        struct amdgpu_vm *vm;
        unsigned long flags;

        xa_lock_irqsave(&adev->vm_manager.pasids, flags);

        vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm)
                *task_info = vm->task_info;

        xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
}

/**
 * amdgpu_vm_set_task_info - Sets the VM's task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
        if (vm->task_info.pid)
                return;

        vm->task_info.pid = current->pid;
        get_task_comm(vm->task_info.task_name, current);

        if (current->group_leader->mm != current->mm)
                return;

        vm->task_info.tgid = current->group_leader->pid;
        get_task_comm(vm->task_info.process_name, current->group_leader);
}

/**
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
 * @vmid: VMID, only used for GFX 9.4.3.
 * @node_id: Node_id received in IH cookie. Only applicable for
 *           GFX 9.4.3.
 * @addr: Address of the fault
 * @write_fault: true if it is a write fault, false if it is a read fault
 *
 * Try to gracefully handle a VM fault. Return true if the fault was handled
 * and shouldn't be reported any more.
 */
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            u32 vmid, u32 node_id, uint64_t addr,
                            bool write_fault)
{
        bool is_compute_context = false;
        struct amdgpu_bo *root;
        unsigned long irqflags;
        uint64_t value, flags;
        struct amdgpu_vm *vm;
        int r;

        xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
        vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm) {
                root = amdgpu_bo_ref(vm->root.bo);
                is_compute_context = vm->is_compute_context;
        } else {
                root = NULL;
        }
        xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

        if (!root)
                return false;

        /* Convert the fault address into a GPU page number */
        addr /= AMDGPU_GPU_PAGE_SIZE;

        if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
                                                           node_id, addr, write_fault)) {
                amdgpu_bo_unref(&root);
                return true;
        }

        r = amdgpu_bo_reserve(root, true);
        if (r)
                goto error_unref;

        /* Double check that the VM still exists */
        xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
        vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm && vm->root.bo != root)
                vm = NULL;
        xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
        if (!vm)
                goto error_unlock;

        flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
                AMDGPU_PTE_SYSTEM;

        if (is_compute_context) {
                /* Intentionally setting invalid PTE flag
                 * combination to force a no-retry-fault
                 */
                flags = AMDGPU_VM_NORETRY_FLAGS;
                value = 0;
        } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
                /* Redirect the access to the dummy page */
                value = adev->dummy_page_addr;
                flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
                        AMDGPU_PTE_WRITEABLE;
        } else {
                /* Let the hw retry silently on the PTE */
                value = 0;
        }

        r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed to reserve fence slot (%d)\n", r);
                goto error_unlock;
        }

        r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
                                   addr, flags, value, 0, NULL, NULL, NULL);
        if (r)
                goto error_unlock;

        r = amdgpu_vm_update_pdes(adev, vm, true);

error_unlock:
        amdgpu_bo_unreserve(root);
        if (r < 0)
                DRM_ERROR("Can't handle page fault (%d)\n", r);

error_unref:
        amdgpu_bo_unref(&root);

        return false;
}

#if defined(CONFIG_DEBUG_FS)
/**
 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
 *
 * @vm: Requested VM for printing BO info
 * @m: debugfs file
 *
 * Print BO information in debugfs file for the VM
 */
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
{
        struct amdgpu_bo_va *bo_va, *tmp;
        u64 total_idle = 0;
        u64 total_evicted = 0;
        u64 total_relocated = 0;
        u64 total_moved = 0;
        u64 total_invalidated = 0;
        u64 total_done = 0;
        unsigned int total_idle_objs = 0;
        unsigned int total_evicted_objs = 0;
        unsigned int total_relocated_objs = 0;
        unsigned int total_moved_objs = 0;
        unsigned int total_invalidated_objs = 0;
        unsigned int total_done_objs = 0;
        unsigned int id = 0;

        spin_lock(&vm->status_lock);
        seq_puts(m, "\tIdle BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        total_idle_objs = id;
        id = 0;

        seq_puts(m, "\tEvicted BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        total_evicted_objs = id;
        id = 0;

        seq_puts(m, "\tRelocated BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        total_relocated_objs = id;
        id = 0;

        seq_puts(m, "\tMoved BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        total_moved_objs = id;
        id = 0;

        seq_puts(m, "\tInvalidated BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        total_invalidated_objs = id;
        id = 0;

        seq_puts(m, "\tDone BOs:\n");
        list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
                if (!bo_va->base.bo)
                        continue;
                total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
        }
        spin_unlock(&vm->status_lock);
        total_done_objs = id;

        seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
                   total_idle_objs);
        seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
                   total_evicted_objs);
        seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
                   total_relocated_objs);
        seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
                   total_moved_objs);
        seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
                   total_invalidated_objs);
        seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
                   total_done_objs);
}
#endif