/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs, however rather than
 * there being a single global GART table for the entire GPU, there are
 * multiple VM page tables active at any given time. The VM page tables can
 * contain a mix of VRAM pages and system memory pages, and system memory
 * pages can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
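/*
 * Illustrative sketch of the address decomposition (not driver code; assumes
 * block_size == 9 and root_level == AMDGPU_VM_PDB2 for the example). The real
 * shifts come from amdgpu_vm_level_shift() and the masks from
 * amdgpu_vm_entries_mask() below:
 *
 *	uint64_t pfn  = addr >> AMDGPU_GPU_PAGE_SHIFT; // 4KB GPU pages
 *	unsigned int root = pfn >> 27;                 // PDB2 (root) index
 *	unsigned int pdb1 = (pfn >> 18) & 0x1ff;       // PDB1 index
 *	unsigned int pdb0 = (pfn >> 9) & 0x1ff;        // PDB0 index
 *	unsigned int ptb  = pfn & 0x1ff;               // PTB (leaf) index
 */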
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU; can also be used to remove the
 * pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	if (vm->pasid) {
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;

		vm->pasid = 0;
	}

	if (pasid) {
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;

		vm->pasid = pasid;
	}

	return 0;
}

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return 1;
	}
	return 0;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which needs the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
	unsigned shift;

	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}
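/*
 * Worked example for amdgpu_vm_level_shift() (assumed configuration, for
 * illustration only): with block_size == 9 and root_level == AMDGPU_VM_PDB2,
 *
 *	PDB2: 9 * 2 + 9 = 27	(each root entry covers 1 << 27 pfns = 512GB)
 *	PDB1: 9 * 1 + 9 = 18	(1GB per entry)
 *	PDB0: 9 * 0 + 9 = 9	(2MB per entry)
 *	PTB:  0			(4KB per entry)
 *
 * where the per-entry coverage is (1 << shift) * AMDGPU_GPU_PAGE_SIZE.
 */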
/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and that change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent)
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	else
		amdgpu_vm_bo_idle(vm_bo);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}
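/*
 * Summary of the vm_bo state machine implemented by the helpers above
 * (a descriptive aid, assuming the usual command submission flow):
 *
 *	evicted     -> moved/relocated  when validated on next VM use
 *	moved       -> idle             once the change hits the page tables
 *	relocated   -> idle             once the parent PD entry is updated
 *	invalidated -> done             once the mapping update is committed
 *
 * Per VM BOs (sharing the root PD reservation) travel through
 * evicted/moved/idle; independently reserved BOs use invalidated/done
 * instead.
 */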
/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned shift, num_entries;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

	if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
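/*
 * Usage sketch for the iterator above (illustrative only; do_something() is a
 * placeholder, see amdgpu_vm_free_pts() below for a real user). The traversal
 * visits children before their parents, so @entry can be freed safely while
 * iterating:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_bo_base *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		do_something(entry);
 */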
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.bo->tbo;
	/* Two for VM updates, one for TTM and one for the CS job */
	entry->tv.num_shared = 4;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 * LRU.
 */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_bo *abo;
	struct amdgpu_vm_bo_base *bo_base;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	if (bo->pin_count)
		return;

	abo = ttm_to_amdgpu_bo(bo);
	if (!abo->parent)
		return;
	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			vm->bulk_moveable = false;
	}
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&adev->mman.bdev.lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&adev->mman.bdev.lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&adev->mman.bdev.lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;
		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
					&vm->lru_bulk_move);
		if (shadow)
			ttm_bo_move_to_lru_tail(&shadow->tbo,
						shadow->tbo.resource,
						&vm->lru_bulk_move);
	}
	spin_unlock(&adev->mman.bdev.lru_lock);

	vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r;

	vm->bulk_moveable &= list_empty(&vm->evicted);

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;
		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);

		r = validate(param, bo);
		if (r)
			return r;
		if (shadow) {
			r = validate(param, shadow);
			if (r)
				return r;
		}

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;
	amdgpu_vm_eviction_unlock(vm);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo_vm *vmbo,
			      bool immediate)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	struct amdgpu_bo *bo = &vmbo->bo;
	unsigned entries, ats_entries;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_bo_base *pt;

		pt = ancestor->vm_bo;
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (vmbo->shadow) {
		struct amdgpu_bo *shadow = vmbo->shadow;

		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
		if (r)
			return r;
	}

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
					     value, flags);
		if (r)
			return r;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
					     value, flags);
		if (r)
			return r;
	}

	return vm->update_funcs->commit(&params, NULL);
}
/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 */
static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       int level, bool immediate,
			       struct amdgpu_bo_vm **vmbo)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	unsigned int num_entries;
	int r;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_bo_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
	if (r)
		return r;

	bo = &(*vmbo)->bo;
	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
		(*vmbo)->shadow = NULL;
		return 0;
	}

	if (!bp.resv)
		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
				      NULL));
	resv = bp.resv;
	memset(&bp, 0, sizeof(bp));
	bp.size = amdgpu_vm_bo_size(adev, level);
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);

	if (!resv)
		dma_resv_unlock(bo->tbo.base.resv);

	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
	amdgpu_bo_add_to_shadow_list(*vmbo);

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 1 if page table needed to be allocated, 0 if page table was already
 * allocated, negative errno if an error occurred.
 */
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       struct amdgpu_vm_pt_cursor *cursor,
			       bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
	if (r)
		return r;

	/* Keep a reference to the root directory to avoid
	 * freeing them up in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt_bo);
	return r;
}
/**
 * amdgpu_vm_free_table - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_bo *shadow;

	if (!entry->bo)
		return;
	shadow = amdgpu_bo_shadowed(entry->bo);
	entry->bo->vm_bo = NULL;
	list_del(&entry->vm_status);
	amdgpu_bo_unref(&shadow);
	amdgpu_bo_unref(&entry->bo);
}

/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 * @start: optional cursor where to start freeing PDs/PTs
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       struct amdgpu_vm_pt_cursor *start)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	vm->bulk_moveable = false;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
		amdgpu_vm_free_table(entry);

	if (start)
		amdgpu_vm_free_table(start->entry);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned patch_offset = 0;
	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
	int r;

	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
	}

	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
	dma_fence_put(fence);

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
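/*
 * Illustrative sketch for amdgpu_vm_map_gart() (assumed values, not driver
 * code). The OR of the in-page offset matters when the CPU page size is
 * larger than the 4KB GPU page size; assuming 64KB CPU pages
 * (PAGE_SHIFT == 16):
 *
 *	// addr = 0x1234b000: pages_addr[0x1234] gives the CPU page's DMA
 *	// address and the 0xb000 offset selects the 4KB GPU page inside it.
 *	dma_addr_t pages[0x2000] = { [0x1234] = 0xabcd0000 };
 *	uint64_t pte = amdgpu_vm_map_gart(pages, 0x1234b000);
 *	// pte == 0xabcd0000 | 0xb000 == 0xabcdb000
 */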
/**
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
				struct amdgpu_vm *vm,
				struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo = parent->bo, *pbo;
	uint64_t pde, pt, flags;
	unsigned level;

	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}

/**
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
		if (entry->bo && !entry->moved)
			amdgpu_vm_bo_relocated(entry);
}

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate)
{
	struct amdgpu_vm_update_params params;
	int r;

	if (list_empty(&vm->relocated))
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_bo_base *entry;

		entry = list_first_entry(&vm->relocated,
					 struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;
	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
				   struct amdgpu_bo_vm *pt, unsigned int level,
				   uint64_t pe, uint64_t addr,
				   unsigned int count, uint32_t incr,
				   uint64_t flags)
{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}
/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not contiguous */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}
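/*
 * Worked example for the fragment computation above (assumed values, for
 * illustration only, assuming max_frag >= 3): mapping pfns [0x8, 0x18) gives
 *
 *	ffs(0x8) - 1    = 3	(start is aligned to 8 pages)
 *	fls64(0x10) - 1 = 4	(16 pages remain)
 *	*frag = min(3, 4) = 3	-> an 8-page (32KB) fragment
 *	*frag_end = 0x8 + (1 << 3) = 0x10
 *
 * so PTEs for [0x8, 0x10) are written with AMDGPU_PTE_FRAG(3) and the
 * remainder [0x10, 0x18) is handled by the next iteration.
 */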
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_alloc_pts(params->adev, params->vm,
						&cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
						       1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    nptes, dst, incr, upd_flags,
						    vm->task_info.pid,
						    vm->immediate.fence_context);
			amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
					       cursor.level, pe_start, dst,
					       nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_fragment(params, frag_start, end,
						   flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 *
			 * Update the tables with the flags and addresses and
			 * free up subsequent tables in the case of huge pages
			 * or freed up areas. This is the maximum you can
			 * free, because all other page tables are not
			 * completely covered by the range and so potentially
			 * still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->table_freed = true;
					amdgpu_vm_free_pts(adev, params->vm, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer of the VM
 * @bo_adev: amdgpu_device pointer of the mapped BO
 * @vm: requested vm
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 * @table_freed: return true if page table is freed
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				struct amdgpu_device *bo_adev,
				struct amdgpu_vm *vm, bool immediate,
				bool unlocked, struct dma_resv *resv,
				uint64_t start, uint64_t last,
				uint64_t flags, uint64_t offset,
				struct ttm_resource *res,
				dma_addr_t *pages_addr,
				struct dma_fence **fence,
				bool *table_freed)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_res_cursor cursor;
	enum amdgpu_sync_mode sync_mode;
	int r, idx;

	if (!drm_dev_enter(&adev->ddev, &idx))
		return -ENODEV;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;
	params.pages_addr = pages_addr;
	params.unlocked = unlocked;

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping. Sync to moving fences before mapping.
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_unlock;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

	r = vm->update_funcs->prepare(&params, resv, sync_mode);
	if (r)
		goto error_unlock;

	amdgpu_res_first(pages_addr ? NULL : res, offset,
			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
	while (cursor.remaining) {
		uint64_t tmp, num_entries, addr;

		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
		if (pages_addr) {
			bool contiguous = true;

			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
				uint64_t pfn = cursor.start >> PAGE_SHIFT;
				uint64_t count;

				contiguous = pages_addr[pfn + 1] ==
					pages_addr[pfn] + PAGE_SIZE;

				tmp = num_entries /
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
				for (count = 2; count < tmp; ++count) {
					uint64_t idx = pfn + count;

					if (contiguous != (pages_addr[idx] ==
					    pages_addr[idx - 1] + PAGE_SIZE))
						break;
				}
				num_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

			if (!contiguous) {
				addr = cursor.start;
				params.pages_addr = pages_addr;
			} else {
				addr = pages_addr[cursor.start >> PAGE_SHIFT];
				params.pages_addr = NULL;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
			addr = bo_adev->vm_manager.vram_base_offset +
				cursor.start;
		} else {
			addr = 0;
		}

		tmp = start + num_entries;
		r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags);
		if (r)
			goto error_unlock;

		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
		start = tmp;
	}

	r = vm->update_funcs->commit(&params, fence);

	if (table_freed)
		*table_freed = *table_freed || params.table_freed;

error_unlock:
	amdgpu_vm_eviction_unlock(vm);
	drm_dev_exit(idx);
	return r;
}
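/**
 * amdgpu_vm_get_memory - accumulate the memory usage of a VM's BOs
 *
 * @vm: VM to get the memory usage from
 * @vram_mem: accumulated VRAM usage in bytes
 * @gtt_mem: accumulated GTT usage in bytes
 * @cpu_mem: accumulated CPU memory usage in bytes
 *
 * Walk all state lists of the VM (idle, evicted, relocated, moved,
 * invalidated and done) and add up the memory used by each BO.
 */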
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	struct amdgpu_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	spin_lock(&vm->invalidated_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
				     gtt_mem, cpu_mem);
	}
	spin_unlock(&vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 * @table_freed: return true if page table is freed
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
			bool clear, bool *table_freed)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_resource *mem;
	struct dma_fence **last_update;
	struct dma_resv *resv;
	uint64_t flags;
	struct amdgpu_device *bo_adev = adev;
	int r;

	if (clear || !bo) {
		mem = NULL;
		resv = vm->root.bo->tbo.base.resv;
	} else {
		struct drm_gem_object *obj = &bo->tbo.base;

		resv = bo->tbo.base.resv;
		if (obj->import_attach && bo_va->is_xgmi) {
			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
			struct drm_gem_object *gobj = dma_buf->priv;
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
				bo = gem_to_amdgpu_bo(gobj);
		}
		mem = bo->tbo.resource;
		if (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)
			pages_addr = bo->tbo.ttm->dma_address;
	}

	if (bo) {
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

		if (amdgpu_bo_encrypted(bo))
			flags |= AMDGPU_PTE_TMZ;

		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	} else {
		flags = 0x0;
	}

	if (clear || (bo && bo->tbo.base.resv ==
		      vm->root.bo->tbo.base.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		uint64_t update_flags = flags;

		/* Normally bo_va->flags should only contain the READABLE and
		 * WRITEABLE bits here, but just in case we filter the flags
		 * again.
		 */
		if (!(mapping->flags & AMDGPU_PTE_READABLE))
			update_flags &= ~AMDGPU_PTE_READABLE;
		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
			update_flags &= ~AMDGPU_PTE_WRITEABLE;

		/* Apply ASIC specific mapping flags */
		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

		trace_amdgpu_vm_bo_update(mapping);

		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
						resv, mapping->start,
						mapping->last, update_flags,
						mapping->offset, mem,
						pages_addr, last_update, table_freed);
		if (r)
			return r;
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
		uint32_t mem_type = bo->tbo.resource->mem_type;

		if (!(bo->preferred_domains &
		      amdgpu_mem_type_to_domain(mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}
2057 amdgpu_vm_prt_cb)) 2058 amdgpu_vm_prt_cb(fence, &cb->cb); 2059 } 2060 } 2061 2062 /** 2063 * amdgpu_vm_free_mapping - free a mapping 2064 * 2065 * @adev: amdgpu_device pointer 2066 * @vm: requested vm 2067 * @mapping: mapping to be freed 2068 * @fence: fence of the unmap operation 2069 * 2070 * Free a mapping and make sure we decrease the PRT usage count if applicable. 2071 */ 2072 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, 2073 struct amdgpu_vm *vm, 2074 struct amdgpu_bo_va_mapping *mapping, 2075 struct dma_fence *fence) 2076 { 2077 if (mapping->flags & AMDGPU_PTE_PRT) 2078 amdgpu_vm_add_prt_cb(adev, fence); 2079 kfree(mapping); 2080 } 2081 2082 /** 2083 * amdgpu_vm_prt_fini - finish all prt mappings 2084 * 2085 * @adev: amdgpu_device pointer 2086 * @vm: requested vm 2087 * 2088 * Register a cleanup callback to disable PRT support after VM dies. 2089 */ 2090 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2091 { 2092 struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2093 struct dma_fence *excl, **shared; 2094 unsigned i, shared_count; 2095 int r; 2096 2097 r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); 2098 if (r) { 2099 /* Not enough memory to grab the fence list, as last resort 2100 * block for all the fences to complete. 2101 */ 2102 dma_resv_wait_timeout(resv, true, false, 2103 MAX_SCHEDULE_TIMEOUT); 2104 return; 2105 } 2106 2107 /* Add a callback for each fence in the reservation object */ 2108 amdgpu_vm_prt_get(adev); 2109 amdgpu_vm_add_prt_cb(adev, excl); 2110 2111 for (i = 0; i < shared_count; ++i) { 2112 amdgpu_vm_prt_get(adev); 2113 amdgpu_vm_add_prt_cb(adev, shared[i]); 2114 } 2115 2116 kfree(shared); 2117 } 2118 2119 /** 2120 * amdgpu_vm_clear_freed - clear freed BOs in the PT 2121 * 2122 * @adev: amdgpu_device pointer 2123 * @vm: requested vm 2124 * @fence: optional resulting fence (unchanged if no work needed to be done 2125 * or if an error occurred) 2126 * 2127 * Make sure all freed BOs are cleared in the PT. 2128 * PTs have to be reserved and mutex must be locked! 2129 * 2130 * Returns: 2131 * 0 for success. 2132 * 2133 */ 2134 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 2135 struct amdgpu_vm *vm, 2136 struct dma_fence **fence) 2137 { 2138 struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2139 struct amdgpu_bo_va_mapping *mapping; 2140 uint64_t init_pte_value = 0; 2141 struct dma_fence *f = NULL; 2142 int r; 2143 2144 while (!list_empty(&vm->freed)) { 2145 mapping = list_first_entry(&vm->freed, 2146 struct amdgpu_bo_va_mapping, list); 2147 list_del(&mapping->list); 2148 2149 if (vm->pte_support_ats && 2150 mapping->start < AMDGPU_GMC_HOLE_START) 2151 init_pte_value = AMDGPU_PTE_DEFAULT_ATC; 2152 2153 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, 2154 resv, mapping->start, 2155 mapping->last, init_pte_value, 2156 0, NULL, NULL, &f, NULL); 2157 amdgpu_vm_free_mapping(adev, vm, mapping, f); 2158 if (r) { 2159 dma_fence_put(f); 2160 return r; 2161 } 2162 } 2163 2164 if (fence && f) { 2165 dma_fence_put(*fence); 2166 *fence = f; 2167 } else { 2168 dma_fence_put(f); 2169 } 2170 2171 return 0; 2172 2173 } 2174 2175 /** 2176 * amdgpu_vm_handle_moved - handle moved BOs in the PT 2177 * 2178 * @adev: amdgpu_device pointer 2179 * @vm: requested vm 2180 * 2181 * Make sure all BOs which are moved are updated in the PTs. 2182 * 2183 * Returns: 2184 * 0 for success. 2185 * 2186 * PTs have to be reserved! 
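 *
 * Example caller (an illustrative sketch; the command submission path does
 * roughly this after validating and reserving all BOs):
 *
 *	r = amdgpu_vm_handle_moved(adev, vm);
 *	if (r)
 *		return r;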
2187 */ 2188 int amdgpu_vm_handle_moved(struct amdgpu_device *adev, 2189 struct amdgpu_vm *vm) 2190 { 2191 struct amdgpu_bo_va *bo_va, *tmp; 2192 struct dma_resv *resv; 2193 bool clear; 2194 int r; 2195 2196 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { 2197 /* Per VM BOs never need to be cleared in the page tables */ 2198 r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); 2199 if (r) 2200 return r; 2201 } 2202 2203 spin_lock(&vm->invalidated_lock); 2204 while (!list_empty(&vm->invalidated)) { 2205 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, 2206 base.vm_status); 2207 resv = bo_va->base.bo->tbo.base.resv; 2208 spin_unlock(&vm->invalidated_lock); 2209 2210 /* Try to reserve the BO to avoid clearing its ptes */ 2211 if (!amdgpu_vm_debug && dma_resv_trylock(resv)) 2212 clear = false; 2213 /* Somebody else is using the BO right now */ 2214 else 2215 clear = true; 2216 2217 r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); 2218 if (r) 2219 return r; 2220 2221 if (!clear) 2222 dma_resv_unlock(resv); 2223 spin_lock(&vm->invalidated_lock); 2224 } 2225 spin_unlock(&vm->invalidated_lock); 2226 2227 return 0; 2228 } 2229 2230 /** 2231 * amdgpu_vm_bo_add - add a bo to a specific vm 2232 * 2233 * @adev: amdgpu_device pointer 2234 * @vm: requested vm 2235 * @bo: amdgpu buffer object 2236 * 2237 * Add @bo into the requested vm. 2238 * Add @bo to the list of bos associated with the vm 2239 * 2240 * Returns: 2241 * Newly added bo_va or NULL for failure 2242 * 2243 * Object has to be reserved! 2244 */ 2245 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 2246 struct amdgpu_vm *vm, 2247 struct amdgpu_bo *bo) 2248 { 2249 struct amdgpu_bo_va *bo_va; 2250 2251 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); 2252 if (bo_va == NULL) { 2253 return NULL; 2254 } 2255 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); 2256 2257 bo_va->ref_count = 1; 2258 INIT_LIST_HEAD(&bo_va->valids); 2259 INIT_LIST_HEAD(&bo_va->invalids); 2260 2261 if (!bo) 2262 return bo_va; 2263 2264 if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) { 2265 bo_va->is_xgmi = true; 2266 /* Power up XGMI if it can be potentially used */ 2267 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20); 2268 } 2269 2270 return bo_va; 2271 } 2272 2273 2274 /** 2275 * amdgpu_vm_bo_insert_map - insert a new mapping 2276 * 2277 * @adev: amdgpu_device pointer 2278 * @bo_va: bo_va to store the address 2279 * @mapping: the mapping to insert 2280 * 2281 * Insert a new mapping into all structures. 2282 */ 2283 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, 2284 struct amdgpu_bo_va *bo_va, 2285 struct amdgpu_bo_va_mapping *mapping) 2286 { 2287 struct amdgpu_vm *vm = bo_va->base.vm; 2288 struct amdgpu_bo *bo = bo_va->base.bo; 2289 2290 mapping->bo_va = bo_va; 2291 list_add(&mapping->list, &bo_va->invalids); 2292 amdgpu_vm_it_insert(mapping, &vm->va); 2293 2294 if (mapping->flags & AMDGPU_PTE_PRT) 2295 amdgpu_vm_prt_get(adev); 2296 2297 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && 2298 !bo_va->base.moved) { 2299 list_move(&bo_va->base.vm_status, &vm->moved); 2300 } 2301 trace_amdgpu_vm_bo_map(bo_va, mapping); 2302 } 2303 2304 /** 2305 * amdgpu_vm_bo_map - map bo inside a vm 2306 * 2307 * @adev: amdgpu_device pointer 2308 * @bo_va: bo_va to store the address 2309 * @saddr: where to map the BO 2310 * @offset: requested offset in the BO 2311 * @size: BO size in bytes 2312 * @flags: attributes of pages (read/write/valid/etc.)
2313 * 2314 * Add a mapping of the BO at the specefied addr into the VM. 2315 * 2316 * Returns: 2317 * 0 for success, error for failure. 2318 * 2319 * Object has to be reserved and unreserved outside! 2320 */ 2321 int amdgpu_vm_bo_map(struct amdgpu_device *adev, 2322 struct amdgpu_bo_va *bo_va, 2323 uint64_t saddr, uint64_t offset, 2324 uint64_t size, uint64_t flags) 2325 { 2326 struct amdgpu_bo_va_mapping *mapping, *tmp; 2327 struct amdgpu_bo *bo = bo_va->base.bo; 2328 struct amdgpu_vm *vm = bo_va->base.vm; 2329 uint64_t eaddr; 2330 2331 /* validate the parameters */ 2332 if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || 2333 size == 0 || size & ~PAGE_MASK) 2334 return -EINVAL; 2335 2336 /* make sure object fit at this offset */ 2337 eaddr = saddr + size - 1; 2338 if (saddr >= eaddr || 2339 (bo && offset + size > amdgpu_bo_size(bo)) || 2340 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) 2341 return -EINVAL; 2342 2343 saddr /= AMDGPU_GPU_PAGE_SIZE; 2344 eaddr /= AMDGPU_GPU_PAGE_SIZE; 2345 2346 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); 2347 if (tmp) { 2348 /* bo and tmp overlap, invalid addr */ 2349 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 2350 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, 2351 tmp->start, tmp->last + 1); 2352 return -EINVAL; 2353 } 2354 2355 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 2356 if (!mapping) 2357 return -ENOMEM; 2358 2359 mapping->start = saddr; 2360 mapping->last = eaddr; 2361 mapping->offset = offset; 2362 mapping->flags = flags; 2363 2364 amdgpu_vm_bo_insert_map(adev, bo_va, mapping); 2365 2366 return 0; 2367 } 2368 2369 /** 2370 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings 2371 * 2372 * @adev: amdgpu_device pointer 2373 * @bo_va: bo_va to store the address 2374 * @saddr: where to map the BO 2375 * @offset: requested offset in the BO 2376 * @size: BO size in bytes 2377 * @flags: attributes of pages (read/write/valid/etc.) 2378 * 2379 * Add a mapping of the BO at the specefied addr into the VM. Replace existing 2380 * mappings as we do so. 2381 * 2382 * Returns: 2383 * 0 for success, error for failure. 2384 * 2385 * Object has to be reserved and unreserved outside! 
*/ 2387 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, 2388 struct amdgpu_bo_va *bo_va, 2389 uint64_t saddr, uint64_t offset, 2390 uint64_t size, uint64_t flags) 2391 { 2392 struct amdgpu_bo_va_mapping *mapping; 2393 struct amdgpu_bo *bo = bo_va->base.bo; 2394 uint64_t eaddr; 2395 int r; 2396 2397 /* validate the parameters */ 2398 if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || 2399 size == 0 || size & ~PAGE_MASK) 2400 return -EINVAL; 2401 2402 /* make sure object fits at this offset */ 2403 eaddr = saddr + size - 1; 2404 if (saddr >= eaddr || 2405 (bo && offset + size > amdgpu_bo_size(bo)) || 2406 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) 2407 return -EINVAL; 2408 2409 /* Allocate all the needed memory */ 2410 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 2411 if (!mapping) 2412 return -ENOMEM; 2413 2414 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); 2415 if (r) { 2416 kfree(mapping); 2417 return r; 2418 } 2419 2420 saddr /= AMDGPU_GPU_PAGE_SIZE; 2421 eaddr /= AMDGPU_GPU_PAGE_SIZE; 2422 2423 mapping->start = saddr; 2424 mapping->last = eaddr; 2425 mapping->offset = offset; 2426 mapping->flags = flags; 2427 2428 amdgpu_vm_bo_insert_map(adev, bo_va, mapping); 2429 2430 return 0; 2431 } 2432 2433 /** 2434 * amdgpu_vm_bo_unmap - remove bo mapping from vm 2435 * 2436 * @adev: amdgpu_device pointer 2437 * @bo_va: bo_va to remove the address from 2438 * @saddr: where the BO is mapped 2439 * 2440 * Remove a mapping of the BO at the specified addr from the VM. 2441 * 2442 * Returns: 2443 * 0 for success, error for failure. 2444 * 2445 * Object has to be reserved and unreserved outside! 2446 */ 2447 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 2448 struct amdgpu_bo_va *bo_va, 2449 uint64_t saddr) 2450 { 2451 struct amdgpu_bo_va_mapping *mapping; 2452 struct amdgpu_vm *vm = bo_va->base.vm; 2453 bool valid = true; 2454 2455 saddr /= AMDGPU_GPU_PAGE_SIZE; 2456 2457 list_for_each_entry(mapping, &bo_va->valids, list) { 2458 if (mapping->start == saddr) 2459 break; 2460 } 2461 2462 if (&mapping->list == &bo_va->valids) { 2463 valid = false; 2464 2465 list_for_each_entry(mapping, &bo_va->invalids, list) { 2466 if (mapping->start == saddr) 2467 break; 2468 } 2469 2470 if (&mapping->list == &bo_va->invalids) 2471 return -ENOENT; 2472 } 2473 2474 list_del(&mapping->list); 2475 amdgpu_vm_it_remove(mapping, &vm->va); 2476 mapping->bo_va = NULL; 2477 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 2478 2479 if (valid) 2480 list_add(&mapping->list, &vm->freed); 2481 else 2482 amdgpu_vm_free_mapping(adev, vm, mapping, 2483 bo_va->last_pt_update); 2484 2485 return 0; 2486 } 2487 2488 /** 2489 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range 2490 * 2491 * @adev: amdgpu_device pointer 2492 * @vm: VM structure to use 2493 * @saddr: start of the range 2494 * @size: size of the range 2495 * 2496 * Remove all mappings in a range, splitting them as appropriate. 2497 * 2498 * Returns: 2499 * 0 for success, error for failure.
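 *
 * Example (an illustrative sketch; hole_va and hole_size are assumed,
 * page aligned, values chosen by the caller): punch a hole into an
 * existing mapping while keeping the parts in front of and behind it:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, hole_va, hole_size);
 *
 * Mappings that only partially overlap the range are split; the removed
 * pieces end up on &vm->freed and are cleared from the page tables by
 * the next amdgpu_vm_clear_freed() call.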
*/ 2501 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, 2502 struct amdgpu_vm *vm, 2503 uint64_t saddr, uint64_t size) 2504 { 2505 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; 2506 LIST_HEAD(removed); 2507 uint64_t eaddr; 2508 2509 eaddr = saddr + size - 1; 2510 saddr /= AMDGPU_GPU_PAGE_SIZE; 2511 eaddr /= AMDGPU_GPU_PAGE_SIZE; 2512 2513 /* Allocate all the needed memory */ 2514 before = kzalloc(sizeof(*before), GFP_KERNEL); 2515 if (!before) 2516 return -ENOMEM; 2517 INIT_LIST_HEAD(&before->list); 2518 2519 after = kzalloc(sizeof(*after), GFP_KERNEL); 2520 if (!after) { 2521 kfree(before); 2522 return -ENOMEM; 2523 } 2524 INIT_LIST_HEAD(&after->list); 2525 2526 /* Now gather all removed mappings */ 2527 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); 2528 while (tmp) { 2529 /* Remember mapping split at the start */ 2530 if (tmp->start < saddr) { 2531 before->start = tmp->start; 2532 before->last = saddr - 1; 2533 before->offset = tmp->offset; 2534 before->flags = tmp->flags; 2535 before->bo_va = tmp->bo_va; 2536 list_add(&before->list, &tmp->bo_va->invalids); 2537 } 2538 2539 /* Remember mapping split at the end */ 2540 if (tmp->last > eaddr) { 2541 after->start = eaddr + 1; 2542 after->last = tmp->last; 2543 after->offset = tmp->offset; 2544 after->offset += (after->start - tmp->start) << PAGE_SHIFT; 2545 after->flags = tmp->flags; 2546 after->bo_va = tmp->bo_va; 2547 list_add(&after->list, &tmp->bo_va->invalids); 2548 } 2549 2550 list_del(&tmp->list); 2551 list_add(&tmp->list, &removed); 2552 2553 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); 2554 } 2555 2556 /* And free them up */ 2557 list_for_each_entry_safe(tmp, next, &removed, list) { 2558 amdgpu_vm_it_remove(tmp, &vm->va); 2559 list_del(&tmp->list); 2560 2561 if (tmp->start < saddr) 2562 tmp->start = saddr; 2563 if (tmp->last > eaddr) 2564 tmp->last = eaddr; 2565 2566 tmp->bo_va = NULL; 2567 list_add(&tmp->list, &vm->freed); 2568 trace_amdgpu_vm_bo_unmap(NULL, tmp); 2569 } 2570 2571 /* Insert partial mapping before the range */ 2572 if (!list_empty(&before->list)) { 2573 amdgpu_vm_it_insert(before, &vm->va); 2574 if (before->flags & AMDGPU_PTE_PRT) 2575 amdgpu_vm_prt_get(adev); 2576 } else { 2577 kfree(before); 2578 } 2579 2580 /* Insert partial mapping after the range */ 2581 if (!list_empty(&after->list)) { 2582 amdgpu_vm_it_insert(after, &vm->va); 2583 if (after->flags & AMDGPU_PTE_PRT) 2584 amdgpu_vm_prt_get(adev); 2585 } else { 2586 kfree(after); 2587 } 2588 2589 return 0; 2590 } 2591 2592 /** 2593 * amdgpu_vm_bo_lookup_mapping - find mapping by address 2594 * 2595 * @vm: the requested VM 2596 * @addr: the address 2597 * 2598 * Find a mapping by its address. 2599 * 2600 * Returns: 2601 * The amdgpu_bo_va_mapping matching addr, or NULL 2602 * 2603 */ 2604 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, 2605 uint64_t addr) 2606 { 2607 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); 2608 } 2609 2610 /** 2611 * amdgpu_vm_bo_trace_cs - trace all reserved mappings 2612 * 2613 * @vm: the requested vm 2614 * @ticket: CS ticket 2615 * 2616 * Trace all mappings of BOs reserved during a command submission.
*/ 2618 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) 2619 { 2620 struct amdgpu_bo_va_mapping *mapping; 2621 2622 if (!trace_amdgpu_vm_bo_cs_enabled()) 2623 return; 2624 2625 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; 2626 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) { 2627 if (mapping->bo_va && mapping->bo_va->base.bo) { 2628 struct amdgpu_bo *bo; 2629 2630 bo = mapping->bo_va->base.bo; 2631 if (dma_resv_locking_ctx(bo->tbo.base.resv) != 2632 ticket) 2633 continue; 2634 } 2635 2636 trace_amdgpu_vm_bo_cs(mapping); 2637 } 2638 } 2639 2640 /** 2641 * amdgpu_vm_bo_rmv - remove a bo from a specific vm 2642 * 2643 * @adev: amdgpu_device pointer 2644 * @bo_va: requested bo_va 2645 * 2646 * Remove @bo_va->bo from the requested vm. 2647 * 2648 * Object has to be reserved! 2649 */ 2650 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 2651 struct amdgpu_bo_va *bo_va) 2652 { 2653 struct amdgpu_bo_va_mapping *mapping, *next; 2654 struct amdgpu_bo *bo = bo_va->base.bo; 2655 struct amdgpu_vm *vm = bo_va->base.vm; 2656 struct amdgpu_vm_bo_base **base; 2657 2658 if (bo) { 2659 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2660 vm->bulk_moveable = false; 2661 2662 for (base = &bo_va->base.bo->vm_bo; *base; 2663 base = &(*base)->next) { 2664 if (*base != &bo_va->base) 2665 continue; 2666 2667 *base = bo_va->base.next; 2668 break; 2669 } 2670 } 2671 2672 spin_lock(&vm->invalidated_lock); 2673 list_del(&bo_va->base.vm_status); 2674 spin_unlock(&vm->invalidated_lock); 2675 2676 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 2677 list_del(&mapping->list); 2678 amdgpu_vm_it_remove(mapping, &vm->va); 2679 mapping->bo_va = NULL; 2680 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 2681 list_add(&mapping->list, &vm->freed); 2682 } 2683 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 2684 list_del(&mapping->list); 2685 amdgpu_vm_it_remove(mapping, &vm->va); 2686 amdgpu_vm_free_mapping(adev, vm, mapping, 2687 bo_va->last_pt_update); 2688 } 2689 2690 dma_fence_put(bo_va->last_pt_update); 2691 2692 if (bo && bo_va->is_xgmi) 2693 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN); 2694 2695 kfree(bo_va); 2696 } 2697 2698 /** 2699 * amdgpu_vm_evictable - check if we can evict a VM 2700 * 2701 * @bo: A page table of the VM. 2702 * 2703 * Check if it is possible to evict a VM. 2704 */ 2705 bool amdgpu_vm_evictable(struct amdgpu_bo *bo) 2706 { 2707 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; 2708 2709 /* Page tables of a destroyed VM can go away immediately */ 2710 if (!bo_base || !bo_base->vm) 2711 return true; 2712 2713 /* Don't evict VM page tables while they are busy */ 2714 if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) 2715 return false; 2716 2717 /* Try to block ongoing updates */ 2718 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) 2719 return false; 2720 2721 /* Don't evict VM page tables while they are updated */ 2722 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { 2723 amdgpu_vm_eviction_unlock(bo_base->vm); 2724 return false; 2725 } 2726 2727 bo_base->vm->evicting = true; 2728 amdgpu_vm_eviction_unlock(bo_base->vm); 2729 return true; 2730 } 2731 2732 /** 2733 * amdgpu_vm_bo_invalidate - mark the bo as invalid 2734 * 2735 * @adev: amdgpu_device pointer 2736 * @bo: amdgpu buffer object 2737 * @evicted: is the BO evicted 2738 * 2739 * Mark @bo as invalid.
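 *
 * Depending on the BO and VM state the bo_va ends up on one of the VM's
 * state lists; roughly (an illustrative summary of the code below, not
 * an additional contract):
 *
 *	evicted && BO shares the root PD reservation -> evicted list
 *	kernel BO (a page table)                     -> relocated list
 *	BO shares the root PD reservation            -> moved list
 *	everything else                              -> invalidated list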
*/ 2741 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2742 struct amdgpu_bo *bo, bool evicted) 2743 { 2744 struct amdgpu_vm_bo_base *bo_base; 2745 2746 /* shadow bo doesn't have bo base, its validation needs its parent */ 2747 if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) 2748 bo = bo->parent; 2749 2750 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2751 struct amdgpu_vm *vm = bo_base->vm; 2752 2753 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 2754 amdgpu_vm_bo_evicted(bo_base); 2755 continue; 2756 } 2757 2758 if (bo_base->moved) 2759 continue; 2760 bo_base->moved = true; 2761 2762 if (bo->tbo.type == ttm_bo_type_kernel) 2763 amdgpu_vm_bo_relocated(bo_base); 2764 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2765 amdgpu_vm_bo_moved(bo_base); 2766 else 2767 amdgpu_vm_bo_invalidated(bo_base); 2768 } 2769 } 2770 2771 /** 2772 * amdgpu_vm_get_block_size - calculate VM page table size as power of two 2773 * 2774 * @vm_size: VM size 2775 * 2776 * Returns: 2777 * VM page table size as a power of two 2778 */ 2779 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) 2780 { 2781 /* Total bits covered by PD + PTs */ 2782 unsigned bits = ilog2(vm_size) + 18; 2783 2784 /* Make sure the PD is 4K in size up to 8GB address space. 2785 Above that, split equally between PD and PTs */ 2786 if (vm_size <= 8) 2787 return (bits - 9); 2788 else 2789 return ((bits + 3) / 2); 2790 } 2791 2792 /** 2793 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2794 * 2795 * @adev: amdgpu_device pointer 2796 * @min_vm_size: the minimum vm size in GB if it is set to auto 2797 * @fragment_size_default: Default PTE fragment size 2798 * @max_level: max VMPT level 2799 * @max_bits: max address space size in bits 2800 * 2801 */ 2802 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 2803 uint32_t fragment_size_default, unsigned max_level, 2804 unsigned max_bits) 2805 { 2806 unsigned int max_size = 1 << (max_bits - 30); 2807 unsigned int vm_size; 2808 uint64_t tmp; 2809 2810 /* adjust vm size first */ 2811 if (amdgpu_vm_size != -1) { 2812 vm_size = amdgpu_vm_size; 2813 if (vm_size > max_size) { 2814 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2815 amdgpu_vm_size, max_size); 2816 vm_size = max_size; 2817 } 2818 } else { 2819 struct sysinfo si; 2820 unsigned int phys_ram_gb; 2821 2822 /* Optimal VM size depends on the amount of physical 2823 * RAM available. Underlying requirements and 2824 * assumptions: 2825 * 2826 * - Need to map system memory and VRAM from all GPUs 2827 * - VRAM from other GPUs not known here 2828 * - Assume VRAM <= system memory 2829 * - On GFX8 and older, VM space can be segmented for 2830 * different MTYPEs 2831 * - Need to allow room for fragmentation, guard pages etc. 2832 * 2833 * This adds up to a rough guess of system memory x3. 2834 * Round up to power of two to maximize the available 2835 * VM size with the given page table size.
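 *
 * Worked example (illustrative, assuming 16 GB of system RAM and a
 * sufficiently large max_size): phys_ram_gb = 16, 3 * 16 = 48 GB,
 * and roundup_pow_of_two(48) yields a 64 GB VM size.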
2836 */ 2837 si_meminfo(&si); 2838 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + 2839 (1 << 30) - 1) >> 30; 2840 vm_size = roundup_pow_of_two( 2841 min(max(phys_ram_gb * 3, min_vm_size), max_size)); 2842 } 2843 2844 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2845 2846 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2847 if (amdgpu_vm_block_size != -1) 2848 tmp >>= amdgpu_vm_block_size - 9; 2849 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; 2850 adev->vm_manager.num_level = min(max_level, (unsigned)tmp); 2851 switch (adev->vm_manager.num_level) { 2852 case 3: 2853 adev->vm_manager.root_level = AMDGPU_VM_PDB2; 2854 break; 2855 case 2: 2856 adev->vm_manager.root_level = AMDGPU_VM_PDB1; 2857 break; 2858 case 1: 2859 adev->vm_manager.root_level = AMDGPU_VM_PDB0; 2860 break; 2861 default: 2862 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); 2863 } 2864 /* block size depends on vm size and hw setup*/ 2865 if (amdgpu_vm_block_size != -1) 2866 adev->vm_manager.block_size = 2867 min((unsigned)amdgpu_vm_block_size, max_bits 2868 - AMDGPU_GPU_PAGE_SHIFT 2869 - 9 * adev->vm_manager.num_level); 2870 else if (adev->vm_manager.num_level > 1) 2871 adev->vm_manager.block_size = 9; 2872 else 2873 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); 2874 2875 if (amdgpu_vm_fragment_size == -1) 2876 adev->vm_manager.fragment_size = fragment_size_default; 2877 else 2878 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; 2879 2880 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2881 vm_size, adev->vm_manager.num_level + 1, 2882 adev->vm_manager.block_size, 2883 adev->vm_manager.fragment_size); 2884 } 2885 2886 /** 2887 * amdgpu_vm_wait_idle - wait for the VM to become idle 2888 * 2889 * @vm: VM object to wait for 2890 * @timeout: timeout to wait for VM to become idle 2891 */ 2892 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2893 { 2894 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, 2895 true, timeout); 2896 if (timeout <= 0) 2897 return timeout; 2898 2899 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); 2900 } 2901 2902 /** 2903 * amdgpu_vm_init - initialize a vm instance 2904 * 2905 * @adev: amdgpu_device pointer 2906 * @vm: requested vm 2907 * 2908 * Init @vm fields. 2909 * 2910 * Returns: 2911 * 0 for success, error for failure. 
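 *
 * Example (an illustrative sketch of the usual pairing; error handling
 * trimmed, @pasid is an assumption of the example):
 *
 *	r = amdgpu_vm_init(adev, vm);
 *	if (r)
 *		return r;
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *	...
 *	amdgpu_vm_fini(adev, vm);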
*/ 2913 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2914 { 2915 struct amdgpu_bo *root_bo; 2916 struct amdgpu_bo_vm *root; 2917 int r, i; 2918 2919 vm->va = RB_ROOT_CACHED; 2920 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2921 vm->reserved_vmid[i] = NULL; 2922 INIT_LIST_HEAD(&vm->evicted); 2923 INIT_LIST_HEAD(&vm->relocated); 2924 INIT_LIST_HEAD(&vm->moved); 2925 INIT_LIST_HEAD(&vm->idle); 2926 INIT_LIST_HEAD(&vm->invalidated); 2927 spin_lock_init(&vm->invalidated_lock); 2928 INIT_LIST_HEAD(&vm->freed); 2929 INIT_LIST_HEAD(&vm->done); 2930 2931 /* create scheduler entities for page table updates */ 2932 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, 2933 adev->vm_manager.vm_pte_scheds, 2934 adev->vm_manager.vm_pte_num_scheds, NULL); 2935 if (r) 2936 return r; 2937 2938 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, 2939 adev->vm_manager.vm_pte_scheds, 2940 adev->vm_manager.vm_pte_num_scheds, NULL); 2941 if (r) 2942 goto error_free_immediate; 2943 2944 vm->pte_support_ats = false; 2945 vm->is_compute_context = false; 2946 2947 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2948 AMDGPU_VM_USE_CPU_FOR_GFX); 2949 2950 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2951 vm->use_cpu_for_update ? "CPU" : "SDMA"); 2952 WARN_ONCE((vm->use_cpu_for_update && 2953 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2954 "CPU update of VM recommended only for large BAR system\n"); 2955 2956 if (vm->use_cpu_for_update) 2957 vm->update_funcs = &amdgpu_vm_cpu_funcs; 2958 else 2959 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2960 vm->last_update = NULL; 2961 vm->last_unlocked = dma_fence_get_stub(); 2962 2963 mutex_init(&vm->eviction_lock); 2964 vm->evicting = false; 2965 2966 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, 2967 false, &root); 2968 if (r) 2969 goto error_free_delayed; 2970 root_bo = &root->bo; 2971 r = amdgpu_bo_reserve(root_bo, true); 2972 if (r) 2973 goto error_free_root; 2974 2975 r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1); 2976 if (r) 2977 goto error_unreserve; 2978 2979 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); 2980 2981 r = amdgpu_vm_clear_bo(adev, vm, root, false); 2982 if (r) 2983 goto error_unreserve; 2984 2985 amdgpu_bo_unreserve(vm->root.bo); 2986 2987 INIT_KFIFO(vm->faults); 2988 2989 return 0; 2990 2991 error_unreserve: 2992 amdgpu_bo_unreserve(vm->root.bo); 2993 2994 error_free_root: 2995 amdgpu_bo_unref(&root->shadow); 2996 amdgpu_bo_unref(&root_bo); 2997 vm->root.bo = NULL; 2998 2999 error_free_delayed: 3000 dma_fence_put(vm->last_unlocked); 3001 drm_sched_entity_destroy(&vm->delayed); 3002 3003 error_free_immediate: 3004 drm_sched_entity_destroy(&vm->immediate); 3005 3006 return r; 3007 } 3008 3009 /** 3010 * amdgpu_vm_check_clean_reserved - check if a VM is clean 3011 * 3012 * @adev: amdgpu_device pointer 3013 * @vm: the VM to check 3014 * 3015 * Check all entries of the root PD. If any subsequent PDs are allocated, 3016 * it means page tables are already being created and filled, so the VM 3017 * is not clean. 3018 * 3019 * Returns: 3020 * 0 if this VM is clean, -EINVAL otherwise 3021 */ 3022 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, 3023 struct amdgpu_vm *vm) 3024 { 3025 enum amdgpu_vm_level root = adev->vm_manager.root_level; 3026 unsigned int entries = amdgpu_vm_num_entries(adev, root); 3027 unsigned int i = 0; 3028 3029 for (i = 0; i < entries; i++) { 3030 if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) 3031 return -EINVAL; 3032 } 3033 3034 return 0; 3035 } 3036 3037
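/*
 * Example (an illustrative sketch, intentionally not compiled): the typical
 * flow for turning a freshly created GFX VM into a compute VM and attaching
 * a PASID. Error handling is trimmed and the helper name is hypothetical.
 */
#if 0
static int example_acquire_compute_vm(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm, u32 pasid)
{
	int r;

	/* Only valid while no BOs were added and no page tables exist */
	r = amdgpu_vm_make_compute(adev, vm);
	if (r)
		return r;

	/* Make the VM findable by PASID, e.g. for fault handling */
	r = amdgpu_vm_set_pasid(adev, vm, pasid);
	if (r)
		amdgpu_vm_release_compute(adev, vm);
	return r;
}
#endif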
/** 3038 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM 3039 * 3040 * @adev: amdgpu_device pointer 3041 * @vm: requested vm 3042 * 3043 * This only works on GFX VMs that don't have any BOs added and no 3044 * page tables allocated yet. 3045 * 3046 * Changes the following VM parameters: 3047 * - use_cpu_for_update 3048 * - pte_supports_ats 3049 * 3050 * Reinitializes the page directory to reflect the changed ATS 3051 * setting. 3052 * 3053 * Returns: 3054 * 0 for success, -errno for errors. 3055 */ 3056 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3057 { 3058 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); 3059 int r; 3060 3061 r = amdgpu_bo_reserve(vm->root.bo, true); 3062 if (r) 3063 return r; 3064 3065 /* Sanity checks */ 3066 r = amdgpu_vm_check_clean_reserved(adev, vm); 3067 if (r) 3068 goto unreserve_bo; 3069 3070 /* Check if PD needs to be reinitialized and do it before 3071 * changing any other state, in case it fails. 3072 */ 3073 if (pte_support_ats != vm->pte_support_ats) { 3074 vm->pte_support_ats = pte_support_ats; 3075 r = amdgpu_vm_clear_bo(adev, vm, 3076 to_amdgpu_bo_vm(vm->root.bo), 3077 false); 3078 if (r) 3079 goto unreserve_bo; 3080 } 3081 3082 /* Update VM state */ 3083 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 3084 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 3085 DRM_DEBUG_DRIVER("VM update mode is %s\n", 3086 vm->use_cpu_for_update ? "CPU" : "SDMA"); 3087 WARN_ONCE((vm->use_cpu_for_update && 3088 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 3089 "CPU update of VM recommended only for large BAR system\n"); 3090 3091 if (vm->use_cpu_for_update) { 3092 /* Sync with last SDMA update/clear before switching to CPU */ 3093 r = amdgpu_bo_sync_wait(vm->root.bo, 3094 AMDGPU_FENCE_OWNER_UNDEFINED, true); 3095 if (r) 3096 goto unreserve_bo; 3097 3098 vm->update_funcs = &amdgpu_vm_cpu_funcs; 3099 } else { 3100 vm->update_funcs = &amdgpu_vm_sdma_funcs; 3101 } 3102 dma_fence_put(vm->last_update); 3103 vm->last_update = NULL; 3104 vm->is_compute_context = true; 3105 3106 /* Free the shadow bo for compute VM */ 3107 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); 3108 3109 goto unreserve_bo; 3110 3111 unreserve_bo: 3112 amdgpu_bo_unreserve(vm->root.bo); 3113 return r; 3114 } 3115 3116 /** 3117 * amdgpu_vm_release_compute - release a compute vm 3118 * @adev: amdgpu_device pointer 3119 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute 3120 * 3121 * This is the counterpart of amdgpu_vm_make_compute. It decouples the 3122 * compute pasid from the vm. Compute should stop using the vm after this call. 3123 */ 3124 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3125 { 3126 amdgpu_vm_set_pasid(adev, vm, 0); 3127 vm->is_compute_context = false; 3128 } 3129 3130 /** 3131 * amdgpu_vm_fini - tear down a vm instance 3132 * 3133 * @adev: amdgpu_device pointer 3134 * @vm: requested vm 3135 * 3136 * Tear down @vm.
3137 * Unbind the VM and remove all bos from the vm bo list 3138 */ 3139 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3140 { 3141 struct amdgpu_bo_va_mapping *mapping, *tmp; 3142 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; 3143 struct amdgpu_bo *root; 3144 int i; 3145 3146 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 3147 3148 root = amdgpu_bo_ref(vm->root.bo); 3149 amdgpu_bo_reserve(root, true); 3150 amdgpu_vm_set_pasid(adev, vm, 0); 3151 dma_fence_wait(vm->last_unlocked, false); 3152 dma_fence_put(vm->last_unlocked); 3153 3154 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 3155 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { 3156 amdgpu_vm_prt_fini(adev, vm); 3157 prt_fini_needed = false; 3158 } 3159 3160 list_del(&mapping->list); 3161 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 3162 } 3163 3164 amdgpu_vm_free_pts(adev, vm, NULL); 3165 amdgpu_bo_unreserve(root); 3166 amdgpu_bo_unref(&root); 3167 WARN_ON(vm->root.bo); 3168 3169 drm_sched_entity_destroy(&vm->immediate); 3170 drm_sched_entity_destroy(&vm->delayed); 3171 3172 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 3173 dev_err(adev->dev, "still active bo inside vm\n"); 3174 } 3175 rbtree_postorder_for_each_entry_safe(mapping, tmp, 3176 &vm->va.rb_root, rb) { 3177 /* Don't remove the mapping here, we don't want to trigger a 3178 * rebalance and the tree is about to be destroyed anyway. 3179 */ 3180 list_del(&mapping->list); 3181 kfree(mapping); 3182 } 3183 3184 dma_fence_put(vm->last_update); 3185 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 3186 amdgpu_vmid_free_reserved(adev, vm, i); 3187 } 3188 3189 /** 3190 * amdgpu_vm_manager_init - init the VM manager 3191 * 3192 * @adev: amdgpu_device pointer 3193 * 3194 * Initialize the VM manager structures 3195 */ 3196 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 3197 { 3198 unsigned i; 3199 3200 /* Concurrent flushes are only possible starting with Vega10 and 3201 * are broken on Navi10 and Navi14. 3202 */ 3203 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || 3204 adev->asic_type == CHIP_NAVI10 || 3205 adev->asic_type == CHIP_NAVI14); 3206 amdgpu_vmid_mgr_init(adev); 3207 3208 adev->vm_manager.fence_context = 3209 dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3210 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 3211 adev->vm_manager.seqno[i] = 0; 3212 3213 spin_lock_init(&adev->vm_manager.prt_lock); 3214 atomic_set(&adev->vm_manager.num_prt_users, 0); 3215 3216 /* If not overridden by the user, by default, only in large BAR systems 3217 * Compute VM tables will be updated by CPU 3218 */ 3219 #ifdef CONFIG_X86_64 3220 if (amdgpu_vm_update_mode == -1) { 3221 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) 3222 adev->vm_manager.vm_update_mode = 3223 AMDGPU_VM_USE_CPU_FOR_COMPUTE; 3224 else 3225 adev->vm_manager.vm_update_mode = 0; 3226 } else 3227 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; 3228 #else 3229 adev->vm_manager.vm_update_mode = 0; 3230 #endif 3231 3232 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ); 3233 } 3234 3235 /** 3236 * amdgpu_vm_manager_fini - cleanup VM manager 3237 * 3238 * @adev: amdgpu_device pointer 3239 * 3240 * Cleanup the VM manager and free resources. 3241 */ 3242 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 3243 { 3244 WARN_ON(!xa_empty(&adev->vm_manager.pasids)); 3245 xa_destroy(&adev->vm_manager.pasids); 3246 3247 amdgpu_vmid_mgr_fini(adev); 3248 } 3249 3250 /** 3251 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. 
3252 * 3253 * @dev: drm device pointer 3254 * @data: drm_amdgpu_vm 3255 * @filp: drm file pointer 3256 * 3257 * Returns: 3258 * 0 for success, -errno for errors. 3259 */ 3260 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 3261 { 3262 union drm_amdgpu_vm *args = data; 3263 struct amdgpu_device *adev = drm_to_adev(dev); 3264 struct amdgpu_fpriv *fpriv = filp->driver_priv; 3265 long timeout = msecs_to_jiffies(2000); 3266 int r; 3267 3268 switch (args->in.op) { 3269 case AMDGPU_VM_OP_RESERVE_VMID: 3270 /* We only have requirement to reserve vmid from gfxhub */ 3271 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, 3272 AMDGPU_GFXHUB_0); 3273 if (r) 3274 return r; 3275 break; 3276 case AMDGPU_VM_OP_UNRESERVE_VMID: 3277 if (amdgpu_sriov_runtime(adev)) 3278 timeout = 8 * timeout; 3279 3280 /* Wait vm idle to make sure the vmid set in SPM_VMID is 3281 * not referenced anymore. 3282 */ 3283 r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); 3284 if (r) 3285 return r; 3286 3287 r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); 3288 if (r < 0) 3289 return r; 3290 3291 amdgpu_bo_unreserve(fpriv->vm.root.bo); 3292 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3293 break; 3294 default: 3295 return -EINVAL; 3296 } 3297 3298 return 0; 3299 } 3300 3301 /** 3302 * amdgpu_vm_get_task_info - Extracts task info for a PASID. 3303 * 3304 * @adev: drm device pointer 3305 * @pasid: PASID identifier for VM 3306 * @task_info: task_info to fill. 3307 */ 3308 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, 3309 struct amdgpu_task_info *task_info) 3310 { 3311 struct amdgpu_vm *vm; 3312 unsigned long flags; 3313 3314 xa_lock_irqsave(&adev->vm_manager.pasids, flags); 3315 3316 vm = xa_load(&adev->vm_manager.pasids, pasid); 3317 if (vm) 3318 *task_info = vm->task_info; 3319 3320 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); 3321 } 3322 3323 /** 3324 * amdgpu_vm_set_task_info - Sets VMs task info. 3325 * 3326 * @vm: vm for which to set the info 3327 */ 3328 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) 3329 { 3330 if (vm->task_info.pid) 3331 return; 3332 3333 vm->task_info.pid = current->pid; 3334 get_task_comm(vm->task_info.task_name, current); 3335 3336 if (current->group_leader->mm != current->mm) 3337 return; 3338 3339 vm->task_info.tgid = current->group_leader->pid; 3340 get_task_comm(vm->task_info.process_name, current->group_leader); 3341 } 3342 3343 /** 3344 * amdgpu_vm_handle_fault - graceful handling of VM faults. 3345 * @adev: amdgpu device pointer 3346 * @pasid: PASID of the VM 3347 * @addr: Address of the fault 3348 * @write_fault: true is write fault, false is read fault 3349 * 3350 * Try to gracefully handle a VM fault. Return true if the fault was handled and 3351 * shouldn't be reported any more. 
3352 */ 3353 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, 3354 uint64_t addr, bool write_fault) 3355 { 3356 bool is_compute_context = false; 3357 struct amdgpu_bo *root; 3358 unsigned long irqflags; 3359 uint64_t value, flags; 3360 struct amdgpu_vm *vm; 3361 int r; 3362 3363 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); 3364 vm = xa_load(&adev->vm_manager.pasids, pasid); 3365 if (vm) { 3366 root = amdgpu_bo_ref(vm->root.bo); 3367 is_compute_context = vm->is_compute_context; 3368 } else { 3369 root = NULL; 3370 } 3371 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); 3372 3373 if (!root) 3374 return false; 3375 3376 addr /= AMDGPU_GPU_PAGE_SIZE; 3377 3378 if (is_compute_context && 3379 !svm_range_restore_pages(adev, pasid, addr, write_fault)) { 3380 amdgpu_bo_unref(&root); 3381 return true; 3382 } 3383 3384 r = amdgpu_bo_reserve(root, true); 3385 if (r) 3386 goto error_unref; 3387 3388 /* Double check that the VM still exists */ 3389 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); 3390 vm = xa_load(&adev->vm_manager.pasids, pasid); 3391 if (vm && vm->root.bo != root) 3392 vm = NULL; 3393 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); 3394 if (!vm) 3395 goto error_unlock; 3396 3397 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | 3398 AMDGPU_PTE_SYSTEM; 3399 3400 if (is_compute_context) { 3401 /* Intentionally setting invalid PTE flag 3402 * combination to force a no-retry-fault 3403 */ 3404 flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | 3405 AMDGPU_PTE_TF; 3406 value = 0; 3407 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { 3408 /* Redirect the access to the dummy page */ 3409 value = adev->dummy_page_addr; 3410 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | 3411 AMDGPU_PTE_WRITEABLE; 3412 3413 } else { 3414 /* Let the hw retry silently on the PTE */ 3415 value = 0; 3416 } 3417 3418 r = dma_resv_reserve_shared(root->tbo.base.resv, 1); 3419 if (r) { 3420 pr_debug("failed %d to reserve fence slot\n", r); 3421 goto error_unlock; 3422 } 3423 3424 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, 3425 addr, flags, value, NULL, NULL, NULL, 3426 NULL); 3427 if (r) 3428 goto error_unlock; 3429 3430 r = amdgpu_vm_update_pdes(adev, vm, true); 3431 3432 error_unlock: 3433 amdgpu_bo_unreserve(root); 3434 if (r < 0) 3435 DRM_ERROR("Can't handle page fault (%d)\n", r); 3436 3437 error_unref: 3438 amdgpu_bo_unref(&root); 3439 3440 return false; 3441 } 3442 3443 #if defined(CONFIG_DEBUG_FS) 3444 /** 3445 * amdgpu_debugfs_vm_bo_info - print BO info for the VM 3446 * 3447 * @vm: Requested VM for printing BO info 3448 * @m: debugfs file 3449 * 3450 * Print BO information in debugfs file for the VM 3451 */ 3452 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) 3453 { 3454 struct amdgpu_bo_va *bo_va, *tmp; 3455 u64 total_idle = 0; 3456 u64 total_evicted = 0; 3457 u64 total_relocated = 0; 3458 u64 total_moved = 0; 3459 u64 total_invalidated = 0; 3460 u64 total_done = 0; 3461 unsigned int total_idle_objs = 0; 3462 unsigned int total_evicted_objs = 0; 3463 unsigned int total_relocated_objs = 0; 3464 unsigned int total_moved_objs = 0; 3465 unsigned int total_invalidated_objs = 0; 3466 unsigned int total_done_objs = 0; 3467 unsigned int id = 0; 3468 3469 seq_puts(m, "\tIdle BOs:\n"); 3470 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { 3471 if (!bo_va->base.bo) 3472 continue; 3473 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3474 } 3475 total_idle_objs = id; 
3476 id = 0; 3477 3478 seq_puts(m, "\tEvicted BOs:\n"); 3479 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { 3480 if (!bo_va->base.bo) 3481 continue; 3482 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3483 } 3484 total_evicted_objs = id; 3485 id = 0; 3486 3487 seq_puts(m, "\tRelocated BOs:\n"); 3488 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { 3489 if (!bo_va->base.bo) 3490 continue; 3491 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3492 } 3493 total_relocated_objs = id; 3494 id = 0; 3495 3496 seq_puts(m, "\tMoved BOs:\n"); 3497 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { 3498 if (!bo_va->base.bo) 3499 continue; 3500 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3501 } 3502 total_moved_objs = id; 3503 id = 0; 3504 3505 seq_puts(m, "\tInvalidated BOs:\n"); 3506 spin_lock(&vm->invalidated_lock); 3507 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { 3508 if (!bo_va->base.bo) 3509 continue; 3510 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3511 } 3512 total_invalidated_objs = id; 3513 id = 0; 3514 3515 seq_puts(m, "\tDone BOs:\n"); 3516 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { 3517 if (!bo_va->base.bo) 3518 continue; 3519 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3520 } 3521 spin_unlock(&vm->invalidated_lock); 3522 total_done_objs = id; 3523 3524 seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle, 3525 total_idle_objs); 3526 seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted, 3527 total_evicted_objs); 3528 seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated, 3529 total_relocated_objs); 3530 seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved, 3531 total_moved_objs); 3532 seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated, 3533 total_invalidated_objs); 3534 seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done, 3535 total_done_objs); 3536 } 3537 #endif 3538
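/*
 * Example (an illustrative sketch, intentionally not compiled): how a fault
 * reporting path might combine amdgpu_vm_handle_fault() with
 * amdgpu_vm_get_task_info() to log the offending process. The helper name
 * and the way the fault parameters are obtained are assumptions.
 */
#if 0
static void example_report_vm_fault(struct amdgpu_device *adev, u32 pasid,
				    uint64_t addr, bool write_fault)
{
	struct amdgpu_task_info task_info;

	if (amdgpu_vm_handle_fault(adev, pasid, addr, write_fault))
		return; /* handled gracefully, nothing to report */

	memset(&task_info, 0, sizeof(task_info));
	amdgpu_vm_get_task_info(adev, pasid, &task_info);
	dev_err(adev->dev,
		"VM fault at 0x%016llx from process %s (pid %d)\n",
		addr, task_info.process_name, task_info.pid);
}
#endif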