/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
89 */ 90 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) 91 { 92 mutex_lock(&vm->eviction_lock); 93 vm->saved_flags = memalloc_nofs_save(); 94 } 95 96 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) 97 { 98 if (mutex_trylock(&vm->eviction_lock)) { 99 vm->saved_flags = memalloc_nofs_save(); 100 return 1; 101 } 102 return 0; 103 } 104 105 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) 106 { 107 memalloc_nofs_restore(vm->saved_flags); 108 mutex_unlock(&vm->eviction_lock); 109 } 110 111 /** 112 * amdgpu_vm_level_shift - return the addr shift for each level 113 * 114 * @adev: amdgpu_device pointer 115 * @level: VMPT level 116 * 117 * Returns: 118 * The number of bits the pfn needs to be right shifted for a level. 119 */ 120 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, 121 unsigned level) 122 { 123 switch (level) { 124 case AMDGPU_VM_PDB2: 125 case AMDGPU_VM_PDB1: 126 case AMDGPU_VM_PDB0: 127 return 9 * (AMDGPU_VM_PDB0 - level) + 128 adev->vm_manager.block_size; 129 case AMDGPU_VM_PTB: 130 return 0; 131 default: 132 return ~0; 133 } 134 } 135 136 /** 137 * amdgpu_vm_num_entries - return the number of entries in a PD/PT 138 * 139 * @adev: amdgpu_device pointer 140 * @level: VMPT level 141 * 142 * Returns: 143 * The number of entries in a page directory or page table. 144 */ 145 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, 146 unsigned level) 147 { 148 unsigned shift = amdgpu_vm_level_shift(adev, 149 adev->vm_manager.root_level); 150 151 if (level == adev->vm_manager.root_level) 152 /* For the root directory */ 153 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) 154 >> shift; 155 else if (level != AMDGPU_VM_PTB) 156 /* Everything in between */ 157 return 512; 158 else 159 /* For the page tables on the leaves */ 160 return AMDGPU_VM_PTE_COUNT(adev); 161 } 162 163 /** 164 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD 165 * 166 * @adev: amdgpu_device pointer 167 * 168 * Returns: 169 * The number of entries in the root page directory which needs the ATS setting. 170 */ 171 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev) 172 { 173 unsigned shift; 174 175 shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level); 176 return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT); 177 } 178 179 /** 180 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT 181 * 182 * @adev: amdgpu_device pointer 183 * @level: VMPT level 184 * 185 * Returns: 186 * The mask to extract the entry number of a PD/PT from an address. 187 */ 188 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev, 189 unsigned int level) 190 { 191 if (level <= adev->vm_manager.root_level) 192 return 0xffffffff; 193 else if (level != AMDGPU_VM_PTB) 194 return 0x1ff; 195 else 196 return AMDGPU_VM_PTE_COUNT(adev) - 1; 197 } 198 199 /** 200 * amdgpu_vm_bo_size - returns the size of the BOs in bytes 201 * 202 * @adev: amdgpu_device pointer 203 * @level: VMPT level 204 * 205 * Returns: 206 * The size of the BO for a page directory or page table in bytes. 207 */ 208 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) 209 { 210 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); 211 } 212 213 /** 214 * amdgpu_vm_bo_evicted - vm_bo is evicted 215 * 216 * @vm_bo: vm_bo which is evicted 217 * 218 * State for PDs/PTs and per VM BOs which are not at the location they should 219 * be. 
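 *
 * Note: this is one station in the small per-VM BO state machine used by the
 * helpers below (moved, idle, invalidated, relocated, done); each helper
 * simply moves the base structure onto the corresponding list of the VM.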
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent)
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	else
		amdgpu_vm_bo_idle(vm_bo);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and whose change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_del_init(&vm_bo->vm_status);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * we checked all the prerequisites, but it looks like this per vm bo
	 * is currently evicted.
add the bo to the evicted list to make sure it 345 * is validated on next vm use to avoid fault. 346 * */ 347 amdgpu_vm_bo_evicted(base); 348 } 349 350 /** 351 * amdgpu_vm_pt_parent - get the parent page directory 352 * 353 * @pt: child page table 354 * 355 * Helper to get the parent entry for the child page table. NULL if we are at 356 * the root page directory. 357 */ 358 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) 359 { 360 struct amdgpu_bo *parent = pt->base.bo->parent; 361 362 if (!parent) 363 return NULL; 364 365 return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); 366 } 367 368 /* 369 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt 370 */ 371 struct amdgpu_vm_pt_cursor { 372 uint64_t pfn; 373 struct amdgpu_vm_pt *parent; 374 struct amdgpu_vm_pt *entry; 375 unsigned level; 376 }; 377 378 /** 379 * amdgpu_vm_pt_start - start PD/PT walk 380 * 381 * @adev: amdgpu_device pointer 382 * @vm: amdgpu_vm structure 383 * @start: start address of the walk 384 * @cursor: state to initialize 385 * 386 * Initialize a amdgpu_vm_pt_cursor to start a walk. 387 */ 388 static void amdgpu_vm_pt_start(struct amdgpu_device *adev, 389 struct amdgpu_vm *vm, uint64_t start, 390 struct amdgpu_vm_pt_cursor *cursor) 391 { 392 cursor->pfn = start; 393 cursor->parent = NULL; 394 cursor->entry = &vm->root; 395 cursor->level = adev->vm_manager.root_level; 396 } 397 398 /** 399 * amdgpu_vm_pt_descendant - go to child node 400 * 401 * @adev: amdgpu_device pointer 402 * @cursor: current state 403 * 404 * Walk to the child node of the current node. 405 * Returns: 406 * True if the walk was possible, false otherwise. 407 */ 408 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, 409 struct amdgpu_vm_pt_cursor *cursor) 410 { 411 unsigned mask, shift, idx; 412 413 if (!cursor->entry->entries) 414 return false; 415 416 BUG_ON(!cursor->entry->base.bo); 417 mask = amdgpu_vm_entries_mask(adev, cursor->level); 418 shift = amdgpu_vm_level_shift(adev, cursor->level); 419 420 ++cursor->level; 421 idx = (cursor->pfn >> shift) & mask; 422 cursor->parent = cursor->entry; 423 cursor->entry = &cursor->entry->entries[idx]; 424 return true; 425 } 426 427 /** 428 * amdgpu_vm_pt_sibling - go to sibling node 429 * 430 * @adev: amdgpu_device pointer 431 * @cursor: current state 432 * 433 * Walk to the sibling node of the current node. 434 * Returns: 435 * True if the walk was possible, false otherwise. 436 */ 437 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, 438 struct amdgpu_vm_pt_cursor *cursor) 439 { 440 unsigned shift, num_entries; 441 442 /* Root doesn't have a sibling */ 443 if (!cursor->parent) 444 return false; 445 446 /* Go to our parents and see if we got a sibling */ 447 shift = amdgpu_vm_level_shift(adev, cursor->level - 1); 448 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); 449 450 if (cursor->entry == &cursor->parent->entries[num_entries - 1]) 451 return false; 452 453 cursor->pfn += 1ULL << shift; 454 cursor->pfn &= ~((1ULL << shift) - 1); 455 ++cursor->entry; 456 return true; 457 } 458 459 /** 460 * amdgpu_vm_pt_ancestor - go to parent node 461 * 462 * @cursor: current state 463 * 464 * Walk to the parent node of the current node. 465 * Returns: 466 * True if the walk was possible, false otherwise. 
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_pt *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
581 */ 582 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 583 struct list_head *validated, 584 struct amdgpu_bo_list_entry *entry) 585 { 586 entry->priority = 0; 587 entry->tv.bo = &vm->root.base.bo->tbo; 588 /* Two for VM updates, one for TTM and one for the CS job */ 589 entry->tv.num_shared = 4; 590 entry->user_pages = NULL; 591 list_add(&entry->tv.head, validated); 592 } 593 594 /** 595 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag 596 * 597 * @bo: BO which was removed from the LRU 598 * 599 * Make sure the bulk_moveable flag is updated when a BO is removed from the 600 * LRU. 601 */ 602 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) 603 { 604 struct amdgpu_bo *abo; 605 struct amdgpu_vm_bo_base *bo_base; 606 607 if (!amdgpu_bo_is_amdgpu_bo(bo)) 608 return; 609 610 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) 611 return; 612 613 abo = ttm_to_amdgpu_bo(bo); 614 if (!abo->parent) 615 return; 616 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { 617 struct amdgpu_vm *vm = bo_base->vm; 618 619 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 620 vm->bulk_moveable = false; 621 } 622 623 } 624 /** 625 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU 626 * 627 * @adev: amdgpu device pointer 628 * @vm: vm providing the BOs 629 * 630 * Move all BOs to the end of LRU and remember their positions to put them 631 * together. 632 */ 633 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, 634 struct amdgpu_vm *vm) 635 { 636 struct amdgpu_vm_bo_base *bo_base; 637 638 if (vm->bulk_moveable) { 639 spin_lock(&ttm_bo_glob.lru_lock); 640 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 641 spin_unlock(&ttm_bo_glob.lru_lock); 642 return; 643 } 644 645 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 646 647 spin_lock(&ttm_bo_glob.lru_lock); 648 list_for_each_entry(bo_base, &vm->idle, vm_status) { 649 struct amdgpu_bo *bo = bo_base->bo; 650 651 if (!bo->parent) 652 continue; 653 654 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); 655 if (bo->shadow) 656 ttm_bo_move_to_lru_tail(&bo->shadow->tbo, 657 &vm->lru_bulk_move); 658 } 659 spin_unlock(&ttm_bo_glob.lru_lock); 660 661 vm->bulk_moveable = true; 662 } 663 664 /** 665 * amdgpu_vm_validate_pt_bos - validate the page table BOs 666 * 667 * @adev: amdgpu device pointer 668 * @vm: vm providing the BOs 669 * @validate: callback to do the validation 670 * @param: parameter for the validation callback 671 * 672 * Validate the page table BOs on command submission if neccessary. 673 * 674 * Returns: 675 * Validation result. 676 */ 677 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, 678 int (*validate)(void *p, struct amdgpu_bo *bo), 679 void *param) 680 { 681 struct amdgpu_vm_bo_base *bo_base, *tmp; 682 int r; 683 684 vm->bulk_moveable &= list_empty(&vm->evicted); 685 686 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 687 struct amdgpu_bo *bo = bo_base->bo; 688 689 r = validate(param, bo); 690 if (r) 691 return r; 692 693 if (bo->tbo.type != ttm_bo_type_kernel) { 694 amdgpu_vm_bo_moved(bo_base); 695 } else { 696 vm->update_funcs->map_table(bo); 697 amdgpu_vm_bo_relocated(bo_base); 698 } 699 } 700 701 amdgpu_vm_eviction_lock(vm); 702 vm->evicting = false; 703 amdgpu_vm_eviction_unlock(vm); 704 705 return 0; 706 } 707 708 /** 709 * amdgpu_vm_ready - check VM is ready for updates 710 * 711 * @vm: VM to check 712 * 713 * Check if all VM PDs/PTs are ready for updates 714 * 715 * Returns: 716 * True if eviction list is empty. 
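 *
 * In other words, the VM is considered ready once amdgpu_vm_validate_pt_bos()
 * (or equivalent validation) has drained the evicted list.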
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @direct: use a direct update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo,
			      bool direct)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = bo;
	unsigned entries, ats_entries;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_pt *pt;

		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - vm->root.entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (bo->shadow) {
		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
				    &ctx);
		if (r)
			return r;
	}

	r = vm->update_funcs->map_table(bo);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
					     value, flags);
		if (r)
			return r;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
					     value, flags);
		if (r)
			return r;
	}

	return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @direct: use a direct update
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, bool direct,
			       struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align =
AMDGPU_GPU_PAGE_SIZE; 864 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 865 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 866 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 867 AMDGPU_GEM_CREATE_CPU_GTT_USWC; 868 if (vm->use_cpu_for_update) 869 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 870 else if (!vm->root.base.bo || vm->root.base.bo->shadow) 871 bp->flags |= AMDGPU_GEM_CREATE_SHADOW; 872 bp->type = ttm_bo_type_kernel; 873 bp->no_wait_gpu = direct; 874 if (vm->root.base.bo) 875 bp->resv = vm->root.base.bo->tbo.base.resv; 876 } 877 878 /** 879 * amdgpu_vm_alloc_pts - Allocate a specific page table 880 * 881 * @adev: amdgpu_device pointer 882 * @vm: VM to allocate page tables for 883 * @cursor: Which page table to allocate 884 * @direct: use a direct update 885 * 886 * Make sure a specific page table or directory is allocated. 887 * 888 * Returns: 889 * 1 if page table needed to be allocated, 0 if page table was already 890 * allocated, negative errno if an error occurred. 891 */ 892 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 893 struct amdgpu_vm *vm, 894 struct amdgpu_vm_pt_cursor *cursor, 895 bool direct) 896 { 897 struct amdgpu_vm_pt *entry = cursor->entry; 898 struct amdgpu_bo_param bp; 899 struct amdgpu_bo *pt; 900 int r; 901 902 if (cursor->level < AMDGPU_VM_PTB && !entry->entries) { 903 unsigned num_entries; 904 905 num_entries = amdgpu_vm_num_entries(adev, cursor->level); 906 entry->entries = kvmalloc_array(num_entries, 907 sizeof(*entry->entries), 908 GFP_KERNEL | __GFP_ZERO); 909 if (!entry->entries) 910 return -ENOMEM; 911 } 912 913 if (entry->base.bo) 914 return 0; 915 916 amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); 917 918 r = amdgpu_bo_create(adev, &bp, &pt); 919 if (r) 920 return r; 921 922 /* Keep a reference to the root directory to avoid 923 * freeing them up in the wrong order. 924 */ 925 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); 926 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 927 928 r = amdgpu_vm_clear_bo(adev, vm, pt, direct); 929 if (r) 930 goto error_free_pt; 931 932 return 0; 933 934 error_free_pt: 935 amdgpu_bo_unref(&pt->shadow); 936 amdgpu_bo_unref(&pt); 937 return r; 938 } 939 940 /** 941 * amdgpu_vm_free_table - fre one PD/PT 942 * 943 * @entry: PDE to free 944 */ 945 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) 946 { 947 if (entry->base.bo) { 948 entry->base.bo->vm_bo = NULL; 949 list_del(&entry->base.vm_status); 950 amdgpu_bo_unref(&entry->base.bo->shadow); 951 amdgpu_bo_unref(&entry->base.bo); 952 } 953 kvfree(entry->entries); 954 entry->entries = NULL; 955 } 956 957 /** 958 * amdgpu_vm_free_pts - free PD/PT levels 959 * 960 * @adev: amdgpu device structure 961 * @vm: amdgpu vm structure 962 * @start: optional cursor where to start freeing PDs/PTs 963 * 964 * Free the page directory or page table level and all sub levels. 
965 */ 966 static void amdgpu_vm_free_pts(struct amdgpu_device *adev, 967 struct amdgpu_vm *vm, 968 struct amdgpu_vm_pt_cursor *start) 969 { 970 struct amdgpu_vm_pt_cursor cursor; 971 struct amdgpu_vm_pt *entry; 972 973 vm->bulk_moveable = false; 974 975 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) 976 amdgpu_vm_free_table(entry); 977 978 if (start) 979 amdgpu_vm_free_table(start->entry); 980 } 981 982 /** 983 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug 984 * 985 * @adev: amdgpu_device pointer 986 */ 987 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) 988 { 989 const struct amdgpu_ip_block *ip_block; 990 bool has_compute_vm_bug; 991 struct amdgpu_ring *ring; 992 int i; 993 994 has_compute_vm_bug = false; 995 996 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); 997 if (ip_block) { 998 /* Compute has a VM bug for GFX version < 7. 999 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ 1000 if (ip_block->version->major <= 7) 1001 has_compute_vm_bug = true; 1002 else if (ip_block->version->major == 8) 1003 if (adev->gfx.mec_fw_version < 673) 1004 has_compute_vm_bug = true; 1005 } 1006 1007 for (i = 0; i < adev->num_rings; i++) { 1008 ring = adev->rings[i]; 1009 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) 1010 /* only compute rings */ 1011 ring->has_compute_vm_bug = has_compute_vm_bug; 1012 else 1013 ring->has_compute_vm_bug = false; 1014 } 1015 } 1016 1017 /** 1018 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job. 1019 * 1020 * @ring: ring on which the job will be submitted 1021 * @job: job to submit 1022 * 1023 * Returns: 1024 * True if sync is needed. 1025 */ 1026 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, 1027 struct amdgpu_job *job) 1028 { 1029 struct amdgpu_device *adev = ring->adev; 1030 unsigned vmhub = ring->funcs->vmhub; 1031 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1032 struct amdgpu_vmid *id; 1033 bool gds_switch_needed; 1034 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; 1035 1036 if (job->vmid == 0) 1037 return false; 1038 id = &id_mgr->ids[job->vmid]; 1039 gds_switch_needed = ring->funcs->emit_gds_switch && ( 1040 id->gds_base != job->gds_base || 1041 id->gds_size != job->gds_size || 1042 id->gws_base != job->gws_base || 1043 id->gws_size != job->gws_size || 1044 id->oa_base != job->oa_base || 1045 id->oa_size != job->oa_size); 1046 1047 if (amdgpu_vmid_had_gpu_reset(adev, id)) 1048 return true; 1049 1050 return vm_flush_needed || gds_switch_needed; 1051 } 1052 1053 /** 1054 * amdgpu_vm_flush - hardware flush the vm 1055 * 1056 * @ring: ring to use for flush 1057 * @job: related job 1058 * @need_pipe_sync: is pipe sync needed 1059 * 1060 * Emit a VM flush when it is necessary. 1061 * 1062 * Returns: 1063 * 0 on success, errno otherwise. 
1064 */ 1065 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, 1066 bool need_pipe_sync) 1067 { 1068 struct amdgpu_device *adev = ring->adev; 1069 unsigned vmhub = ring->funcs->vmhub; 1070 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1071 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; 1072 bool gds_switch_needed = ring->funcs->emit_gds_switch && ( 1073 id->gds_base != job->gds_base || 1074 id->gds_size != job->gds_size || 1075 id->gws_base != job->gws_base || 1076 id->gws_size != job->gws_size || 1077 id->oa_base != job->oa_base || 1078 id->oa_size != job->oa_size); 1079 bool vm_flush_needed = job->vm_needs_flush; 1080 struct dma_fence *fence = NULL; 1081 bool pasid_mapping_needed = false; 1082 unsigned patch_offset = 0; 1083 bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); 1084 int r; 1085 1086 if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) 1087 adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); 1088 1089 if (amdgpu_vmid_had_gpu_reset(adev, id)) { 1090 gds_switch_needed = true; 1091 vm_flush_needed = true; 1092 pasid_mapping_needed = true; 1093 } 1094 1095 mutex_lock(&id_mgr->lock); 1096 if (id->pasid != job->pasid || !id->pasid_mapping || 1097 !dma_fence_is_signaled(id->pasid_mapping)) 1098 pasid_mapping_needed = true; 1099 mutex_unlock(&id_mgr->lock); 1100 1101 gds_switch_needed &= !!ring->funcs->emit_gds_switch; 1102 vm_flush_needed &= !!ring->funcs->emit_vm_flush && 1103 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; 1104 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && 1105 ring->funcs->emit_wreg; 1106 1107 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) 1108 return 0; 1109 1110 if (ring->funcs->init_cond_exec) 1111 patch_offset = amdgpu_ring_init_cond_exec(ring); 1112 1113 if (need_pipe_sync) 1114 amdgpu_ring_emit_pipeline_sync(ring); 1115 1116 if (vm_flush_needed) { 1117 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); 1118 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); 1119 } 1120 1121 if (pasid_mapping_needed) 1122 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); 1123 1124 if (vm_flush_needed || pasid_mapping_needed) { 1125 r = amdgpu_fence_emit(ring, &fence, 0); 1126 if (r) 1127 return r; 1128 } 1129 1130 if (vm_flush_needed) { 1131 mutex_lock(&id_mgr->lock); 1132 dma_fence_put(id->last_flush); 1133 id->last_flush = dma_fence_get(fence); 1134 id->current_gpu_reset_count = 1135 atomic_read(&adev->gpu_reset_counter); 1136 mutex_unlock(&id_mgr->lock); 1137 } 1138 1139 if (pasid_mapping_needed) { 1140 mutex_lock(&id_mgr->lock); 1141 id->pasid = job->pasid; 1142 dma_fence_put(id->pasid_mapping); 1143 id->pasid_mapping = dma_fence_get(fence); 1144 mutex_unlock(&id_mgr->lock); 1145 } 1146 dma_fence_put(fence); 1147 1148 if (ring->funcs->emit_gds_switch && gds_switch_needed) { 1149 id->gds_base = job->gds_base; 1150 id->gds_size = job->gds_size; 1151 id->gws_base = job->gws_base; 1152 id->gws_size = job->gws_size; 1153 id->oa_base = job->oa_base; 1154 id->oa_size = job->oa_size; 1155 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, 1156 job->gds_size, job->gws_base, 1157 job->gws_size, job->oa_base, 1158 job->oa_size); 1159 } 1160 1161 if (ring->funcs->patch_cond_exec) 1162 amdgpu_ring_patch_cond_exec(ring, patch_offset); 1163 1164 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ 1165 if (ring->funcs->emit_switch_buffer) { 1166 amdgpu_ring_emit_switch_buffer(ring); 1167 
amdgpu_ring_emit_switch_buffer(ring); 1168 } 1169 return 0; 1170 } 1171 1172 /** 1173 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 1174 * 1175 * @vm: requested vm 1176 * @bo: requested buffer object 1177 * 1178 * Find @bo inside the requested vm. 1179 * Search inside the @bos vm list for the requested vm 1180 * Returns the found bo_va or NULL if none is found 1181 * 1182 * Object has to be reserved! 1183 * 1184 * Returns: 1185 * Found bo_va or NULL. 1186 */ 1187 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 1188 struct amdgpu_bo *bo) 1189 { 1190 struct amdgpu_vm_bo_base *base; 1191 1192 for (base = bo->vm_bo; base; base = base->next) { 1193 if (base->vm != vm) 1194 continue; 1195 1196 return container_of(base, struct amdgpu_bo_va, base); 1197 } 1198 return NULL; 1199 } 1200 1201 /** 1202 * amdgpu_vm_map_gart - Resolve gart mapping of addr 1203 * 1204 * @pages_addr: optional DMA address to use for lookup 1205 * @addr: the unmapped addr 1206 * 1207 * Look up the physical address of the page that the pte resolves 1208 * to. 1209 * 1210 * Returns: 1211 * The pointer for the page table entry. 1212 */ 1213 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) 1214 { 1215 uint64_t result; 1216 1217 /* page table offset */ 1218 result = pages_addr[addr >> PAGE_SHIFT]; 1219 1220 /* in case cpu page size != gpu page size*/ 1221 result |= addr & (~PAGE_MASK); 1222 1223 result &= 0xFFFFFFFFFFFFF000ULL; 1224 1225 return result; 1226 } 1227 1228 /** 1229 * amdgpu_vm_update_pde - update a single level in the hierarchy 1230 * 1231 * @params: parameters for the update 1232 * @vm: requested vm 1233 * @entry: entry to update 1234 * 1235 * Makes sure the requested entry in parent is up to date. 1236 */ 1237 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, 1238 struct amdgpu_vm *vm, 1239 struct amdgpu_vm_pt *entry) 1240 { 1241 struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); 1242 struct amdgpu_bo *bo = parent->base.bo, *pbo; 1243 uint64_t pde, pt, flags; 1244 unsigned level; 1245 1246 for (level = 0, pbo = bo->parent; pbo; ++level) 1247 pbo = pbo->parent; 1248 1249 level += params->adev->vm_manager.root_level; 1250 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); 1251 pde = (entry - parent->entries) * 8; 1252 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); 1253 } 1254 1255 /** 1256 * amdgpu_vm_invalidate_pds - mark all PDs as invalid 1257 * 1258 * @adev: amdgpu_device pointer 1259 * @vm: related vm 1260 * 1261 * Mark all PD level as invalid after an error. 1262 */ 1263 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, 1264 struct amdgpu_vm *vm) 1265 { 1266 struct amdgpu_vm_pt_cursor cursor; 1267 struct amdgpu_vm_pt *entry; 1268 1269 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) 1270 if (entry->base.bo && !entry->base.moved) 1271 amdgpu_vm_bo_relocated(&entry->base); 1272 } 1273 1274 /** 1275 * amdgpu_vm_update_pdes - make sure that all directories are valid 1276 * 1277 * @adev: amdgpu_device pointer 1278 * @vm: requested vm 1279 * @direct: submit directly to the paging queue 1280 * 1281 * Makes sure all directories are up to date. 1282 * 1283 * Returns: 1284 * 0 for success, error for failure. 
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct)
{
	struct amdgpu_vm_update_params params;
	int r;

	if (list_empty(&vm->relocated))
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_pt *entry;

		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;
	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
				   struct amdgpu_bo *bo, unsigned level,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)

{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
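	 *
	 * For example, a fragment value of 9 raises the page granularity to
	 * 1 << (12 + 9) = 2MB, so a 2MB aligned, physically contiguous
	 * mapping can be covered by a single L1 TLB entry.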
1390 */ 1391 unsigned max_frag; 1392 1393 if (params->adev->asic_type < CHIP_VEGA10) 1394 max_frag = params->adev->vm_manager.fragment_size; 1395 else 1396 max_frag = 31; 1397 1398 /* system pages are non continuously */ 1399 if (params->pages_addr) { 1400 *frag = 0; 1401 *frag_end = end; 1402 return; 1403 } 1404 1405 /* This intentionally wraps around if no bit is set */ 1406 *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); 1407 if (*frag >= max_frag) { 1408 *frag = max_frag; 1409 *frag_end = end & ~((1ULL << max_frag) - 1); 1410 } else { 1411 *frag_end = start + (1 << *frag); 1412 } 1413 } 1414 1415 /** 1416 * amdgpu_vm_update_ptes - make sure that page tables are valid 1417 * 1418 * @params: see amdgpu_vm_update_params definition 1419 * @start: start of GPU address range 1420 * @end: end of GPU address range 1421 * @dst: destination address to map to, the next dst inside the function 1422 * @flags: mapping flags 1423 * 1424 * Update the page tables in the range @start - @end. 1425 * 1426 * Returns: 1427 * 0 for success, -EINVAL for failure. 1428 */ 1429 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, 1430 uint64_t start, uint64_t end, 1431 uint64_t dst, uint64_t flags) 1432 { 1433 struct amdgpu_device *adev = params->adev; 1434 struct amdgpu_vm_pt_cursor cursor; 1435 uint64_t frag_start = start, frag_end; 1436 unsigned int frag; 1437 int r; 1438 1439 /* figure out the initial fragment */ 1440 amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end); 1441 1442 /* walk over the address space and update the PTs */ 1443 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); 1444 while (cursor.pfn < end) { 1445 unsigned shift, parent_shift, mask; 1446 uint64_t incr, entry_end, pe_start; 1447 struct amdgpu_bo *pt; 1448 1449 if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { 1450 /* make sure that the page tables covering the 1451 * address range are actually allocated 1452 */ 1453 r = amdgpu_vm_alloc_pts(params->adev, params->vm, 1454 &cursor, params->direct); 1455 if (r) 1456 return r; 1457 } 1458 1459 shift = amdgpu_vm_level_shift(adev, cursor.level); 1460 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); 1461 if (adev->asic_type < CHIP_VEGA10 && 1462 (flags & AMDGPU_PTE_VALID)) { 1463 /* No huge page support before GMC v9 */ 1464 if (cursor.level != AMDGPU_VM_PTB) { 1465 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1466 return -ENOENT; 1467 continue; 1468 } 1469 } else if (frag < shift) { 1470 /* We can't use this level when the fragment size is 1471 * smaller than the address shift. Go to the next 1472 * child entry and try again. 1473 */ 1474 if (amdgpu_vm_pt_descendant(adev, &cursor)) 1475 continue; 1476 } else if (frag >= parent_shift) { 1477 /* If the fragment size is even larger than the parent 1478 * shift we should go up one level and check it again. 1479 */ 1480 if (!amdgpu_vm_pt_ancestor(&cursor)) 1481 return -EINVAL; 1482 continue; 1483 } 1484 1485 pt = cursor.entry->base.bo; 1486 if (!pt) { 1487 /* We need all PDs and PTs for mapping something, */ 1488 if (flags & AMDGPU_PTE_VALID) 1489 return -ENOENT; 1490 1491 /* but unmapping something can happen at a higher 1492 * level. 
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->base.bo;
			shift = parent_shift;
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned nptes = (upd_end - frag_start) >> shift;

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);
			amdgpu_vm_update_flags(params, pt, cursor.level,
					       pe_start, dst, nptes, incr,
					       flags | AMDGPU_PTE_FRAG(frag));

			pe_start += nptes * 8;
			dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_fragment(params, frag_start, end,
						   flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				amdgpu_vm_free_pts(adev, params->vm, &cursor);
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @direct: direct submission in a page fault
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
					struct amdgpu_vm *vm, bool direct,
					struct dma_resv *resv,
					uint64_t start, uint64_t last,
					uint64_t flags, uint64_t addr,
					dma_addr_t *pages_addr,
					struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	enum amdgpu_sync_mode sync_mode;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;
	params.pages_addr = pages_addr;

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping. Sync to moving fences before mapping.
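	 * In other words: unmap requests (no AMDGPU_PTE_VALID set) use
	 * AMDGPU_SYNC_EQ_OWNER, map requests use AMDGPU_SYNC_EXPLICIT; see
	 * the sync_mode selection directly below.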
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_unlock;
	}

	if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
		struct amdgpu_bo *root = vm->root.base.bo;

		if (!dma_fence_is_signaled(vm->last_direct))
			amdgpu_bo_fence(root, vm->last_direct, true);
	}

	r = vm->update_funcs->prepare(&params, resv, sync_mode);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_unlock;

	r = vm->update_funcs->commit(&params, fence);

error_unlock:
	amdgpu_vm_eviction_unlock(vm);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @resv: fences we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @bo_adev: amdgpu_device pointer that the BO was actually allocated on
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into an SDMA IB.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_resv *resv,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct amdgpu_device *bo_adev,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
	uint64_t pfn, start = mapping->start;
	int r;

	/* Normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits here, but just in case we filter the flags first.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	/* Apply ASIC specific mapping flags */
	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		dma_addr_t *dma_addr = NULL;
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			uint64_t count;

			for (count = 1;
			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			     ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
			addr += bo_adev->vm_manager.vram_base_offset;
			addr += pfn << PAGE_SHIFT;
		}

		last =
min((uint64_t)mapping->last, start + max_entries - 1); 1724 r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv, 1725 start, last, flags, addr, 1726 dma_addr, fence); 1727 if (r) 1728 return r; 1729 1730 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE; 1731 if (nodes && nodes->size == pfn) { 1732 pfn = 0; 1733 ++nodes; 1734 } 1735 start = last + 1; 1736 1737 } while (unlikely(start != mapping->last + 1)); 1738 1739 return 0; 1740 } 1741 1742 /** 1743 * amdgpu_vm_bo_update - update all BO mappings in the vm page table 1744 * 1745 * @adev: amdgpu_device pointer 1746 * @bo_va: requested BO and VM object 1747 * @clear: if true clear the entries 1748 * 1749 * Fill in the page table entries for @bo_va. 1750 * 1751 * Returns: 1752 * 0 for success, -EINVAL for failure. 1753 */ 1754 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, 1755 bool clear) 1756 { 1757 struct amdgpu_bo *bo = bo_va->base.bo; 1758 struct amdgpu_vm *vm = bo_va->base.vm; 1759 struct amdgpu_bo_va_mapping *mapping; 1760 dma_addr_t *pages_addr = NULL; 1761 struct ttm_mem_reg *mem; 1762 struct drm_mm_node *nodes; 1763 struct dma_fence **last_update; 1764 struct dma_resv *resv; 1765 uint64_t flags; 1766 struct amdgpu_device *bo_adev = adev; 1767 int r; 1768 1769 if (clear || !bo) { 1770 mem = NULL; 1771 nodes = NULL; 1772 resv = vm->root.base.bo->tbo.base.resv; 1773 } else { 1774 struct ttm_dma_tt *ttm; 1775 1776 mem = &bo->tbo.mem; 1777 nodes = mem->mm_node; 1778 if (mem->mem_type == TTM_PL_TT) { 1779 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1780 pages_addr = ttm->dma_address; 1781 } 1782 resv = bo->tbo.base.resv; 1783 } 1784 1785 if (bo) { 1786 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); 1787 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); 1788 } else { 1789 flags = 0x0; 1790 } 1791 1792 if (clear || (bo && bo->tbo.base.resv == 1793 vm->root.base.bo->tbo.base.resv)) 1794 last_update = &vm->last_update; 1795 else 1796 last_update = &bo_va->last_pt_update; 1797 1798 if (!clear && bo_va->base.moved) { 1799 bo_va->base.moved = false; 1800 list_splice_init(&bo_va->valids, &bo_va->invalids); 1801 1802 } else if (bo_va->cleared != clear) { 1803 list_splice_init(&bo_va->valids, &bo_va->invalids); 1804 } 1805 1806 list_for_each_entry(mapping, &bo_va->invalids, list) { 1807 r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm, 1808 mapping, flags, bo_adev, nodes, 1809 last_update); 1810 if (r) 1811 return r; 1812 } 1813 1814 /* If the BO is not in its preferred location add it back to 1815 * the evicted list so that it gets validated again on the 1816 * next command submission. 
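	 * Per VM BOs (those sharing the root PD reservation object) end up on
	 * the idle or evicted list, all other BOs are marked as done.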
1817 */ 1818 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1819 uint32_t mem_type = bo->tbo.mem.mem_type; 1820 1821 if (!(bo->preferred_domains & 1822 amdgpu_mem_type_to_domain(mem_type))) 1823 amdgpu_vm_bo_evicted(&bo_va->base); 1824 else 1825 amdgpu_vm_bo_idle(&bo_va->base); 1826 } else { 1827 amdgpu_vm_bo_done(&bo_va->base); 1828 } 1829 1830 list_splice_init(&bo_va->invalids, &bo_va->valids); 1831 bo_va->cleared = clear; 1832 1833 if (trace_amdgpu_vm_bo_mapping_enabled()) { 1834 list_for_each_entry(mapping, &bo_va->valids, list) 1835 trace_amdgpu_vm_bo_mapping(mapping); 1836 } 1837 1838 return 0; 1839 } 1840 1841 /** 1842 * amdgpu_vm_update_prt_state - update the global PRT state 1843 * 1844 * @adev: amdgpu_device pointer 1845 */ 1846 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) 1847 { 1848 unsigned long flags; 1849 bool enable; 1850 1851 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); 1852 enable = !!atomic_read(&adev->vm_manager.num_prt_users); 1853 adev->gmc.gmc_funcs->set_prt(adev, enable); 1854 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); 1855 } 1856 1857 /** 1858 * amdgpu_vm_prt_get - add a PRT user 1859 * 1860 * @adev: amdgpu_device pointer 1861 */ 1862 static void amdgpu_vm_prt_get(struct amdgpu_device *adev) 1863 { 1864 if (!adev->gmc.gmc_funcs->set_prt) 1865 return; 1866 1867 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) 1868 amdgpu_vm_update_prt_state(adev); 1869 } 1870 1871 /** 1872 * amdgpu_vm_prt_put - drop a PRT user 1873 * 1874 * @adev: amdgpu_device pointer 1875 */ 1876 static void amdgpu_vm_prt_put(struct amdgpu_device *adev) 1877 { 1878 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) 1879 amdgpu_vm_update_prt_state(adev); 1880 } 1881 1882 /** 1883 * amdgpu_vm_prt_cb - callback for updating the PRT status 1884 * 1885 * @fence: fence for the callback 1886 * @_cb: the callback function 1887 */ 1888 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) 1889 { 1890 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); 1891 1892 amdgpu_vm_prt_put(cb->adev); 1893 kfree(cb); 1894 } 1895 1896 /** 1897 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status 1898 * 1899 * @adev: amdgpu_device pointer 1900 * @fence: fence for the callback 1901 */ 1902 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, 1903 struct dma_fence *fence) 1904 { 1905 struct amdgpu_prt_cb *cb; 1906 1907 if (!adev->gmc.gmc_funcs->set_prt) 1908 return; 1909 1910 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); 1911 if (!cb) { 1912 /* Last resort when we are OOM */ 1913 if (fence) 1914 dma_fence_wait(fence, false); 1915 1916 amdgpu_vm_prt_put(adev); 1917 } else { 1918 cb->adev = adev; 1919 if (!fence || dma_fence_add_callback(fence, &cb->cb, 1920 amdgpu_vm_prt_cb)) 1921 amdgpu_vm_prt_cb(fence, &cb->cb); 1922 } 1923 } 1924 1925 /** 1926 * amdgpu_vm_free_mapping - free a mapping 1927 * 1928 * @adev: amdgpu_device pointer 1929 * @vm: requested vm 1930 * @mapping: mapping to be freed 1931 * @fence: fence of the unmap operation 1932 * 1933 * Free a mapping and make sure we decrease the PRT usage count if applicable. 
1934 */ 1935 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, 1936 struct amdgpu_vm *vm, 1937 struct amdgpu_bo_va_mapping *mapping, 1938 struct dma_fence *fence) 1939 { 1940 if (mapping->flags & AMDGPU_PTE_PRT) 1941 amdgpu_vm_add_prt_cb(adev, fence); 1942 kfree(mapping); 1943 } 1944 1945 /** 1946 * amdgpu_vm_prt_fini - finish all prt mappings 1947 * 1948 * @adev: amdgpu_device pointer 1949 * @vm: requested vm 1950 * 1951 * Register a cleanup callback to disable PRT support after VM dies. 1952 */ 1953 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1954 { 1955 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 1956 struct dma_fence *excl, **shared; 1957 unsigned i, shared_count; 1958 int r; 1959 1960 r = dma_resv_get_fences_rcu(resv, &excl, 1961 &shared_count, &shared); 1962 if (r) { 1963 /* Not enough memory to grab the fence list, as last resort 1964 * block for all the fences to complete. 1965 */ 1966 dma_resv_wait_timeout_rcu(resv, true, false, 1967 MAX_SCHEDULE_TIMEOUT); 1968 return; 1969 } 1970 1971 /* Add a callback for each fence in the reservation object */ 1972 amdgpu_vm_prt_get(adev); 1973 amdgpu_vm_add_prt_cb(adev, excl); 1974 1975 for (i = 0; i < shared_count; ++i) { 1976 amdgpu_vm_prt_get(adev); 1977 amdgpu_vm_add_prt_cb(adev, shared[i]); 1978 } 1979 1980 kfree(shared); 1981 } 1982 1983 /** 1984 * amdgpu_vm_clear_freed - clear freed BOs in the PT 1985 * 1986 * @adev: amdgpu_device pointer 1987 * @vm: requested vm 1988 * @fence: optional resulting fence (unchanged if no work needed to be done 1989 * or if an error occurred) 1990 * 1991 * Make sure all freed BOs are cleared in the PT. 1992 * PTs have to be reserved and mutex must be locked! 1993 * 1994 * Returns: 1995 * 0 for success. 1996 * 1997 */ 1998 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 1999 struct amdgpu_vm *vm, 2000 struct dma_fence **fence) 2001 { 2002 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 2003 struct amdgpu_bo_va_mapping *mapping; 2004 uint64_t init_pte_value = 0; 2005 struct dma_fence *f = NULL; 2006 int r; 2007 2008 while (!list_empty(&vm->freed)) { 2009 mapping = list_first_entry(&vm->freed, 2010 struct amdgpu_bo_va_mapping, list); 2011 list_del(&mapping->list); 2012 2013 if (vm->pte_support_ats && 2014 mapping->start < AMDGPU_GMC_HOLE_START) 2015 init_pte_value = AMDGPU_PTE_DEFAULT_ATC; 2016 2017 r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv, 2018 mapping->start, mapping->last, 2019 init_pte_value, 0, NULL, &f); 2020 amdgpu_vm_free_mapping(adev, vm, mapping, f); 2021 if (r) { 2022 dma_fence_put(f); 2023 return r; 2024 } 2025 } 2026 2027 if (fence && f) { 2028 dma_fence_put(*fence); 2029 *fence = f; 2030 } else { 2031 dma_fence_put(f); 2032 } 2033 2034 return 0; 2035 2036 } 2037 2038 /** 2039 * amdgpu_vm_handle_moved - handle moved BOs in the PT 2040 * 2041 * @adev: amdgpu_device pointer 2042 * @vm: requested vm 2043 * 2044 * Make sure all BOs which are moved are updated in the PTs. 2045 * 2046 * Returns: 2047 * 0 for success. 2048 * 2049 * PTs have to be reserved! 
2050 */
2051 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2052 struct amdgpu_vm *vm)
2053 {
2054 struct amdgpu_bo_va *bo_va, *tmp;
2055 struct dma_resv *resv;
2056 bool clear;
2057 int r;
2058
2059 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2060 /* Per VM BOs never need to be cleared in the page tables */
2061 r = amdgpu_vm_bo_update(adev, bo_va, false);
2062 if (r)
2063 return r;
2064 }
2065
2066 spin_lock(&vm->invalidated_lock);
2067 while (!list_empty(&vm->invalidated)) {
2068 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2069 base.vm_status);
2070 resv = bo_va->base.bo->tbo.base.resv;
2071 spin_unlock(&vm->invalidated_lock);
2072
2073 /* Try to reserve the BO to avoid clearing its ptes */
2074 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2075 clear = false;
2076 /* Somebody else is using the BO right now */
2077 else
2078 clear = true;
2079
2080 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2081 if (r)
2082 return r;
2083
2084 if (!clear)
2085 dma_resv_unlock(resv);
2086 spin_lock(&vm->invalidated_lock);
2087 }
2088 spin_unlock(&vm->invalidated_lock);
2089
2090 return 0;
2091 }
2092
2093 /**
2094 * amdgpu_vm_bo_add - add a bo to a specific vm
2095 *
2096 * @adev: amdgpu_device pointer
2097 * @vm: requested vm
2098 * @bo: amdgpu buffer object
2099 *
2100 * Add @bo into the requested vm.
2101 * Add @bo to the list of bos associated with the vm.
2102 *
2103 * Returns:
2104 * Newly added bo_va or NULL for failure
2105 *
2106 * Object has to be reserved!
2107 */
2108 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2109 struct amdgpu_vm *vm,
2110 struct amdgpu_bo *bo)
2111 {
2112 struct amdgpu_bo_va *bo_va;
2113
2114 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2115 if (bo_va == NULL) {
2116 return NULL;
2117 }
2118 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2119
2120 bo_va->ref_count = 1;
2121 INIT_LIST_HEAD(&bo_va->valids);
2122 INIT_LIST_HEAD(&bo_va->invalids);
2123
2124 if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2125 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2126 bo_va->is_xgmi = true;
2127 mutex_lock(&adev->vm_manager.lock_pstate);
2128 /* Power up XGMI if it can be potentially used */
2129 if (++adev->vm_manager.xgmi_map_counter == 1)
2130 amdgpu_xgmi_set_pstate(adev, 1);
2131 mutex_unlock(&adev->vm_manager.lock_pstate);
2132 }
2133
2134 return bo_va;
2135 }
2136
2137
2138 /**
2139 * amdgpu_vm_bo_insert_map - insert a new mapping
2140 *
2141 * @adev: amdgpu_device pointer
2142 * @bo_va: bo_va to store the address
2143 * @mapping: the mapping to insert
2144 *
2145 * Insert a new mapping into all structures.
2146 */
2147 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2148 struct amdgpu_bo_va *bo_va,
2149 struct amdgpu_bo_va_mapping *mapping)
2150 {
2151 struct amdgpu_vm *vm = bo_va->base.vm;
2152 struct amdgpu_bo *bo = bo_va->base.bo;
2153
2154 mapping->bo_va = bo_va;
2155 list_add(&mapping->list, &bo_va->invalids);
2156 amdgpu_vm_it_insert(mapping, &vm->va);
2157
2158 if (mapping->flags & AMDGPU_PTE_PRT)
2159 amdgpu_vm_prt_get(adev);
2160
2161 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2162 !bo_va->base.moved) {
2163 list_move(&bo_va->base.vm_status, &vm->moved);
2164 }
2165 trace_amdgpu_vm_bo_map(bo_va, mapping);
2166 }
2167
2168 /**
2169 * amdgpu_vm_bo_map - map bo inside a vm
2170 *
2171 * @adev: amdgpu_device pointer
2172 * @bo_va: bo_va to store the address
2173 * @saddr: where to map the BO
2174 * @offset: requested offset in the BO
2175 * @size: BO size in bytes
2176 * @flags: attributes of pages (read/write/valid/etc.)
2177 *
2178 * Add a mapping of the BO at the specified addr into the VM.
2179 *
2180 * Returns:
2181 * 0 for success, error for failure.
2182 *
2183 * Object has to be reserved and unreserved outside!
2184 */
2185 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2186 struct amdgpu_bo_va *bo_va,
2187 uint64_t saddr, uint64_t offset,
2188 uint64_t size, uint64_t flags)
2189 {
2190 struct amdgpu_bo_va_mapping *mapping, *tmp;
2191 struct amdgpu_bo *bo = bo_va->base.bo;
2192 struct amdgpu_vm *vm = bo_va->base.vm;
2193 uint64_t eaddr;
2194
2195 /* validate the parameters */
2196 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2197 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2198 return -EINVAL;
2199
2200 /* make sure object fits at this offset */
2201 eaddr = saddr + size - 1;
2202 if (saddr >= eaddr ||
2203 (bo && offset + size > amdgpu_bo_size(bo)))
2204 return -EINVAL;
2205
2206 saddr /= AMDGPU_GPU_PAGE_SIZE;
2207 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2208
2209 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2210 if (tmp) {
2211 /* bo and tmp overlap, invalid addr */
2212 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2213 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2214 tmp->start, tmp->last + 1);
2215 return -EINVAL;
2216 }
2217
2218 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2219 if (!mapping)
2220 return -ENOMEM;
2221
2222 mapping->start = saddr;
2223 mapping->last = eaddr;
2224 mapping->offset = offset;
2225 mapping->flags = flags;
2226
2227 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2228
2229 return 0;
2230 }
2231
2232 /**
2233 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2234 *
2235 * @adev: amdgpu_device pointer
2236 * @bo_va: bo_va to store the address
2237 * @saddr: where to map the BO
2238 * @offset: requested offset in the BO
2239 * @size: BO size in bytes
2240 * @flags: attributes of pages (read/write/valid/etc.)
2241 *
2242 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2243 * mappings as we do so.
2244 *
2245 * Returns:
2246 * 0 for success, error for failure.
2247 *
2248 * Object has to be reserved and unreserved outside!
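 *
 * Unlike amdgpu_vm_bo_map(), mappings that already exist inside the range
 * are removed first (and split where they only partially overlap) instead
 * of failing with -EINVAL on a conflict.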
2249 */
2250 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2251 struct amdgpu_bo_va *bo_va,
2252 uint64_t saddr, uint64_t offset,
2253 uint64_t size, uint64_t flags)
2254 {
2255 struct amdgpu_bo_va_mapping *mapping;
2256 struct amdgpu_bo *bo = bo_va->base.bo;
2257 uint64_t eaddr;
2258 int r;
2259
2260 /* validate the parameters */
2261 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2262 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2263 return -EINVAL;
2264
2265 /* make sure object fits at this offset */
2266 eaddr = saddr + size - 1;
2267 if (saddr >= eaddr ||
2268 (bo && offset + size > amdgpu_bo_size(bo)))
2269 return -EINVAL;
2270
2271 /* Allocate all the needed memory */
2272 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2273 if (!mapping)
2274 return -ENOMEM;
2275
2276 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2277 if (r) {
2278 kfree(mapping);
2279 return r;
2280 }
2281
2282 saddr /= AMDGPU_GPU_PAGE_SIZE;
2283 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2284
2285 mapping->start = saddr;
2286 mapping->last = eaddr;
2287 mapping->offset = offset;
2288 mapping->flags = flags;
2289
2290 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2291
2292 return 0;
2293 }
2294
2295 /**
2296 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2297 *
2298 * @adev: amdgpu_device pointer
2299 * @bo_va: bo_va to remove the address from
2300 * @saddr: where the BO is mapped
2301 *
2302 * Remove a mapping of the BO at the specified addr from the VM.
2303 *
2304 * Returns:
2305 * 0 for success, error for failure.
2306 *
2307 * Object has to be reserved and unreserved outside!
2308 */
2309 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2310 struct amdgpu_bo_va *bo_va,
2311 uint64_t saddr)
2312 {
2313 struct amdgpu_bo_va_mapping *mapping;
2314 struct amdgpu_vm *vm = bo_va->base.vm;
2315 bool valid = true;
2316
2317 saddr /= AMDGPU_GPU_PAGE_SIZE;
2318
2319 list_for_each_entry(mapping, &bo_va->valids, list) {
2320 if (mapping->start == saddr)
2321 break;
2322 }
2323
2324 if (&mapping->list == &bo_va->valids) {
2325 valid = false;
2326
2327 list_for_each_entry(mapping, &bo_va->invalids, list) {
2328 if (mapping->start == saddr)
2329 break;
2330 }
2331
2332 if (&mapping->list == &bo_va->invalids)
2333 return -ENOENT;
2334 }
2335
2336 list_del(&mapping->list);
2337 amdgpu_vm_it_remove(mapping, &vm->va);
2338 mapping->bo_va = NULL;
2339 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2340
2341 if (valid)
2342 list_add(&mapping->list, &vm->freed);
2343 else
2344 amdgpu_vm_free_mapping(adev, vm, mapping,
2345 bo_va->last_pt_update);
2346
2347 return 0;
2348 }
2349
2350 /**
2351 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2352 *
2353 * @adev: amdgpu_device pointer
2354 * @vm: VM structure to use
2355 * @saddr: start of the range
2356 * @size: size of the range
2357 *
2358 * Remove all mappings in a range, split them as appropriate.
2359 *
2360 * Returns:
2361 * 0 for success, error for failure.
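 *
 * For example (illustrative page numbers only): if an existing mapping
 * covers GPU pages 0x000..0x3ff and the cleared range translates to pages
 * 0x100..0x1ff, two remainder mappings 0x000..0x0ff and 0x200..0x3ff are
 * kept, while the covered middle part is moved to the freed list.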
2362 */
2363 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2364 struct amdgpu_vm *vm,
2365 uint64_t saddr, uint64_t size)
2366 {
2367 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2368 LIST_HEAD(removed);
2369 uint64_t eaddr;
2370
2371 eaddr = saddr + size - 1;
2372 saddr /= AMDGPU_GPU_PAGE_SIZE;
2373 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2374
2375 /* Allocate all the needed memory */
2376 before = kzalloc(sizeof(*before), GFP_KERNEL);
2377 if (!before)
2378 return -ENOMEM;
2379 INIT_LIST_HEAD(&before->list);
2380
2381 after = kzalloc(sizeof(*after), GFP_KERNEL);
2382 if (!after) {
2383 kfree(before);
2384 return -ENOMEM;
2385 }
2386 INIT_LIST_HEAD(&after->list);
2387
2388 /* Now gather all removed mappings */
2389 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2390 while (tmp) {
2391 /* Remember mapping split at the start */
2392 if (tmp->start < saddr) {
2393 before->start = tmp->start;
2394 before->last = saddr - 1;
2395 before->offset = tmp->offset;
2396 before->flags = tmp->flags;
2397 before->bo_va = tmp->bo_va;
2398 list_add(&before->list, &tmp->bo_va->invalids);
2399 }
2400
2401 /* Remember mapping split at the end */
2402 if (tmp->last > eaddr) {
2403 after->start = eaddr + 1;
2404 after->last = tmp->last;
2405 after->offset = tmp->offset;
2406 after->offset += after->start - tmp->start;
2407 after->flags = tmp->flags;
2408 after->bo_va = tmp->bo_va;
2409 list_add(&after->list, &tmp->bo_va->invalids);
2410 }
2411
2412 list_del(&tmp->list);
2413 list_add(&tmp->list, &removed);
2414
2415 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2416 }
2417
2418 /* And free them up */
2419 list_for_each_entry_safe(tmp, next, &removed, list) {
2420 amdgpu_vm_it_remove(tmp, &vm->va);
2421 list_del(&tmp->list);
2422
2423 if (tmp->start < saddr)
2424 tmp->start = saddr;
2425 if (tmp->last > eaddr)
2426 tmp->last = eaddr;
2427
2428 tmp->bo_va = NULL;
2429 list_add(&tmp->list, &vm->freed);
2430 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2431 }
2432
2433 /* Insert partial mapping before the range */
2434 if (!list_empty(&before->list)) {
2435 amdgpu_vm_it_insert(before, &vm->va);
2436 if (before->flags & AMDGPU_PTE_PRT)
2437 amdgpu_vm_prt_get(adev);
2438 } else {
2439 kfree(before);
2440 }
2441
2442 /* Insert partial mapping after the range */
2443 if (!list_empty(&after->list)) {
2444 amdgpu_vm_it_insert(after, &vm->va);
2445 if (after->flags & AMDGPU_PTE_PRT)
2446 amdgpu_vm_prt_get(adev);
2447 } else {
2448 kfree(after);
2449 }
2450
2451 return 0;
2452 }
2453
2454 /**
2455 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2456 *
2457 * @vm: the requested VM
2458 * @addr: the address
2459 *
2460 * Find a mapping by its address.
2461 *
2462 * Returns:
2463 * The amdgpu_bo_va_mapping matching addr or NULL
2464 *
2465 */
2466 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2467 uint64_t addr)
2468 {
2469 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2470 }
2471
2472 /**
2473 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2474 *
2475 * @vm: the requested vm
2476 * @ticket: CS ticket
2477 *
2478 * Trace all mappings of BOs reserved during a command submission.
2479 */
2480 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2481 {
2482 struct amdgpu_bo_va_mapping *mapping;
2483
2484 if (!trace_amdgpu_vm_bo_cs_enabled())
2485 return;
2486
2487 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2488 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2489 if (mapping->bo_va && mapping->bo_va->base.bo) {
2490 struct amdgpu_bo *bo;
2491
2492 bo = mapping->bo_va->base.bo;
2493 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2494 ticket)
2495 continue;
2496 }
2497
2498 trace_amdgpu_vm_bo_cs(mapping);
2499 }
2500 }
2501
2502 /**
2503 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2504 *
2505 * @adev: amdgpu_device pointer
2506 * @bo_va: requested bo_va
2507 *
2508 * Remove @bo_va->bo from the requested vm.
2509 *
2510 * Object has to be reserved!
2511 */
2512 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2513 struct amdgpu_bo_va *bo_va)
2514 {
2515 struct amdgpu_bo_va_mapping *mapping, *next;
2516 struct amdgpu_bo *bo = bo_va->base.bo;
2517 struct amdgpu_vm *vm = bo_va->base.vm;
2518 struct amdgpu_vm_bo_base **base;
2519
2520 if (bo) {
2521 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2522 vm->bulk_moveable = false;
2523
2524 for (base = &bo_va->base.bo->vm_bo; *base;
2525 base = &(*base)->next) {
2526 if (*base != &bo_va->base)
2527 continue;
2528
2529 *base = bo_va->base.next;
2530 break;
2531 }
2532 }
2533
2534 spin_lock(&vm->invalidated_lock);
2535 list_del(&bo_va->base.vm_status);
2536 spin_unlock(&vm->invalidated_lock);
2537
2538 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2539 list_del(&mapping->list);
2540 amdgpu_vm_it_remove(mapping, &vm->va);
2541 mapping->bo_va = NULL;
2542 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2543 list_add(&mapping->list, &vm->freed);
2544 }
2545 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2546 list_del(&mapping->list);
2547 amdgpu_vm_it_remove(mapping, &vm->va);
2548 amdgpu_vm_free_mapping(adev, vm, mapping,
2549 bo_va->last_pt_update);
2550 }
2551
2552 dma_fence_put(bo_va->last_pt_update);
2553
2554 if (bo && bo_va->is_xgmi) {
2555 mutex_lock(&adev->vm_manager.lock_pstate);
2556 if (--adev->vm_manager.xgmi_map_counter == 0)
2557 amdgpu_xgmi_set_pstate(adev, 0);
2558 mutex_unlock(&adev->vm_manager.lock_pstate);
2559 }
2560
2561 kfree(bo_va);
2562 }
2563
2564 /**
2565 * amdgpu_vm_evictable - check if we can evict a VM
2566 *
2567 * @bo: A page table of the VM.
2568 *
2569 * Check if it is possible to evict a VM.
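 *
 * Returns:
 * True if the VM's page tables can be evicted, false otherwise.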
2570 */ 2571 bool amdgpu_vm_evictable(struct amdgpu_bo *bo) 2572 { 2573 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; 2574 2575 /* Page tables of a destroyed VM can go away immediately */ 2576 if (!bo_base || !bo_base->vm) 2577 return true; 2578 2579 /* Don't evict VM page tables while they are busy */ 2580 if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) 2581 return false; 2582 2583 /* Try to block ongoing updates */ 2584 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) 2585 return false; 2586 2587 /* Don't evict VM page tables while they are updated */ 2588 if (!dma_fence_is_signaled(bo_base->vm->last_direct)) { 2589 amdgpu_vm_eviction_unlock(bo_base->vm); 2590 return false; 2591 } 2592 2593 bo_base->vm->evicting = true; 2594 amdgpu_vm_eviction_unlock(bo_base->vm); 2595 return true; 2596 } 2597 2598 /** 2599 * amdgpu_vm_bo_invalidate - mark the bo as invalid 2600 * 2601 * @adev: amdgpu_device pointer 2602 * @bo: amdgpu buffer object 2603 * @evicted: is the BO evicted 2604 * 2605 * Mark @bo as invalid. 2606 */ 2607 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2608 struct amdgpu_bo *bo, bool evicted) 2609 { 2610 struct amdgpu_vm_bo_base *bo_base; 2611 2612 /* shadow bo doesn't have bo base, its validation needs its parent */ 2613 if (bo->parent && bo->parent->shadow == bo) 2614 bo = bo->parent; 2615 2616 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2617 struct amdgpu_vm *vm = bo_base->vm; 2618 2619 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 2620 amdgpu_vm_bo_evicted(bo_base); 2621 continue; 2622 } 2623 2624 if (bo_base->moved) 2625 continue; 2626 bo_base->moved = true; 2627 2628 if (bo->tbo.type == ttm_bo_type_kernel) 2629 amdgpu_vm_bo_relocated(bo_base); 2630 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2631 amdgpu_vm_bo_moved(bo_base); 2632 else 2633 amdgpu_vm_bo_invalidated(bo_base); 2634 } 2635 } 2636 2637 /** 2638 * amdgpu_vm_get_block_size - calculate VM page table size as power of two 2639 * 2640 * @vm_size: VM size 2641 * 2642 * Returns: 2643 * VM page table as power of two 2644 */ 2645 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) 2646 { 2647 /* Total bits covered by PD + PTs */ 2648 unsigned bits = ilog2(vm_size) + 18; 2649 2650 /* Make sure the PD is 4K in size up to 8GB address space. 2651 Above that split equal between PD and PTs */ 2652 if (vm_size <= 8) 2653 return (bits - 9); 2654 else 2655 return ((bits + 3) / 2); 2656 } 2657 2658 /** 2659 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2660 * 2661 * @adev: amdgpu_device pointer 2662 * @min_vm_size: the minimum vm size in GB if it's set auto 2663 * @fragment_size_default: Default PTE fragment size 2664 * @max_level: max VMPT level 2665 * @max_bits: max address space size in bits 2666 * 2667 */ 2668 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 2669 uint32_t fragment_size_default, unsigned max_level, 2670 unsigned max_bits) 2671 { 2672 unsigned int max_size = 1 << (max_bits - 30); 2673 unsigned int vm_size; 2674 uint64_t tmp; 2675 2676 /* adjust vm size first */ 2677 if (amdgpu_vm_size != -1) { 2678 vm_size = amdgpu_vm_size; 2679 if (vm_size > max_size) { 2680 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2681 amdgpu_vm_size, max_size); 2682 vm_size = max_size; 2683 } 2684 } else { 2685 struct sysinfo si; 2686 unsigned int phys_ram_gb; 2687 2688 /* Optimal VM size depends on the amount of physical 2689 * RAM available. 
Underlying requirements and
2690 * assumptions:
2691 *
2692 * - Need to map system memory and VRAM from all GPUs
2693 * - VRAM from other GPUs not known here
2694 * - Assume VRAM <= system memory
2695 * - On GFX8 and older, VM space can be segmented for
2696 * different MTYPEs
2697 * - Need to allow room for fragmentation, guard pages etc.
2698 *
2699 * This adds up to a rough guess of system memory x3.
2700 * Round up to power of two to maximize the available
2701 * VM size with the given page table size.
2702 */
2703 si_meminfo(&si);
2704 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2705 (1 << 30) - 1) >> 30;
2706 vm_size = roundup_pow_of_two(
2707 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2708 }
2709
2710 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2711
2712 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2713 if (amdgpu_vm_block_size != -1)
2714 tmp >>= amdgpu_vm_block_size - 9;
2715 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2716 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2717 switch (adev->vm_manager.num_level) {
2718 case 3:
2719 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2720 break;
2721 case 2:
2722 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2723 break;
2724 case 1:
2725 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2726 break;
2727 default:
2728 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2729 }
2730 /* block size depends on vm size and hw setup */
2731 if (amdgpu_vm_block_size != -1)
2732 adev->vm_manager.block_size =
2733 min((unsigned)amdgpu_vm_block_size, max_bits
2734 - AMDGPU_GPU_PAGE_SHIFT
2735 - 9 * adev->vm_manager.num_level);
2736 else if (adev->vm_manager.num_level > 1)
2737 adev->vm_manager.block_size = 9;
2738 else
2739 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2740
2741 if (amdgpu_vm_fragment_size == -1)
2742 adev->vm_manager.fragment_size = fragment_size_default;
2743 else
2744 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2745
2746 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2747 vm_size, adev->vm_manager.num_level + 1,
2748 adev->vm_manager.block_size,
2749 adev->vm_manager.fragment_size);
2750 }
2751
2752 /**
2753 * amdgpu_vm_wait_idle - wait for the VM to become idle
2754 *
2755 * @vm: VM object to wait for
2756 * @timeout: timeout to wait for VM to become idle
2757 */
2758 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2759 {
2760 timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
2761 true, true, timeout);
2762 if (timeout <= 0)
2763 return timeout;
2764
2765 return dma_fence_wait_timeout(vm->last_direct, true, timeout);
2766 }
2767
2768 /**
2769 * amdgpu_vm_init - initialize a vm instance
2770 *
2771 * @adev: amdgpu_device pointer
2772 * @vm: requested vm
2773 * @vm_context: Indicates whether it is a GFX or Compute context
2774 * @pasid: Process address space identifier
2775 *
2776 * Init @vm fields.
2777 *
2778 * Returns:
2779 * 0 for success, error for failure.
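 *
 * A minimal usage sketch (illustrative only; real callers such as the KMS
 * open path do more setup, and AMDGPU_VM_CONTEXT_GFX plus the pasid shown
 * here are just example arguments):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;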
2780 */ 2781 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, 2782 int vm_context, unsigned int pasid) 2783 { 2784 struct amdgpu_bo_param bp; 2785 struct amdgpu_bo *root; 2786 int r, i; 2787 2788 vm->va = RB_ROOT_CACHED; 2789 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2790 vm->reserved_vmid[i] = NULL; 2791 INIT_LIST_HEAD(&vm->evicted); 2792 INIT_LIST_HEAD(&vm->relocated); 2793 INIT_LIST_HEAD(&vm->moved); 2794 INIT_LIST_HEAD(&vm->idle); 2795 INIT_LIST_HEAD(&vm->invalidated); 2796 spin_lock_init(&vm->invalidated_lock); 2797 INIT_LIST_HEAD(&vm->freed); 2798 2799 2800 /* create scheduler entities for page table updates */ 2801 r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL, 2802 adev->vm_manager.vm_pte_scheds, 2803 adev->vm_manager.vm_pte_num_scheds, NULL); 2804 if (r) 2805 return r; 2806 2807 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, 2808 adev->vm_manager.vm_pte_scheds, 2809 adev->vm_manager.vm_pte_num_scheds, NULL); 2810 if (r) 2811 goto error_free_direct; 2812 2813 vm->pte_support_ats = false; 2814 vm->is_compute_context = false; 2815 2816 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { 2817 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2818 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 2819 2820 if (adev->asic_type == CHIP_RAVEN) 2821 vm->pte_support_ats = true; 2822 } else { 2823 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2824 AMDGPU_VM_USE_CPU_FOR_GFX); 2825 } 2826 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2827 vm->use_cpu_for_update ? "CPU" : "SDMA"); 2828 WARN_ONCE((vm->use_cpu_for_update && 2829 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2830 "CPU update of VM recommended only for large BAR system\n"); 2831 2832 if (vm->use_cpu_for_update) 2833 vm->update_funcs = &amdgpu_vm_cpu_funcs; 2834 else 2835 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2836 vm->last_update = NULL; 2837 vm->last_direct = dma_fence_get_stub(); 2838 2839 mutex_init(&vm->eviction_lock); 2840 vm->evicting = false; 2841 2842 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); 2843 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) 2844 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; 2845 r = amdgpu_bo_create(adev, &bp, &root); 2846 if (r) 2847 goto error_free_delayed; 2848 2849 r = amdgpu_bo_reserve(root, true); 2850 if (r) 2851 goto error_free_root; 2852 2853 r = dma_resv_reserve_shared(root->tbo.base.resv, 1); 2854 if (r) 2855 goto error_unreserve; 2856 2857 amdgpu_vm_bo_base_init(&vm->root.base, vm, root); 2858 2859 r = amdgpu_vm_clear_bo(adev, vm, root, false); 2860 if (r) 2861 goto error_unreserve; 2862 2863 amdgpu_bo_unreserve(vm->root.base.bo); 2864 2865 if (pasid) { 2866 unsigned long flags; 2867 2868 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2869 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, 2870 GFP_ATOMIC); 2871 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2872 if (r < 0) 2873 goto error_free_root; 2874 2875 vm->pasid = pasid; 2876 } 2877 2878 INIT_KFIFO(vm->faults); 2879 2880 return 0; 2881 2882 error_unreserve: 2883 amdgpu_bo_unreserve(vm->root.base.bo); 2884 2885 error_free_root: 2886 amdgpu_bo_unref(&vm->root.base.bo->shadow); 2887 amdgpu_bo_unref(&vm->root.base.bo); 2888 vm->root.base.bo = NULL; 2889 2890 error_free_delayed: 2891 dma_fence_put(vm->last_direct); 2892 drm_sched_entity_destroy(&vm->delayed); 2893 2894 error_free_direct: 2895 drm_sched_entity_destroy(&vm->direct); 2896 2897 return r; 2898 } 2899 2900 /** 2901 * amdgpu_vm_check_clean_reserved - check if a VM 
is clean
2902 *
2903 * @adev: amdgpu_device pointer
2904 * @vm: the VM to check
2905 *
2906 * Check all entries of the root PD. If any subsequent PDs are allocated,
2907 * it means page tables are being created and filled, so this is not a
2908 * clean VM.
2909 *
2910 * Returns:
2911 * 0 if this VM is clean
2912 */
2913 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2914 struct amdgpu_vm *vm)
2915 {
2916 enum amdgpu_vm_level root = adev->vm_manager.root_level;
2917 unsigned int entries = amdgpu_vm_num_entries(adev, root);
2918 unsigned int i = 0;
2919
2920 if (!(vm->root.entries))
2921 return 0;
2922
2923 for (i = 0; i < entries; i++) {
2924 if (vm->root.entries[i].base.bo)
2925 return -EINVAL;
2926 }
2927
2928 return 0;
2929 }
2930
2931 /**
2932 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2933 *
2934 * @adev: amdgpu_device pointer
2935 * @vm: requested vm
2936 * @pasid: pasid to use
2937 *
2938 * This only works on GFX VMs that don't have any BOs added and no
2939 * page tables allocated yet.
2940 *
2941 * Changes the following VM parameters:
2942 * - use_cpu_for_update
2943 * - pte_support_ats
2944 * - pasid (old PASID is released, because compute manages its own PASIDs)
2945 *
2946 * Reinitializes the page directory to reflect the changed ATS
2947 * setting.
2948 *
2949 * Returns:
2950 * 0 for success, -errno for errors.
2951 */
2952 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2953 unsigned int pasid)
2954 {
2955 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2956 int r;
2957
2958 r = amdgpu_bo_reserve(vm->root.base.bo, true);
2959 if (r)
2960 return r;
2961
2962 /* Sanity checks */
2963 r = amdgpu_vm_check_clean_reserved(adev, vm);
2964 if (r)
2965 goto unreserve_bo;
2966
2967 if (pasid) {
2968 unsigned long flags;
2969
2970 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2971 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2972 GFP_ATOMIC);
2973 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2974
2975 if (r == -ENOSPC)
2976 goto unreserve_bo;
2977 r = 0;
2978 }
2979
2980 /* Check if PD needs to be reinitialized and do it before
2981 * changing any other state, in case it fails.
2982 */
2983 if (pte_support_ats != vm->pte_support_ats) {
2984 vm->pte_support_ats = pte_support_ats;
2985 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2986 if (r)
2987 goto free_idr;
2988 }
2989
2990 /* Update VM state */
2991 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2992 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2993 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2994 vm->use_cpu_for_update ? "CPU" : "SDMA");
"CPU" : "SDMA"); 2995 WARN_ONCE((vm->use_cpu_for_update && 2996 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2997 "CPU update of VM recommended only for large BAR system\n"); 2998 2999 if (vm->use_cpu_for_update) 3000 vm->update_funcs = &amdgpu_vm_cpu_funcs; 3001 else 3002 vm->update_funcs = &amdgpu_vm_sdma_funcs; 3003 dma_fence_put(vm->last_update); 3004 vm->last_update = NULL; 3005 vm->is_compute_context = true; 3006 3007 if (vm->pasid) { 3008 unsigned long flags; 3009 3010 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3011 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 3012 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3013 3014 /* Free the original amdgpu allocated pasid 3015 * Will be replaced with kfd allocated pasid 3016 */ 3017 amdgpu_pasid_free(vm->pasid); 3018 vm->pasid = 0; 3019 } 3020 3021 /* Free the shadow bo for compute VM */ 3022 amdgpu_bo_unref(&vm->root.base.bo->shadow); 3023 3024 if (pasid) 3025 vm->pasid = pasid; 3026 3027 goto unreserve_bo; 3028 3029 free_idr: 3030 if (pasid) { 3031 unsigned long flags; 3032 3033 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3034 idr_remove(&adev->vm_manager.pasid_idr, pasid); 3035 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3036 } 3037 unreserve_bo: 3038 amdgpu_bo_unreserve(vm->root.base.bo); 3039 return r; 3040 } 3041 3042 /** 3043 * amdgpu_vm_release_compute - release a compute vm 3044 * @adev: amdgpu_device pointer 3045 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute 3046 * 3047 * This is a correspondant of amdgpu_vm_make_compute. It decouples compute 3048 * pasid from vm. Compute should stop use of vm after this call. 3049 */ 3050 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3051 { 3052 if (vm->pasid) { 3053 unsigned long flags; 3054 3055 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3056 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 3057 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3058 } 3059 vm->pasid = 0; 3060 vm->is_compute_context = false; 3061 } 3062 3063 /** 3064 * amdgpu_vm_fini - tear down a vm instance 3065 * 3066 * @adev: amdgpu_device pointer 3067 * @vm: requested vm 3068 * 3069 * Tear down @vm. 
3070 * Unbind the VM and remove all bos from the vm bo list 3071 */ 3072 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3073 { 3074 struct amdgpu_bo_va_mapping *mapping, *tmp; 3075 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; 3076 struct amdgpu_bo *root; 3077 int i; 3078 3079 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 3080 3081 root = amdgpu_bo_ref(vm->root.base.bo); 3082 amdgpu_bo_reserve(root, true); 3083 if (vm->pasid) { 3084 unsigned long flags; 3085 3086 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3087 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 3088 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3089 vm->pasid = 0; 3090 } 3091 3092 dma_fence_wait(vm->last_direct, false); 3093 dma_fence_put(vm->last_direct); 3094 3095 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 3096 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { 3097 amdgpu_vm_prt_fini(adev, vm); 3098 prt_fini_needed = false; 3099 } 3100 3101 list_del(&mapping->list); 3102 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 3103 } 3104 3105 amdgpu_vm_free_pts(adev, vm, NULL); 3106 amdgpu_bo_unreserve(root); 3107 amdgpu_bo_unref(&root); 3108 WARN_ON(vm->root.base.bo); 3109 3110 drm_sched_entity_destroy(&vm->direct); 3111 drm_sched_entity_destroy(&vm->delayed); 3112 3113 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 3114 dev_err(adev->dev, "still active bo inside vm\n"); 3115 } 3116 rbtree_postorder_for_each_entry_safe(mapping, tmp, 3117 &vm->va.rb_root, rb) { 3118 /* Don't remove the mapping here, we don't want to trigger a 3119 * rebalance and the tree is about to be destroyed anyway. 3120 */ 3121 list_del(&mapping->list); 3122 kfree(mapping); 3123 } 3124 3125 dma_fence_put(vm->last_update); 3126 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 3127 amdgpu_vmid_free_reserved(adev, vm, i); 3128 } 3129 3130 /** 3131 * amdgpu_vm_manager_init - init the VM manager 3132 * 3133 * @adev: amdgpu_device pointer 3134 * 3135 * Initialize the VM manager structures 3136 */ 3137 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 3138 { 3139 unsigned i; 3140 3141 amdgpu_vmid_mgr_init(adev); 3142 3143 adev->vm_manager.fence_context = 3144 dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3145 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 3146 adev->vm_manager.seqno[i] = 0; 3147 3148 spin_lock_init(&adev->vm_manager.prt_lock); 3149 atomic_set(&adev->vm_manager.num_prt_users, 0); 3150 3151 /* If not overridden by the user, by default, only in large BAR systems 3152 * Compute VM tables will be updated by CPU 3153 */ 3154 #ifdef CONFIG_X86_64 3155 if (amdgpu_vm_update_mode == -1) { 3156 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) 3157 adev->vm_manager.vm_update_mode = 3158 AMDGPU_VM_USE_CPU_FOR_COMPUTE; 3159 else 3160 adev->vm_manager.vm_update_mode = 0; 3161 } else 3162 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; 3163 #else 3164 adev->vm_manager.vm_update_mode = 0; 3165 #endif 3166 3167 idr_init(&adev->vm_manager.pasid_idr); 3168 spin_lock_init(&adev->vm_manager.pasid_lock); 3169 3170 adev->vm_manager.xgmi_map_counter = 0; 3171 mutex_init(&adev->vm_manager.lock_pstate); 3172 } 3173 3174 /** 3175 * amdgpu_vm_manager_fini - cleanup VM manager 3176 * 3177 * @adev: amdgpu_device pointer 3178 * 3179 * Cleanup the VM manager and free resources. 
3180 */ 3181 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 3182 { 3183 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); 3184 idr_destroy(&adev->vm_manager.pasid_idr); 3185 3186 amdgpu_vmid_mgr_fini(adev); 3187 } 3188 3189 /** 3190 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. 3191 * 3192 * @dev: drm device pointer 3193 * @data: drm_amdgpu_vm 3194 * @filp: drm file pointer 3195 * 3196 * Returns: 3197 * 0 for success, -errno for errors. 3198 */ 3199 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 3200 { 3201 union drm_amdgpu_vm *args = data; 3202 struct amdgpu_device *adev = dev->dev_private; 3203 struct amdgpu_fpriv *fpriv = filp->driver_priv; 3204 long timeout = msecs_to_jiffies(2000); 3205 int r; 3206 3207 switch (args->in.op) { 3208 case AMDGPU_VM_OP_RESERVE_VMID: 3209 /* We only have requirement to reserve vmid from gfxhub */ 3210 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, 3211 AMDGPU_GFXHUB_0); 3212 if (r) 3213 return r; 3214 break; 3215 case AMDGPU_VM_OP_UNRESERVE_VMID: 3216 if (amdgpu_sriov_runtime(adev)) 3217 timeout = 8 * timeout; 3218 3219 /* Wait vm idle to make sure the vmid set in SPM_VMID is 3220 * not referenced anymore. 3221 */ 3222 r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); 3223 if (r) 3224 return r; 3225 3226 r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); 3227 if (r < 0) 3228 return r; 3229 3230 amdgpu_bo_unreserve(fpriv->vm.root.base.bo); 3231 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3232 break; 3233 default: 3234 return -EINVAL; 3235 } 3236 3237 return 0; 3238 } 3239 3240 /** 3241 * amdgpu_vm_get_task_info - Extracts task info for a PASID. 3242 * 3243 * @adev: drm device pointer 3244 * @pasid: PASID identifier for VM 3245 * @task_info: task_info to fill. 3246 */ 3247 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, 3248 struct amdgpu_task_info *task_info) 3249 { 3250 struct amdgpu_vm *vm; 3251 unsigned long flags; 3252 3253 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3254 3255 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3256 if (vm) 3257 *task_info = vm->task_info; 3258 3259 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3260 } 3261 3262 /** 3263 * amdgpu_vm_set_task_info - Sets VMs task info. 3264 * 3265 * @vm: vm for which to set the info 3266 */ 3267 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) 3268 { 3269 if (vm->task_info.pid) 3270 return; 3271 3272 vm->task_info.pid = current->pid; 3273 get_task_comm(vm->task_info.task_name, current); 3274 3275 if (current->group_leader->mm != current->mm) 3276 return; 3277 3278 vm->task_info.tgid = current->group_leader->pid; 3279 get_task_comm(vm->task_info.process_name, current->group_leader); 3280 } 3281 3282 /** 3283 * amdgpu_vm_handle_fault - graceful handling of VM faults. 3284 * @adev: amdgpu device pointer 3285 * @pasid: PASID of the VM 3286 * @addr: Address of the fault 3287 * 3288 * Try to gracefully handle a VM fault. Return true if the fault was handled and 3289 * shouldn't be reported any more. 
3290 */ 3291 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, 3292 uint64_t addr) 3293 { 3294 struct amdgpu_bo *root; 3295 uint64_t value, flags; 3296 struct amdgpu_vm *vm; 3297 long r; 3298 3299 spin_lock(&adev->vm_manager.pasid_lock); 3300 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3301 if (vm) 3302 root = amdgpu_bo_ref(vm->root.base.bo); 3303 else 3304 root = NULL; 3305 spin_unlock(&adev->vm_manager.pasid_lock); 3306 3307 if (!root) 3308 return false; 3309 3310 r = amdgpu_bo_reserve(root, true); 3311 if (r) 3312 goto error_unref; 3313 3314 /* Double check that the VM still exists */ 3315 spin_lock(&adev->vm_manager.pasid_lock); 3316 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3317 if (vm && vm->root.base.bo != root) 3318 vm = NULL; 3319 spin_unlock(&adev->vm_manager.pasid_lock); 3320 if (!vm) 3321 goto error_unlock; 3322 3323 addr /= AMDGPU_GPU_PAGE_SIZE; 3324 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | 3325 AMDGPU_PTE_SYSTEM; 3326 3327 if (vm->is_compute_context) { 3328 /* Intentionally setting invalid PTE flag 3329 * combination to force a no-retry-fault 3330 */ 3331 flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | 3332 AMDGPU_PTE_TF; 3333 value = 0; 3334 3335 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { 3336 /* Redirect the access to the dummy page */ 3337 value = adev->dummy_page_addr; 3338 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | 3339 AMDGPU_PTE_WRITEABLE; 3340 3341 } else { 3342 /* Let the hw retry silently on the PTE */ 3343 value = 0; 3344 } 3345 3346 r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, 3347 flags, value, NULL, NULL); 3348 if (r) 3349 goto error_unlock; 3350 3351 r = amdgpu_vm_update_pdes(adev, vm, true); 3352 3353 error_unlock: 3354 amdgpu_bo_unreserve(root); 3355 if (r < 0) 3356 DRM_ERROR("Can't handle page fault (%ld)\n", r); 3357 3358 error_unref: 3359 amdgpu_bo_unref(&root); 3360 3361 return false; 3362 } 3363