/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs; however, rather than
 * there being a single global GART table for the entire GPU, there are
 * multiple VM page tables active at any given time. The VM page tables can
 * contain a mix of VRAM pages and system memory pages, and system memory
 * pages can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table associated
 * with each VMID. When executing a command buffer, the kernel tells the ring
 * what VMID to use for that command buffer. VMIDs are allocated dynamically
 * as commands are submitted. The userspace drivers maintain their own address
 * space and the kernel sets up their page tables accordingly when they submit
 * their command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
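 *
 * As an illustration of the formula used below: the shift evaluates to
 * block_size for AMDGPU_VM_PDB0, block_size + 9 for AMDGPU_VM_PDB1,
 * block_size + 18 for AMDGPU_VM_PDB2 and 0 for AMDGPU_VM_PTB, where
 * block_size comes from adev->vm_manager and depends on the configuration.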
93 */ 94 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, 95 unsigned level) 96 { 97 unsigned shift = 0xff; 98 99 switch (level) { 100 case AMDGPU_VM_PDB2: 101 case AMDGPU_VM_PDB1: 102 case AMDGPU_VM_PDB0: 103 shift = 9 * (AMDGPU_VM_PDB0 - level) + 104 adev->vm_manager.block_size; 105 break; 106 case AMDGPU_VM_PTB: 107 shift = 0; 108 break; 109 default: 110 dev_err(adev->dev, "the level%d isn't supported.\n", level); 111 } 112 113 return shift; 114 } 115 116 /** 117 * amdgpu_vm_num_entries - return the number of entries in a PD/PT 118 * 119 * @adev: amdgpu_device pointer 120 * @level: VMPT level 121 * 122 * Returns: 123 * The number of entries in a page directory or page table. 124 */ 125 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, 126 unsigned level) 127 { 128 unsigned shift = amdgpu_vm_level_shift(adev, 129 adev->vm_manager.root_level); 130 131 if (level == adev->vm_manager.root_level) 132 /* For the root directory */ 133 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) 134 >> shift; 135 else if (level != AMDGPU_VM_PTB) 136 /* Everything in between */ 137 return 512; 138 else 139 /* For the page tables on the leaves */ 140 return AMDGPU_VM_PTE_COUNT(adev); 141 } 142 143 /** 144 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD 145 * 146 * @adev: amdgpu_device pointer 147 * 148 * Returns: 149 * The number of entries in the root page directory which needs the ATS setting. 150 */ 151 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev) 152 { 153 unsigned shift; 154 155 shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level); 156 return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT); 157 } 158 159 /** 160 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT 161 * 162 * @adev: amdgpu_device pointer 163 * @level: VMPT level 164 * 165 * Returns: 166 * The mask to extract the entry number of a PD/PT from an address. 167 */ 168 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev, 169 unsigned int level) 170 { 171 if (level <= adev->vm_manager.root_level) 172 return 0xffffffff; 173 else if (level != AMDGPU_VM_PTB) 174 return 0x1ff; 175 else 176 return AMDGPU_VM_PTE_COUNT(adev) - 1; 177 } 178 179 /** 180 * amdgpu_vm_bo_size - returns the size of the BOs in bytes 181 * 182 * @adev: amdgpu_device pointer 183 * @level: VMPT level 184 * 185 * Returns: 186 * The size of the BO for a page directory or page table in bytes. 187 */ 188 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) 189 { 190 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); 191 } 192 193 /** 194 * amdgpu_vm_bo_evicted - vm_bo is evicted 195 * 196 * @vm_bo: vm_bo which is evicted 197 * 198 * State for PDs/PTs and per VM BOs which are not at the location they should 199 * be. 200 */ 201 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) 202 { 203 struct amdgpu_vm *vm = vm_bo->vm; 204 struct amdgpu_bo *bo = vm_bo->bo; 205 206 vm_bo->moved = true; 207 if (bo->tbo.type == ttm_bo_type_kernel) 208 list_move(&vm_bo->vm_status, &vm->evicted); 209 else 210 list_move_tail(&vm_bo->vm_status, &vm->evicted); 211 } 212 213 /** 214 * amdgpu_vm_bo_relocated - vm_bo is reloacted 215 * 216 * @vm_bo: vm_bo which is relocated 217 * 218 * State for PDs/PTs which needs to update their parent PD. 
219 */ 220 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) 221 { 222 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); 223 } 224 225 /** 226 * amdgpu_vm_bo_moved - vm_bo is moved 227 * 228 * @vm_bo: vm_bo which is moved 229 * 230 * State for per VM BOs which are moved, but that change is not yet reflected 231 * in the page tables. 232 */ 233 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) 234 { 235 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); 236 } 237 238 /** 239 * amdgpu_vm_bo_idle - vm_bo is idle 240 * 241 * @vm_bo: vm_bo which is now idle 242 * 243 * State for PDs/PTs and per VM BOs which have gone through the state machine 244 * and are now idle. 245 */ 246 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) 247 { 248 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); 249 vm_bo->moved = false; 250 } 251 252 /** 253 * amdgpu_vm_bo_invalidated - vm_bo is invalidated 254 * 255 * @vm_bo: vm_bo which is now invalidated 256 * 257 * State for normal BOs which are invalidated and that change not yet reflected 258 * in the PTs. 259 */ 260 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) 261 { 262 spin_lock(&vm_bo->vm->invalidated_lock); 263 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); 264 spin_unlock(&vm_bo->vm->invalidated_lock); 265 } 266 267 /** 268 * amdgpu_vm_bo_done - vm_bo is done 269 * 270 * @vm_bo: vm_bo which is now done 271 * 272 * State for normal BOs which are invalidated and that change has been updated 273 * in the PTs. 274 */ 275 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) 276 { 277 spin_lock(&vm_bo->vm->invalidated_lock); 278 list_del_init(&vm_bo->vm_status); 279 spin_unlock(&vm_bo->vm->invalidated_lock); 280 } 281 282 /** 283 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm 284 * 285 * @base: base structure for tracking BO usage in a VM 286 * @vm: vm to which bo is to be added 287 * @bo: amdgpu buffer object 288 * 289 * Initialize a bo_va_base structure and add it to the appropriate lists 290 * 291 */ 292 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, 293 struct amdgpu_vm *vm, 294 struct amdgpu_bo *bo) 295 { 296 base->vm = vm; 297 base->bo = bo; 298 base->next = NULL; 299 INIT_LIST_HEAD(&base->vm_status); 300 301 if (!bo) 302 return; 303 base->next = bo->vm_bo; 304 bo->vm_bo = base; 305 306 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 307 return; 308 309 vm->bulk_moveable = false; 310 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) 311 amdgpu_vm_bo_relocated(base); 312 else 313 amdgpu_vm_bo_idle(base); 314 315 if (bo->preferred_domains & 316 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) 317 return; 318 319 /* 320 * we checked all the prerequisites, but it looks like this per vm bo 321 * is currently evicted. add the bo to the evicted list to make sure it 322 * is validated on next vm use to avoid fault. 323 * */ 324 amdgpu_vm_bo_evicted(base); 325 } 326 327 /** 328 * amdgpu_vm_pt_parent - get the parent page directory 329 * 330 * @pt: child page table 331 * 332 * Helper to get the parent entry for the child page table. NULL if we are at 333 * the root page directory. 
334 */ 335 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) 336 { 337 struct amdgpu_bo *parent = pt->base.bo->parent; 338 339 if (!parent) 340 return NULL; 341 342 return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); 343 } 344 345 /* 346 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt 347 */ 348 struct amdgpu_vm_pt_cursor { 349 uint64_t pfn; 350 struct amdgpu_vm_pt *parent; 351 struct amdgpu_vm_pt *entry; 352 unsigned level; 353 }; 354 355 /** 356 * amdgpu_vm_pt_start - start PD/PT walk 357 * 358 * @adev: amdgpu_device pointer 359 * @vm: amdgpu_vm structure 360 * @start: start address of the walk 361 * @cursor: state to initialize 362 * 363 * Initialize a amdgpu_vm_pt_cursor to start a walk. 364 */ 365 static void amdgpu_vm_pt_start(struct amdgpu_device *adev, 366 struct amdgpu_vm *vm, uint64_t start, 367 struct amdgpu_vm_pt_cursor *cursor) 368 { 369 cursor->pfn = start; 370 cursor->parent = NULL; 371 cursor->entry = &vm->root; 372 cursor->level = adev->vm_manager.root_level; 373 } 374 375 /** 376 * amdgpu_vm_pt_descendant - go to child node 377 * 378 * @adev: amdgpu_device pointer 379 * @cursor: current state 380 * 381 * Walk to the child node of the current node. 382 * Returns: 383 * True if the walk was possible, false otherwise. 384 */ 385 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, 386 struct amdgpu_vm_pt_cursor *cursor) 387 { 388 unsigned mask, shift, idx; 389 390 if (!cursor->entry->entries) 391 return false; 392 393 BUG_ON(!cursor->entry->base.bo); 394 mask = amdgpu_vm_entries_mask(adev, cursor->level); 395 shift = amdgpu_vm_level_shift(adev, cursor->level); 396 397 ++cursor->level; 398 idx = (cursor->pfn >> shift) & mask; 399 cursor->parent = cursor->entry; 400 cursor->entry = &cursor->entry->entries[idx]; 401 return true; 402 } 403 404 /** 405 * amdgpu_vm_pt_sibling - go to sibling node 406 * 407 * @adev: amdgpu_device pointer 408 * @cursor: current state 409 * 410 * Walk to the sibling node of the current node. 411 * Returns: 412 * True if the walk was possible, false otherwise. 413 */ 414 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, 415 struct amdgpu_vm_pt_cursor *cursor) 416 { 417 unsigned shift, num_entries; 418 419 /* Root doesn't have a sibling */ 420 if (!cursor->parent) 421 return false; 422 423 /* Go to our parents and see if we got a sibling */ 424 shift = amdgpu_vm_level_shift(adev, cursor->level - 1); 425 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); 426 427 if (cursor->entry == &cursor->parent->entries[num_entries - 1]) 428 return false; 429 430 cursor->pfn += 1ULL << shift; 431 cursor->pfn &= ~((1ULL << shift) - 1); 432 ++cursor->entry; 433 return true; 434 } 435 436 /** 437 * amdgpu_vm_pt_ancestor - go to parent node 438 * 439 * @cursor: current state 440 * 441 * Walk to the parent node of the current node. 442 * Returns: 443 * True if the walk was possible, false otherwise. 444 */ 445 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor) 446 { 447 if (!cursor->parent) 448 return false; 449 450 --cursor->level; 451 cursor->entry = cursor->parent; 452 cursor->parent = amdgpu_vm_pt_parent(cursor->parent); 453 return true; 454 } 455 456 /** 457 * amdgpu_vm_pt_next - get next PD/PT in hieratchy 458 * 459 * @adev: amdgpu_device pointer 460 * @cursor: current state 461 * 462 * Walk the PD/PT tree to the next node. 
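 *
 * The traversal first tries to descend to a child, then to step over to the
 * next sibling, and otherwise walks back up until an ancestor with a sibling
 * is found; once the root is reached the cursor pfn is set to ~0 to
 * terminate the walk (descriptive summary of the helpers below).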
463 */ 464 static void amdgpu_vm_pt_next(struct amdgpu_device *adev, 465 struct amdgpu_vm_pt_cursor *cursor) 466 { 467 /* First try a newborn child */ 468 if (amdgpu_vm_pt_descendant(adev, cursor)) 469 return; 470 471 /* If that didn't worked try to find a sibling */ 472 while (!amdgpu_vm_pt_sibling(adev, cursor)) { 473 /* No sibling, go to our parents and grandparents */ 474 if (!amdgpu_vm_pt_ancestor(cursor)) { 475 cursor->pfn = ~0ll; 476 return; 477 } 478 } 479 } 480 481 /** 482 * amdgpu_vm_pt_first_dfs - start a deep first search 483 * 484 * @adev: amdgpu_device structure 485 * @vm: amdgpu_vm structure 486 * @start: optional cursor to start with 487 * @cursor: state to initialize 488 * 489 * Starts a deep first traversal of the PD/PT tree. 490 */ 491 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, 492 struct amdgpu_vm *vm, 493 struct amdgpu_vm_pt_cursor *start, 494 struct amdgpu_vm_pt_cursor *cursor) 495 { 496 if (start) 497 *cursor = *start; 498 else 499 amdgpu_vm_pt_start(adev, vm, 0, cursor); 500 while (amdgpu_vm_pt_descendant(adev, cursor)); 501 } 502 503 /** 504 * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue 505 * 506 * @start: starting point for the search 507 * @entry: current entry 508 * 509 * Returns: 510 * True when the search should continue, false otherwise. 511 */ 512 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, 513 struct amdgpu_vm_pt *entry) 514 { 515 return entry && (!start || entry != start->entry); 516 } 517 518 /** 519 * amdgpu_vm_pt_next_dfs - get the next node for a deep first search 520 * 521 * @adev: amdgpu_device structure 522 * @cursor: current state 523 * 524 * Move the cursor to the next node in a deep first search. 525 */ 526 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev, 527 struct amdgpu_vm_pt_cursor *cursor) 528 { 529 if (!cursor->entry) 530 return; 531 532 if (!cursor->parent) 533 cursor->entry = NULL; 534 else if (amdgpu_vm_pt_sibling(adev, cursor)) 535 while (amdgpu_vm_pt_descendant(adev, cursor)); 536 else 537 amdgpu_vm_pt_ancestor(cursor); 538 } 539 540 /* 541 * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs 542 */ 543 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ 544 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \ 545 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\ 546 amdgpu_vm_pt_continue_dfs((start), (entry)); \ 547 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor))) 548 549 /** 550 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list 551 * 552 * @vm: vm providing the BOs 553 * @validated: head of validation list 554 * @entry: entry to add 555 * 556 * Add the page directory to the list of BOs to 557 * validate for command submission. 558 */ 559 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 560 struct list_head *validated, 561 struct amdgpu_bo_list_entry *entry) 562 { 563 entry->priority = 0; 564 entry->tv.bo = &vm->root.base.bo->tbo; 565 /* One for TTM and one for the CS job */ 566 entry->tv.num_shared = 2; 567 entry->user_pages = NULL; 568 list_add(&entry->tv.head, validated); 569 } 570 571 /** 572 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag 573 * 574 * @bo: BO which was removed from the LRU 575 * 576 * Make sure the bulk_moveable flag is updated when a BO is removed from the 577 * LRU. 
578 */ 579 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) 580 { 581 struct amdgpu_bo *abo; 582 struct amdgpu_vm_bo_base *bo_base; 583 584 if (!amdgpu_bo_is_amdgpu_bo(bo)) 585 return; 586 587 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) 588 return; 589 590 abo = ttm_to_amdgpu_bo(bo); 591 if (!abo->parent) 592 return; 593 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { 594 struct amdgpu_vm *vm = bo_base->vm; 595 596 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 597 vm->bulk_moveable = false; 598 } 599 600 } 601 /** 602 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU 603 * 604 * @adev: amdgpu device pointer 605 * @vm: vm providing the BOs 606 * 607 * Move all BOs to the end of LRU and remember their positions to put them 608 * together. 609 */ 610 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, 611 struct amdgpu_vm *vm) 612 { 613 struct amdgpu_vm_bo_base *bo_base; 614 615 if (vm->bulk_moveable) { 616 spin_lock(&ttm_bo_glob.lru_lock); 617 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 618 spin_unlock(&ttm_bo_glob.lru_lock); 619 return; 620 } 621 622 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 623 624 spin_lock(&ttm_bo_glob.lru_lock); 625 list_for_each_entry(bo_base, &vm->idle, vm_status) { 626 struct amdgpu_bo *bo = bo_base->bo; 627 628 if (!bo->parent) 629 continue; 630 631 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); 632 if (bo->shadow) 633 ttm_bo_move_to_lru_tail(&bo->shadow->tbo, 634 &vm->lru_bulk_move); 635 } 636 spin_unlock(&ttm_bo_glob.lru_lock); 637 638 vm->bulk_moveable = true; 639 } 640 641 /** 642 * amdgpu_vm_validate_pt_bos - validate the page table BOs 643 * 644 * @adev: amdgpu device pointer 645 * @vm: vm providing the BOs 646 * @validate: callback to do the validation 647 * @param: parameter for the validation callback 648 * 649 * Validate the page table BOs on command submission if neccessary. 650 * 651 * Returns: 652 * Validation result. 653 */ 654 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, 655 int (*validate)(void *p, struct amdgpu_bo *bo), 656 void *param) 657 { 658 struct amdgpu_vm_bo_base *bo_base, *tmp; 659 int r; 660 661 vm->bulk_moveable &= list_empty(&vm->evicted); 662 663 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 664 struct amdgpu_bo *bo = bo_base->bo; 665 666 r = validate(param, bo); 667 if (r) 668 return r; 669 670 if (bo->tbo.type != ttm_bo_type_kernel) { 671 amdgpu_vm_bo_moved(bo_base); 672 } else { 673 vm->update_funcs->map_table(bo); 674 if (bo->parent) 675 amdgpu_vm_bo_relocated(bo_base); 676 else 677 amdgpu_vm_bo_idle(bo_base); 678 } 679 } 680 681 mutex_lock(&vm->eviction_lock); 682 vm->evicting = false; 683 mutex_unlock(&vm->eviction_lock); 684 685 return 0; 686 } 687 688 /** 689 * amdgpu_vm_ready - check VM is ready for updates 690 * 691 * @vm: VM to check 692 * 693 * Check if all VM PDs/PTs are ready for updates 694 * 695 * Returns: 696 * True if eviction list is empty. 697 */ 698 bool amdgpu_vm_ready(struct amdgpu_vm *vm) 699 { 700 return list_empty(&vm->evicted); 701 } 702 703 /** 704 * amdgpu_vm_clear_bo - initially clear the PDs/PTs 705 * 706 * @adev: amdgpu_device pointer 707 * @vm: VM to clear BO from 708 * @bo: BO to clear 709 * @direct: use a direct update 710 * 711 * Root PD needs to be reserved when calling this. 712 * 713 * Returns: 714 * 0 on success, errno otherwise. 
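 *
 * The newly allocated PD/PT is filled with invalid entries; when ATS is
 * enabled, the entries covering the ATS range are instead initialized with
 * the default ATC flags (see the function body for the exact split).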
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo,
			      bool direct)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = bo;
	unsigned entries, ats_entries;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_pt *pt;

		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - vm->root.entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (bo->shadow) {
		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
				    &ctx);
		if (r)
			return r;
	}

	r = vm->update_funcs->map_table(bo);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
	if (r)
		return r;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
					     value, flags);
		if (r)
			return r;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
					     value, flags);
		if (r)
			return r;
	}

	return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @direct: use a direct update
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, bool direct,
			       struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
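	/* The remaining parameters: page table BOs are kernel-internal
	 * allocations, direct updates (e.g. from a page fault) must not wait
	 * for GPU work, and all PDs/PTs share the root PD's reservation
	 * object so that a single lock protects the whole hierarchy.
	 */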
852 bp->type = ttm_bo_type_kernel; 853 bp->no_wait_gpu = direct; 854 if (vm->root.base.bo) 855 bp->resv = vm->root.base.bo->tbo.base.resv; 856 } 857 858 /** 859 * amdgpu_vm_alloc_pts - Allocate a specific page table 860 * 861 * @adev: amdgpu_device pointer 862 * @vm: VM to allocate page tables for 863 * @cursor: Which page table to allocate 864 * @direct: use a direct update 865 * 866 * Make sure a specific page table or directory is allocated. 867 * 868 * Returns: 869 * 1 if page table needed to be allocated, 0 if page table was already 870 * allocated, negative errno if an error occurred. 871 */ 872 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 873 struct amdgpu_vm *vm, 874 struct amdgpu_vm_pt_cursor *cursor, 875 bool direct) 876 { 877 struct amdgpu_vm_pt *entry = cursor->entry; 878 struct amdgpu_bo_param bp; 879 struct amdgpu_bo *pt; 880 int r; 881 882 if (cursor->level < AMDGPU_VM_PTB && !entry->entries) { 883 unsigned num_entries; 884 885 num_entries = amdgpu_vm_num_entries(adev, cursor->level); 886 entry->entries = kvmalloc_array(num_entries, 887 sizeof(*entry->entries), 888 GFP_KERNEL | __GFP_ZERO); 889 if (!entry->entries) 890 return -ENOMEM; 891 } 892 893 if (entry->base.bo) 894 return 0; 895 896 amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); 897 898 r = amdgpu_bo_create(adev, &bp, &pt); 899 if (r) 900 return r; 901 902 /* Keep a reference to the root directory to avoid 903 * freeing them up in the wrong order. 904 */ 905 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); 906 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 907 908 r = amdgpu_vm_clear_bo(adev, vm, pt, direct); 909 if (r) 910 goto error_free_pt; 911 912 return 0; 913 914 error_free_pt: 915 amdgpu_bo_unref(&pt->shadow); 916 amdgpu_bo_unref(&pt); 917 return r; 918 } 919 920 /** 921 * amdgpu_vm_free_table - fre one PD/PT 922 * 923 * @entry: PDE to free 924 */ 925 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) 926 { 927 if (entry->base.bo) { 928 entry->base.bo->vm_bo = NULL; 929 list_del(&entry->base.vm_status); 930 amdgpu_bo_unref(&entry->base.bo->shadow); 931 amdgpu_bo_unref(&entry->base.bo); 932 } 933 kvfree(entry->entries); 934 entry->entries = NULL; 935 } 936 937 /** 938 * amdgpu_vm_free_pts - free PD/PT levels 939 * 940 * @adev: amdgpu device structure 941 * @vm: amdgpu vm structure 942 * @start: optional cursor where to start freeing PDs/PTs 943 * 944 * Free the page directory or page table level and all sub levels. 945 */ 946 static void amdgpu_vm_free_pts(struct amdgpu_device *adev, 947 struct amdgpu_vm *vm, 948 struct amdgpu_vm_pt_cursor *start) 949 { 950 struct amdgpu_vm_pt_cursor cursor; 951 struct amdgpu_vm_pt *entry; 952 953 vm->bulk_moveable = false; 954 955 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) 956 amdgpu_vm_free_table(entry); 957 958 if (start) 959 amdgpu_vm_free_table(start->entry); 960 } 961 962 /** 963 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug 964 * 965 * @adev: amdgpu_device pointer 966 */ 967 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) 968 { 969 const struct amdgpu_ip_block *ip_block; 970 bool has_compute_vm_bug; 971 struct amdgpu_ring *ring; 972 int i; 973 974 has_compute_vm_bug = false; 975 976 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); 977 if (ip_block) { 978 /* Compute has a VM bug for GFX version < 7. 
979 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ 980 if (ip_block->version->major <= 7) 981 has_compute_vm_bug = true; 982 else if (ip_block->version->major == 8) 983 if (adev->gfx.mec_fw_version < 673) 984 has_compute_vm_bug = true; 985 } 986 987 for (i = 0; i < adev->num_rings; i++) { 988 ring = adev->rings[i]; 989 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) 990 /* only compute rings */ 991 ring->has_compute_vm_bug = has_compute_vm_bug; 992 else 993 ring->has_compute_vm_bug = false; 994 } 995 } 996 997 /** 998 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job. 999 * 1000 * @ring: ring on which the job will be submitted 1001 * @job: job to submit 1002 * 1003 * Returns: 1004 * True if sync is needed. 1005 */ 1006 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, 1007 struct amdgpu_job *job) 1008 { 1009 struct amdgpu_device *adev = ring->adev; 1010 unsigned vmhub = ring->funcs->vmhub; 1011 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1012 struct amdgpu_vmid *id; 1013 bool gds_switch_needed; 1014 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; 1015 1016 if (job->vmid == 0) 1017 return false; 1018 id = &id_mgr->ids[job->vmid]; 1019 gds_switch_needed = ring->funcs->emit_gds_switch && ( 1020 id->gds_base != job->gds_base || 1021 id->gds_size != job->gds_size || 1022 id->gws_base != job->gws_base || 1023 id->gws_size != job->gws_size || 1024 id->oa_base != job->oa_base || 1025 id->oa_size != job->oa_size); 1026 1027 if (amdgpu_vmid_had_gpu_reset(adev, id)) 1028 return true; 1029 1030 return vm_flush_needed || gds_switch_needed; 1031 } 1032 1033 /** 1034 * amdgpu_vm_flush - hardware flush the vm 1035 * 1036 * @ring: ring to use for flush 1037 * @job: related job 1038 * @need_pipe_sync: is pipe sync needed 1039 * 1040 * Emit a VM flush when it is necessary. 1041 * 1042 * Returns: 1043 * 0 on success, errno otherwise. 
1044 */ 1045 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, 1046 bool need_pipe_sync) 1047 { 1048 struct amdgpu_device *adev = ring->adev; 1049 unsigned vmhub = ring->funcs->vmhub; 1050 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1051 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; 1052 bool gds_switch_needed = ring->funcs->emit_gds_switch && ( 1053 id->gds_base != job->gds_base || 1054 id->gds_size != job->gds_size || 1055 id->gws_base != job->gws_base || 1056 id->gws_size != job->gws_size || 1057 id->oa_base != job->oa_base || 1058 id->oa_size != job->oa_size); 1059 bool vm_flush_needed = job->vm_needs_flush; 1060 struct dma_fence *fence = NULL; 1061 bool pasid_mapping_needed = false; 1062 unsigned patch_offset = 0; 1063 int r; 1064 1065 if (amdgpu_vmid_had_gpu_reset(adev, id)) { 1066 gds_switch_needed = true; 1067 vm_flush_needed = true; 1068 pasid_mapping_needed = true; 1069 } 1070 1071 mutex_lock(&id_mgr->lock); 1072 if (id->pasid != job->pasid || !id->pasid_mapping || 1073 !dma_fence_is_signaled(id->pasid_mapping)) 1074 pasid_mapping_needed = true; 1075 mutex_unlock(&id_mgr->lock); 1076 1077 gds_switch_needed &= !!ring->funcs->emit_gds_switch; 1078 vm_flush_needed &= !!ring->funcs->emit_vm_flush && 1079 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; 1080 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && 1081 ring->funcs->emit_wreg; 1082 1083 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) 1084 return 0; 1085 1086 if (ring->funcs->init_cond_exec) 1087 patch_offset = amdgpu_ring_init_cond_exec(ring); 1088 1089 if (need_pipe_sync) 1090 amdgpu_ring_emit_pipeline_sync(ring); 1091 1092 if (vm_flush_needed) { 1093 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); 1094 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); 1095 } 1096 1097 if (pasid_mapping_needed) 1098 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); 1099 1100 if (vm_flush_needed || pasid_mapping_needed) { 1101 r = amdgpu_fence_emit(ring, &fence, 0); 1102 if (r) 1103 return r; 1104 } 1105 1106 if (vm_flush_needed) { 1107 mutex_lock(&id_mgr->lock); 1108 dma_fence_put(id->last_flush); 1109 id->last_flush = dma_fence_get(fence); 1110 id->current_gpu_reset_count = 1111 atomic_read(&adev->gpu_reset_counter); 1112 mutex_unlock(&id_mgr->lock); 1113 } 1114 1115 if (pasid_mapping_needed) { 1116 mutex_lock(&id_mgr->lock); 1117 id->pasid = job->pasid; 1118 dma_fence_put(id->pasid_mapping); 1119 id->pasid_mapping = dma_fence_get(fence); 1120 mutex_unlock(&id_mgr->lock); 1121 } 1122 dma_fence_put(fence); 1123 1124 if (ring->funcs->emit_gds_switch && gds_switch_needed) { 1125 id->gds_base = job->gds_base; 1126 id->gds_size = job->gds_size; 1127 id->gws_base = job->gws_base; 1128 id->gws_size = job->gws_size; 1129 id->oa_base = job->oa_base; 1130 id->oa_size = job->oa_size; 1131 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, 1132 job->gds_size, job->gws_base, 1133 job->gws_size, job->oa_base, 1134 job->oa_size); 1135 } 1136 1137 if (ring->funcs->patch_cond_exec) 1138 amdgpu_ring_patch_cond_exec(ring, patch_offset); 1139 1140 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ 1141 if (ring->funcs->emit_switch_buffer) { 1142 amdgpu_ring_emit_switch_buffer(ring); 1143 amdgpu_ring_emit_switch_buffer(ring); 1144 } 1145 return 0; 1146 } 1147 1148 /** 1149 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 1150 * 1151 * @vm: requested vm 1152 * @bo: requested buffer object 1153 * 1154 * Find @bo inside the 
requested vm. 1155 * Search inside the @bos vm list for the requested vm 1156 * Returns the found bo_va or NULL if none is found 1157 * 1158 * Object has to be reserved! 1159 * 1160 * Returns: 1161 * Found bo_va or NULL. 1162 */ 1163 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 1164 struct amdgpu_bo *bo) 1165 { 1166 struct amdgpu_vm_bo_base *base; 1167 1168 for (base = bo->vm_bo; base; base = base->next) { 1169 if (base->vm != vm) 1170 continue; 1171 1172 return container_of(base, struct amdgpu_bo_va, base); 1173 } 1174 return NULL; 1175 } 1176 1177 /** 1178 * amdgpu_vm_map_gart - Resolve gart mapping of addr 1179 * 1180 * @pages_addr: optional DMA address to use for lookup 1181 * @addr: the unmapped addr 1182 * 1183 * Look up the physical address of the page that the pte resolves 1184 * to. 1185 * 1186 * Returns: 1187 * The pointer for the page table entry. 1188 */ 1189 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) 1190 { 1191 uint64_t result; 1192 1193 /* page table offset */ 1194 result = pages_addr[addr >> PAGE_SHIFT]; 1195 1196 /* in case cpu page size != gpu page size*/ 1197 result |= addr & (~PAGE_MASK); 1198 1199 result &= 0xFFFFFFFFFFFFF000ULL; 1200 1201 return result; 1202 } 1203 1204 /** 1205 * amdgpu_vm_update_pde - update a single level in the hierarchy 1206 * 1207 * @params: parameters for the update 1208 * @vm: requested vm 1209 * @entry: entry to update 1210 * 1211 * Makes sure the requested entry in parent is up to date. 1212 */ 1213 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, 1214 struct amdgpu_vm *vm, 1215 struct amdgpu_vm_pt *entry) 1216 { 1217 struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); 1218 struct amdgpu_bo *bo = parent->base.bo, *pbo; 1219 uint64_t pde, pt, flags; 1220 unsigned level; 1221 1222 for (level = 0, pbo = bo->parent; pbo; ++level) 1223 pbo = pbo->parent; 1224 1225 level += params->adev->vm_manager.root_level; 1226 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); 1227 pde = (entry - parent->entries) * 8; 1228 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); 1229 } 1230 1231 /** 1232 * amdgpu_vm_invalidate_pds - mark all PDs as invalid 1233 * 1234 * @adev: amdgpu_device pointer 1235 * @vm: related vm 1236 * 1237 * Mark all PD level as invalid after an error. 1238 */ 1239 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, 1240 struct amdgpu_vm *vm) 1241 { 1242 struct amdgpu_vm_pt_cursor cursor; 1243 struct amdgpu_vm_pt *entry; 1244 1245 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) 1246 if (entry->base.bo && !entry->base.moved) 1247 amdgpu_vm_bo_relocated(&entry->base); 1248 } 1249 1250 /** 1251 * amdgpu_vm_update_pdes - make sure that all directories are valid 1252 * 1253 * @adev: amdgpu_device pointer 1254 * @vm: requested vm 1255 * @direct: submit directly to the paging queue 1256 * 1257 * Makes sure all directories are up to date. 1258 * 1259 * Returns: 1260 * 0 for success, error for failure. 
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct)
{
	struct amdgpu_vm_update_params params;
	int r;

	if (list_empty(&vm->relocated))
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_pt *entry;

		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;
	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
				   struct amdgpu_bo *bo, unsigned level,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
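	 *
	 * As a purely illustrative example: updating a range that starts at
	 * GPU pfn 0x200 and is 0x200 pages long gives ffs(start) - 1 = 9 and
	 * fls64(end - start) - 1 = 9, so fragment 9 is selected, i.e. one
	 * 2MB fragment on the 4KB base page size.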
1366 */ 1367 unsigned max_frag; 1368 1369 if (params->adev->asic_type < CHIP_VEGA10) 1370 max_frag = params->adev->vm_manager.fragment_size; 1371 else 1372 max_frag = 31; 1373 1374 /* system pages are non continuously */ 1375 if (params->pages_addr) { 1376 *frag = 0; 1377 *frag_end = end; 1378 return; 1379 } 1380 1381 /* This intentionally wraps around if no bit is set */ 1382 *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); 1383 if (*frag >= max_frag) { 1384 *frag = max_frag; 1385 *frag_end = end & ~((1ULL << max_frag) - 1); 1386 } else { 1387 *frag_end = start + (1 << *frag); 1388 } 1389 } 1390 1391 /** 1392 * amdgpu_vm_update_ptes - make sure that page tables are valid 1393 * 1394 * @params: see amdgpu_vm_update_params definition 1395 * @start: start of GPU address range 1396 * @end: end of GPU address range 1397 * @dst: destination address to map to, the next dst inside the function 1398 * @flags: mapping flags 1399 * 1400 * Update the page tables in the range @start - @end. 1401 * 1402 * Returns: 1403 * 0 for success, -EINVAL for failure. 1404 */ 1405 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, 1406 uint64_t start, uint64_t end, 1407 uint64_t dst, uint64_t flags) 1408 { 1409 struct amdgpu_device *adev = params->adev; 1410 struct amdgpu_vm_pt_cursor cursor; 1411 uint64_t frag_start = start, frag_end; 1412 unsigned int frag; 1413 int r; 1414 1415 /* figure out the initial fragment */ 1416 amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end); 1417 1418 /* walk over the address space and update the PTs */ 1419 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); 1420 while (cursor.pfn < end) { 1421 unsigned shift, parent_shift, mask; 1422 uint64_t incr, entry_end, pe_start; 1423 struct amdgpu_bo *pt; 1424 1425 /* make sure that the page tables covering the address range are 1426 * actually allocated 1427 */ 1428 r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, 1429 params->direct); 1430 if (r) 1431 return r; 1432 1433 pt = cursor.entry->base.bo; 1434 1435 /* The root level can't be a huge page */ 1436 if (cursor.level == adev->vm_manager.root_level) { 1437 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1438 return -ENOENT; 1439 continue; 1440 } 1441 1442 shift = amdgpu_vm_level_shift(adev, cursor.level); 1443 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); 1444 if (adev->asic_type < CHIP_VEGA10 && 1445 (flags & AMDGPU_PTE_VALID)) { 1446 /* No huge page support before GMC v9 */ 1447 if (cursor.level != AMDGPU_VM_PTB) { 1448 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1449 return -ENOENT; 1450 continue; 1451 } 1452 } else if (frag < shift) { 1453 /* We can't use this level when the fragment size is 1454 * smaller than the address shift. Go to the next 1455 * child entry and try again. 1456 */ 1457 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1458 return -ENOENT; 1459 continue; 1460 } else if (frag >= parent_shift && 1461 cursor.level - 1 != adev->vm_manager.root_level) { 1462 /* If the fragment size is even larger than the parent 1463 * shift we should go up one level and check it again 1464 * unless one level up is the root level. 
1465 */ 1466 if (!amdgpu_vm_pt_ancestor(&cursor)) 1467 return -ENOENT; 1468 continue; 1469 } 1470 1471 /* Looks good so far, calculate parameters for the update */ 1472 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; 1473 mask = amdgpu_vm_entries_mask(adev, cursor.level); 1474 pe_start = ((cursor.pfn >> shift) & mask) * 8; 1475 entry_end = (uint64_t)(mask + 1) << shift; 1476 entry_end += cursor.pfn & ~(entry_end - 1); 1477 entry_end = min(entry_end, end); 1478 1479 do { 1480 uint64_t upd_end = min(entry_end, frag_end); 1481 unsigned nptes = (upd_end - frag_start) >> shift; 1482 1483 amdgpu_vm_update_flags(params, pt, cursor.level, 1484 pe_start, dst, nptes, incr, 1485 flags | AMDGPU_PTE_FRAG(frag)); 1486 1487 pe_start += nptes * 8; 1488 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; 1489 1490 frag_start = upd_end; 1491 if (frag_start >= frag_end) { 1492 /* figure out the next fragment */ 1493 amdgpu_vm_fragment(params, frag_start, end, 1494 flags, &frag, &frag_end); 1495 if (frag < shift) 1496 break; 1497 } 1498 } while (frag_start < entry_end); 1499 1500 if (amdgpu_vm_pt_descendant(adev, &cursor)) { 1501 /* Free all child entries. 1502 * Update the tables with the flags and addresses and free up subsequent 1503 * tables in the case of huge pages or freed up areas. 1504 * This is the maximum you can free, because all other page tables are not 1505 * completely covered by the range and so potentially still in use. 1506 */ 1507 while (cursor.pfn < frag_start) { 1508 amdgpu_vm_free_pts(adev, params->vm, &cursor); 1509 amdgpu_vm_pt_next(adev, &cursor); 1510 } 1511 1512 } else if (frag >= shift) { 1513 /* or just move on to the next on the same level. */ 1514 amdgpu_vm_pt_next(adev, &cursor); 1515 } 1516 } 1517 1518 return 0; 1519 } 1520 1521 /** 1522 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 1523 * 1524 * @adev: amdgpu_device pointer 1525 * @vm: requested vm 1526 * @direct: direct submission in a page fault 1527 * @exclusive: fence we need to sync to 1528 * @start: start of mapped range 1529 * @last: last mapped entry 1530 * @flags: flags for the entries 1531 * @addr: addr to set the area to 1532 * @pages_addr: DMA addresses to use for mapping 1533 * @fence: optional resulting fence 1534 * 1535 * Fill in the page table entries between @start and @last. 1536 * 1537 * Returns: 1538 * 0 for success, -EINVAL for failure. 
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm, bool direct,
				       struct dma_fence *exclusive,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       dma_addr_t *pages_addr,
				       struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;
	params.pages_addr = pages_addr;

	/* sync to everything except eviction fences on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_KFD;

	mutex_lock(&vm->eviction_lock);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_unlock;
	}

	r = vm->update_funcs->prepare(&params, owner, exclusive);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_unlock;

	r = vm->update_funcs->commit(&params, fence);

error_unlock:
	mutex_unlock(&vm->eviction_lock);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @bo_adev: amdgpu_device pointer of the device the BO was actually allocated on
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into an SDMA IB.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
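 *
 * Roughly speaking, a new chunk is started whenever the backing drm_mm node
 * changes or the DMA addresses of the system pages stop being contiguous;
 * see the loop below for the exact conditions.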
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct amdgpu_device *bo_adev,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
	uint64_t pfn, start = mapping->start;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits at this point, but just in case we filter the flags again.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	/* Apply ASIC specific mapping flags */
	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		dma_addr_t *dma_addr = NULL;
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			uint64_t count;

			for (count = 1;
			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			     ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

		} else if (flags & AMDGPU_PTE_VALID) {
			addr += bo_adev->vm_manager.vram_base_offset;
			addr += pfn << PAGE_SHIFT;
		}

		last = min((uint64_t)mapping->last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
						start, last, flags, addr,
						dma_addr, fence);
		if (r)
			return r;

		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->last + 1));

	return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
1708 */ 1709 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, 1710 bool clear) 1711 { 1712 struct amdgpu_bo *bo = bo_va->base.bo; 1713 struct amdgpu_vm *vm = bo_va->base.vm; 1714 struct amdgpu_bo_va_mapping *mapping; 1715 dma_addr_t *pages_addr = NULL; 1716 struct ttm_mem_reg *mem; 1717 struct drm_mm_node *nodes; 1718 struct dma_fence *exclusive, **last_update; 1719 uint64_t flags; 1720 struct amdgpu_device *bo_adev = adev; 1721 int r; 1722 1723 if (clear || !bo) { 1724 mem = NULL; 1725 nodes = NULL; 1726 exclusive = NULL; 1727 } else { 1728 struct ttm_dma_tt *ttm; 1729 1730 mem = &bo->tbo.mem; 1731 nodes = mem->mm_node; 1732 if (mem->mem_type == TTM_PL_TT) { 1733 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1734 pages_addr = ttm->dma_address; 1735 } 1736 exclusive = bo->tbo.moving; 1737 } 1738 1739 if (bo) { 1740 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); 1741 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); 1742 } else { 1743 flags = 0x0; 1744 } 1745 1746 if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) 1747 last_update = &vm->last_update; 1748 else 1749 last_update = &bo_va->last_pt_update; 1750 1751 if (!clear && bo_va->base.moved) { 1752 bo_va->base.moved = false; 1753 list_splice_init(&bo_va->valids, &bo_va->invalids); 1754 1755 } else if (bo_va->cleared != clear) { 1756 list_splice_init(&bo_va->valids, &bo_va->invalids); 1757 } 1758 1759 list_for_each_entry(mapping, &bo_va->invalids, list) { 1760 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, 1761 mapping, flags, bo_adev, nodes, 1762 last_update); 1763 if (r) 1764 return r; 1765 } 1766 1767 /* If the BO is not in its preferred location add it back to 1768 * the evicted list so that it gets validated again on the 1769 * next command submission. 
1770 */ 1771 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1772 uint32_t mem_type = bo->tbo.mem.mem_type; 1773 1774 if (!(bo->preferred_domains & 1775 amdgpu_mem_type_to_domain(mem_type))) 1776 amdgpu_vm_bo_evicted(&bo_va->base); 1777 else 1778 amdgpu_vm_bo_idle(&bo_va->base); 1779 } else { 1780 amdgpu_vm_bo_done(&bo_va->base); 1781 } 1782 1783 list_splice_init(&bo_va->invalids, &bo_va->valids); 1784 bo_va->cleared = clear; 1785 1786 if (trace_amdgpu_vm_bo_mapping_enabled()) { 1787 list_for_each_entry(mapping, &bo_va->valids, list) 1788 trace_amdgpu_vm_bo_mapping(mapping); 1789 } 1790 1791 return 0; 1792 } 1793 1794 /** 1795 * amdgpu_vm_update_prt_state - update the global PRT state 1796 * 1797 * @adev: amdgpu_device pointer 1798 */ 1799 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) 1800 { 1801 unsigned long flags; 1802 bool enable; 1803 1804 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); 1805 enable = !!atomic_read(&adev->vm_manager.num_prt_users); 1806 adev->gmc.gmc_funcs->set_prt(adev, enable); 1807 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); 1808 } 1809 1810 /** 1811 * amdgpu_vm_prt_get - add a PRT user 1812 * 1813 * @adev: amdgpu_device pointer 1814 */ 1815 static void amdgpu_vm_prt_get(struct amdgpu_device *adev) 1816 { 1817 if (!adev->gmc.gmc_funcs->set_prt) 1818 return; 1819 1820 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) 1821 amdgpu_vm_update_prt_state(adev); 1822 } 1823 1824 /** 1825 * amdgpu_vm_prt_put - drop a PRT user 1826 * 1827 * @adev: amdgpu_device pointer 1828 */ 1829 static void amdgpu_vm_prt_put(struct amdgpu_device *adev) 1830 { 1831 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) 1832 amdgpu_vm_update_prt_state(adev); 1833 } 1834 1835 /** 1836 * amdgpu_vm_prt_cb - callback for updating the PRT status 1837 * 1838 * @fence: fence for the callback 1839 * @_cb: the callback function 1840 */ 1841 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) 1842 { 1843 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); 1844 1845 amdgpu_vm_prt_put(cb->adev); 1846 kfree(cb); 1847 } 1848 1849 /** 1850 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status 1851 * 1852 * @adev: amdgpu_device pointer 1853 * @fence: fence for the callback 1854 */ 1855 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, 1856 struct dma_fence *fence) 1857 { 1858 struct amdgpu_prt_cb *cb; 1859 1860 if (!adev->gmc.gmc_funcs->set_prt) 1861 return; 1862 1863 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); 1864 if (!cb) { 1865 /* Last resort when we are OOM */ 1866 if (fence) 1867 dma_fence_wait(fence, false); 1868 1869 amdgpu_vm_prt_put(adev); 1870 } else { 1871 cb->adev = adev; 1872 if (!fence || dma_fence_add_callback(fence, &cb->cb, 1873 amdgpu_vm_prt_cb)) 1874 amdgpu_vm_prt_cb(fence, &cb->cb); 1875 } 1876 } 1877 1878 /** 1879 * amdgpu_vm_free_mapping - free a mapping 1880 * 1881 * @adev: amdgpu_device pointer 1882 * @vm: requested vm 1883 * @mapping: mapping to be freed 1884 * @fence: fence of the unmap operation 1885 * 1886 * Free a mapping and make sure we decrease the PRT usage count if applicable. 
1887 */ 1888 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, 1889 struct amdgpu_vm *vm, 1890 struct amdgpu_bo_va_mapping *mapping, 1891 struct dma_fence *fence) 1892 { 1893 if (mapping->flags & AMDGPU_PTE_PRT) 1894 amdgpu_vm_add_prt_cb(adev, fence); 1895 kfree(mapping); 1896 } 1897 1898 /** 1899 * amdgpu_vm_prt_fini - finish all prt mappings 1900 * 1901 * @adev: amdgpu_device pointer 1902 * @vm: requested vm 1903 * 1904 * Register a cleanup callback to disable PRT support after VM dies. 1905 */ 1906 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1907 { 1908 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 1909 struct dma_fence *excl, **shared; 1910 unsigned i, shared_count; 1911 int r; 1912 1913 r = dma_resv_get_fences_rcu(resv, &excl, 1914 &shared_count, &shared); 1915 if (r) { 1916 /* Not enough memory to grab the fence list, as last resort 1917 * block for all the fences to complete. 1918 */ 1919 dma_resv_wait_timeout_rcu(resv, true, false, 1920 MAX_SCHEDULE_TIMEOUT); 1921 return; 1922 } 1923 1924 /* Add a callback for each fence in the reservation object */ 1925 amdgpu_vm_prt_get(adev); 1926 amdgpu_vm_add_prt_cb(adev, excl); 1927 1928 for (i = 0; i < shared_count; ++i) { 1929 amdgpu_vm_prt_get(adev); 1930 amdgpu_vm_add_prt_cb(adev, shared[i]); 1931 } 1932 1933 kfree(shared); 1934 } 1935 1936 /** 1937 * amdgpu_vm_clear_freed - clear freed BOs in the PT 1938 * 1939 * @adev: amdgpu_device pointer 1940 * @vm: requested vm 1941 * @fence: optional resulting fence (unchanged if no work needed to be done 1942 * or if an error occurred) 1943 * 1944 * Make sure all freed BOs are cleared in the PT. 1945 * PTs have to be reserved and mutex must be locked! 1946 * 1947 * Returns: 1948 * 0 for success. 1949 * 1950 */ 1951 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 1952 struct amdgpu_vm *vm, 1953 struct dma_fence **fence) 1954 { 1955 struct amdgpu_bo_va_mapping *mapping; 1956 uint64_t init_pte_value = 0; 1957 struct dma_fence *f = NULL; 1958 int r; 1959 1960 while (!list_empty(&vm->freed)) { 1961 mapping = list_first_entry(&vm->freed, 1962 struct amdgpu_bo_va_mapping, list); 1963 list_del(&mapping->list); 1964 1965 if (vm->pte_support_ats && 1966 mapping->start < AMDGPU_GMC_HOLE_START) 1967 init_pte_value = AMDGPU_PTE_DEFAULT_ATC; 1968 1969 r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, 1970 mapping->start, mapping->last, 1971 init_pte_value, 0, NULL, &f); 1972 amdgpu_vm_free_mapping(adev, vm, mapping, f); 1973 if (r) { 1974 dma_fence_put(f); 1975 return r; 1976 } 1977 } 1978 1979 if (fence && f) { 1980 dma_fence_put(*fence); 1981 *fence = f; 1982 } else { 1983 dma_fence_put(f); 1984 } 1985 1986 return 0; 1987 1988 } 1989 1990 /** 1991 * amdgpu_vm_handle_moved - handle moved BOs in the PT 1992 * 1993 * @adev: amdgpu_device pointer 1994 * @vm: requested vm 1995 * 1996 * Make sure all BOs which are moved are updated in the PTs. 1997 * 1998 * Returns: 1999 * 0 for success. 2000 * 2001 * PTs have to be reserved! 
2002  */
2003 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2004                            struct amdgpu_vm *vm)
2005 {
2006         struct amdgpu_bo_va *bo_va, *tmp;
2007         struct dma_resv *resv;
2008         bool clear;
2009         int r;
2010 
2011         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2012                 /* Per VM BOs never need to be cleared in the page tables */
2013                 r = amdgpu_vm_bo_update(adev, bo_va, false);
2014                 if (r)
2015                         return r;
2016         }
2017 
2018         spin_lock(&vm->invalidated_lock);
2019         while (!list_empty(&vm->invalidated)) {
2020                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2021                                          base.vm_status);
2022                 resv = bo_va->base.bo->tbo.base.resv;
2023                 spin_unlock(&vm->invalidated_lock);
2024 
2025                 /* Try to reserve the BO to avoid clearing its ptes */
2026                 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2027                         clear = false;
2028                 /* Somebody else is using the BO right now */
2029                 else
2030                         clear = true;
2031 
2032                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2033                 if (r)
2034                         return r;
2035 
2036                 if (!clear)
2037                         dma_resv_unlock(resv);
2038                 spin_lock(&vm->invalidated_lock);
2039         }
2040         spin_unlock(&vm->invalidated_lock);
2041 
2042         return 0;
2043 }
2044 
2045 /**
2046  * amdgpu_vm_bo_add - add a bo to a specific vm
2047  *
2048  * @adev: amdgpu_device pointer
2049  * @vm: requested vm
2050  * @bo: amdgpu buffer object
2051  *
2052  * Add @bo into the requested vm.
2053  * Add @bo to the list of bos associated with the vm
2054  *
2055  * Returns:
2056  * Newly added bo_va or NULL for failure
2057  *
2058  * Object has to be reserved!
2059  */
2060 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2061                                       struct amdgpu_vm *vm,
2062                                       struct amdgpu_bo *bo)
2063 {
2064         struct amdgpu_bo_va *bo_va;
2065 
2066         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2067         if (bo_va == NULL) {
2068                 return NULL;
2069         }
2070         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2071 
2072         bo_va->ref_count = 1;
2073         INIT_LIST_HEAD(&bo_va->valids);
2074         INIT_LIST_HEAD(&bo_va->invalids);
2075 
2076         if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2077             (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2078                 bo_va->is_xgmi = true;
2079                 mutex_lock(&adev->vm_manager.lock_pstate);
2080                 /* Power up XGMI if it can be potentially used */
2081                 if (++adev->vm_manager.xgmi_map_counter == 1)
2082                         amdgpu_xgmi_set_pstate(adev, 1);
2083                 mutex_unlock(&adev->vm_manager.lock_pstate);
2084         }
2085 
2086         return bo_va;
2087 }
2088 
2089 
2090 /**
2091  * amdgpu_vm_bo_insert_map - insert a new mapping
2092  *
2093  * @adev: amdgpu_device pointer
2094  * @bo_va: bo_va to store the address
2095  * @mapping: the mapping to insert
2096  *
2097  * Insert a new mapping into all structures.
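 *
 * The callers in this file allocate the mapping and fill it before calling
 * this function; a minimal sketch based on the amdgpu_vm_bo_map() path:
 *
 *   mapping->start  = saddr;
 *   mapping->last   = eaddr;
 *   mapping->offset = offset;
 *   mapping->flags  = flags;
 *   amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
 *
 * where start/last are expressed in GPU pages, i.e. the byte addresses
 * divided by AMDGPU_GPU_PAGE_SIZE.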
2098  */
2099 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2100                                     struct amdgpu_bo_va *bo_va,
2101                                     struct amdgpu_bo_va_mapping *mapping)
2102 {
2103         struct amdgpu_vm *vm = bo_va->base.vm;
2104         struct amdgpu_bo *bo = bo_va->base.bo;
2105 
2106         mapping->bo_va = bo_va;
2107         list_add(&mapping->list, &bo_va->invalids);
2108         amdgpu_vm_it_insert(mapping, &vm->va);
2109 
2110         if (mapping->flags & AMDGPU_PTE_PRT)
2111                 amdgpu_vm_prt_get(adev);
2112 
2113         if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2114             !bo_va->base.moved) {
2115                 list_move(&bo_va->base.vm_status, &vm->moved);
2116         }
2117         trace_amdgpu_vm_bo_map(bo_va, mapping);
2118 }
2119 
2120 /**
2121  * amdgpu_vm_bo_map - map bo inside a vm
2122  *
2123  * @adev: amdgpu_device pointer
2124  * @bo_va: bo_va to store the address
2125  * @saddr: where to map the BO
2126  * @offset: requested offset in the BO
2127  * @size: BO size in bytes
2128  * @flags: attributes of pages (read/write/valid/etc.)
2129  *
2130  * Add a mapping of the BO at the specified addr into the VM.
2131  *
2132  * Returns:
2133  * 0 for success, error for failure.
2134  *
2135  * Object has to be reserved and unreserved outside!
2136  */
2137 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2138                      struct amdgpu_bo_va *bo_va,
2139                      uint64_t saddr, uint64_t offset,
2140                      uint64_t size, uint64_t flags)
2141 {
2142         struct amdgpu_bo_va_mapping *mapping, *tmp;
2143         struct amdgpu_bo *bo = bo_va->base.bo;
2144         struct amdgpu_vm *vm = bo_va->base.vm;
2145         uint64_t eaddr;
2146 
2147         /* validate the parameters */
2148         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2149             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2150                 return -EINVAL;
2151 
2152         /* make sure object fit at this offset */
2153         eaddr = saddr + size - 1;
2154         if (saddr >= eaddr ||
2155             (bo && offset + size > amdgpu_bo_size(bo)))
2156                 return -EINVAL;
2157 
2158         saddr /= AMDGPU_GPU_PAGE_SIZE;
2159         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2160 
2161         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2162         if (tmp) {
2163                 /* bo and tmp overlap, invalid addr */
2164                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2165                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2166                         tmp->start, tmp->last + 1);
2167                 return -EINVAL;
2168         }
2169 
2170         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2171         if (!mapping)
2172                 return -ENOMEM;
2173 
2174         mapping->start = saddr;
2175         mapping->last = eaddr;
2176         mapping->offset = offset;
2177         mapping->flags = flags;
2178 
2179         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2180 
2181         return 0;
2182 }
2183 
2184 /**
2185  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2186  *
2187  * @adev: amdgpu_device pointer
2188  * @bo_va: bo_va to store the address
2189  * @saddr: where to map the BO
2190  * @offset: requested offset in the BO
2191  * @size: BO size in bytes
2192  * @flags: attributes of pages (read/write/valid/etc.)
2193  *
2194  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2195  * mappings as we do so.
2196  *
2197  * Returns:
2198  * 0 for success, error for failure.
2199  *
2200  * Object has to be reserved and unreserved outside!
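 *
 * Illustrative usage (a sketch; va_addr is a caller-chosen, page aligned GPU
 * virtual address, and saddr, offset and size must all be multiples of
 * AMDGPU_GPU_PAGE_SIZE or -EINVAL is returned):
 *
 *   r = amdgpu_vm_bo_replace_map(adev, bo_va, va_addr, 0,
 *                                amdgpu_bo_size(bo),
 *                                AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);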
2201  */
2202 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2203                              struct amdgpu_bo_va *bo_va,
2204                              uint64_t saddr, uint64_t offset,
2205                              uint64_t size, uint64_t flags)
2206 {
2207         struct amdgpu_bo_va_mapping *mapping;
2208         struct amdgpu_bo *bo = bo_va->base.bo;
2209         uint64_t eaddr;
2210         int r;
2211 
2212         /* validate the parameters */
2213         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2214             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2215                 return -EINVAL;
2216 
2217         /* make sure object fit at this offset */
2218         eaddr = saddr + size - 1;
2219         if (saddr >= eaddr ||
2220             (bo && offset + size > amdgpu_bo_size(bo)))
2221                 return -EINVAL;
2222 
2223         /* Allocate all the needed memory */
2224         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2225         if (!mapping)
2226                 return -ENOMEM;
2227 
2228         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2229         if (r) {
2230                 kfree(mapping);
2231                 return r;
2232         }
2233 
2234         saddr /= AMDGPU_GPU_PAGE_SIZE;
2235         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2236 
2237         mapping->start = saddr;
2238         mapping->last = eaddr;
2239         mapping->offset = offset;
2240         mapping->flags = flags;
2241 
2242         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2243 
2244         return 0;
2245 }
2246 
2247 /**
2248  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2249  *
2250  * @adev: amdgpu_device pointer
2251  * @bo_va: bo_va to remove the address from
2252  * @saddr: where the BO is mapped
2253  *
2254  * Remove a mapping of the BO at the specified addr from the VM.
2255  *
2256  * Returns:
2257  * 0 for success, error for failure.
2258  *
2259  * Object has to be reserved and unreserved outside!
2260  */
2261 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2262                        struct amdgpu_bo_va *bo_va,
2263                        uint64_t saddr)
2264 {
2265         struct amdgpu_bo_va_mapping *mapping;
2266         struct amdgpu_vm *vm = bo_va->base.vm;
2267         bool valid = true;
2268 
2269         saddr /= AMDGPU_GPU_PAGE_SIZE;
2270 
2271         list_for_each_entry(mapping, &bo_va->valids, list) {
2272                 if (mapping->start == saddr)
2273                         break;
2274         }
2275 
2276         if (&mapping->list == &bo_va->valids) {
2277                 valid = false;
2278 
2279                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2280                         if (mapping->start == saddr)
2281                                 break;
2282                 }
2283 
2284                 if (&mapping->list == &bo_va->invalids)
2285                         return -ENOENT;
2286         }
2287 
2288         list_del(&mapping->list);
2289         amdgpu_vm_it_remove(mapping, &vm->va);
2290         mapping->bo_va = NULL;
2291         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2292 
2293         if (valid)
2294                 list_add(&mapping->list, &vm->freed);
2295         else
2296                 amdgpu_vm_free_mapping(adev, vm, mapping,
2297                                        bo_va->last_pt_update);
2298 
2299         return 0;
2300 }
2301 
2302 /**
2303  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2304  *
2305  * @adev: amdgpu_device pointer
2306  * @vm: VM structure to use
2307  * @saddr: start of the range
2308  * @size: size of the range
2309  *
2310  * Remove all mappings in a range, split them as appropriate.
2311  *
2312  * Returns:
2313  * 0 for success, error for failure.
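 *
 * Worked example (illustrative numbers): if an existing mapping covers GPU
 * pages [0x100, 0x4ff] and the range [0x200, 0x2ff] is cleared, the old
 * mapping is removed, the overlapped middle part is queued on &vm->freed for
 * later PTE clearing, and two remainders are re-inserted: a "before" part
 * [0x100, 0x1ff] and an "after" part [0x300, 0x4ff] whose offset is advanced
 * by the distance from the original start.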
2314 */ 2315 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, 2316 struct amdgpu_vm *vm, 2317 uint64_t saddr, uint64_t size) 2318 { 2319 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; 2320 LIST_HEAD(removed); 2321 uint64_t eaddr; 2322 2323 eaddr = saddr + size - 1; 2324 saddr /= AMDGPU_GPU_PAGE_SIZE; 2325 eaddr /= AMDGPU_GPU_PAGE_SIZE; 2326 2327 /* Allocate all the needed memory */ 2328 before = kzalloc(sizeof(*before), GFP_KERNEL); 2329 if (!before) 2330 return -ENOMEM; 2331 INIT_LIST_HEAD(&before->list); 2332 2333 after = kzalloc(sizeof(*after), GFP_KERNEL); 2334 if (!after) { 2335 kfree(before); 2336 return -ENOMEM; 2337 } 2338 INIT_LIST_HEAD(&after->list); 2339 2340 /* Now gather all removed mappings */ 2341 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); 2342 while (tmp) { 2343 /* Remember mapping split at the start */ 2344 if (tmp->start < saddr) { 2345 before->start = tmp->start; 2346 before->last = saddr - 1; 2347 before->offset = tmp->offset; 2348 before->flags = tmp->flags; 2349 before->bo_va = tmp->bo_va; 2350 list_add(&before->list, &tmp->bo_va->invalids); 2351 } 2352 2353 /* Remember mapping split at the end */ 2354 if (tmp->last > eaddr) { 2355 after->start = eaddr + 1; 2356 after->last = tmp->last; 2357 after->offset = tmp->offset; 2358 after->offset += after->start - tmp->start; 2359 after->flags = tmp->flags; 2360 after->bo_va = tmp->bo_va; 2361 list_add(&after->list, &tmp->bo_va->invalids); 2362 } 2363 2364 list_del(&tmp->list); 2365 list_add(&tmp->list, &removed); 2366 2367 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); 2368 } 2369 2370 /* And free them up */ 2371 list_for_each_entry_safe(tmp, next, &removed, list) { 2372 amdgpu_vm_it_remove(tmp, &vm->va); 2373 list_del(&tmp->list); 2374 2375 if (tmp->start < saddr) 2376 tmp->start = saddr; 2377 if (tmp->last > eaddr) 2378 tmp->last = eaddr; 2379 2380 tmp->bo_va = NULL; 2381 list_add(&tmp->list, &vm->freed); 2382 trace_amdgpu_vm_bo_unmap(NULL, tmp); 2383 } 2384 2385 /* Insert partial mapping before the range */ 2386 if (!list_empty(&before->list)) { 2387 amdgpu_vm_it_insert(before, &vm->va); 2388 if (before->flags & AMDGPU_PTE_PRT) 2389 amdgpu_vm_prt_get(adev); 2390 } else { 2391 kfree(before); 2392 } 2393 2394 /* Insert partial mapping after the range */ 2395 if (!list_empty(&after->list)) { 2396 amdgpu_vm_it_insert(after, &vm->va); 2397 if (after->flags & AMDGPU_PTE_PRT) 2398 amdgpu_vm_prt_get(adev); 2399 } else { 2400 kfree(after); 2401 } 2402 2403 return 0; 2404 } 2405 2406 /** 2407 * amdgpu_vm_bo_lookup_mapping - find mapping by address 2408 * 2409 * @vm: the requested VM 2410 * @addr: the address 2411 * 2412 * Find a mapping by it's address. 2413 * 2414 * Returns: 2415 * The amdgpu_bo_va_mapping matching for addr or NULL 2416 * 2417 */ 2418 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, 2419 uint64_t addr) 2420 { 2421 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); 2422 } 2423 2424 /** 2425 * amdgpu_vm_bo_trace_cs - trace all reserved mappings 2426 * 2427 * @vm: the requested vm 2428 * @ticket: CS ticket 2429 * 2430 * Trace all mappings of BOs reserved during a command submission. 
2431  */
2432 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2433 {
2434         struct amdgpu_bo_va_mapping *mapping;
2435 
2436         if (!trace_amdgpu_vm_bo_cs_enabled())
2437                 return;
2438 
2439         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2440              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2441                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2442                         struct amdgpu_bo *bo;
2443 
2444                         bo = mapping->bo_va->base.bo;
2445                         if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2446                             ticket)
2447                                 continue;
2448                 }
2449 
2450                 trace_amdgpu_vm_bo_cs(mapping);
2451         }
2452 }
2453 
2454 /**
2455  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2456  *
2457  * @adev: amdgpu_device pointer
2458  * @bo_va: requested bo_va
2459  *
2460  * Remove @bo_va->bo from the requested vm.
2461  *
2462  * Object has to be reserved!
2463  */
2464 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2465                       struct amdgpu_bo_va *bo_va)
2466 {
2467         struct amdgpu_bo_va_mapping *mapping, *next;
2468         struct amdgpu_bo *bo = bo_va->base.bo;
2469         struct amdgpu_vm *vm = bo_va->base.vm;
2470         struct amdgpu_vm_bo_base **base;
2471 
2472         if (bo) {
2473                 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2474                         vm->bulk_moveable = false;
2475 
2476                 for (base = &bo_va->base.bo->vm_bo; *base;
2477                      base = &(*base)->next) {
2478                         if (*base != &bo_va->base)
2479                                 continue;
2480 
2481                         *base = bo_va->base.next;
2482                         break;
2483                 }
2484         }
2485 
2486         spin_lock(&vm->invalidated_lock);
2487         list_del(&bo_va->base.vm_status);
2488         spin_unlock(&vm->invalidated_lock);
2489 
2490         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2491                 list_del(&mapping->list);
2492                 amdgpu_vm_it_remove(mapping, &vm->va);
2493                 mapping->bo_va = NULL;
2494                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2495                 list_add(&mapping->list, &vm->freed);
2496         }
2497         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2498                 list_del(&mapping->list);
2499                 amdgpu_vm_it_remove(mapping, &vm->va);
2500                 amdgpu_vm_free_mapping(adev, vm, mapping,
2501                                        bo_va->last_pt_update);
2502         }
2503 
2504         dma_fence_put(bo_va->last_pt_update);
2505 
2506         if (bo && bo_va->is_xgmi) {
2507                 mutex_lock(&adev->vm_manager.lock_pstate);
2508                 if (--adev->vm_manager.xgmi_map_counter == 0)
2509                         amdgpu_xgmi_set_pstate(adev, 0);
2510                 mutex_unlock(&adev->vm_manager.lock_pstate);
2511         }
2512 
2513         kfree(bo_va);
2514 }
2515 
2516 /**
2517  * amdgpu_vm_evictable - check if we can evict a VM
2518  *
2519  * @bo: A page table of the VM.
2520  *
2521  * Check if it is possible to evict a VM.
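 *
 * Illustrative use from an eviction decision path (a sketch; the actual
 * wiring to TTM lives outside this file):
 *
 *   if (!amdgpu_vm_evictable(bo))
 *           return false;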
2522 */ 2523 bool amdgpu_vm_evictable(struct amdgpu_bo *bo) 2524 { 2525 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; 2526 2527 /* Page tables of a destroyed VM can go away immediately */ 2528 if (!bo_base || !bo_base->vm) 2529 return true; 2530 2531 /* Don't evict VM page tables while they are busy */ 2532 if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) 2533 return false; 2534 2535 /* Try to block ongoing updates */ 2536 if (!mutex_trylock(&bo_base->vm->eviction_lock)) 2537 return false; 2538 2539 /* Don't evict VM page tables while they are updated */ 2540 if (!dma_fence_is_signaled(bo_base->vm->last_direct) || 2541 !dma_fence_is_signaled(bo_base->vm->last_delayed)) { 2542 mutex_unlock(&bo_base->vm->eviction_lock); 2543 return false; 2544 } 2545 2546 bo_base->vm->evicting = true; 2547 mutex_unlock(&bo_base->vm->eviction_lock); 2548 return true; 2549 } 2550 2551 /** 2552 * amdgpu_vm_bo_invalidate - mark the bo as invalid 2553 * 2554 * @adev: amdgpu_device pointer 2555 * @bo: amdgpu buffer object 2556 * @evicted: is the BO evicted 2557 * 2558 * Mark @bo as invalid. 2559 */ 2560 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2561 struct amdgpu_bo *bo, bool evicted) 2562 { 2563 struct amdgpu_vm_bo_base *bo_base; 2564 2565 /* shadow bo doesn't have bo base, its validation needs its parent */ 2566 if (bo->parent && bo->parent->shadow == bo) 2567 bo = bo->parent; 2568 2569 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2570 struct amdgpu_vm *vm = bo_base->vm; 2571 2572 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 2573 amdgpu_vm_bo_evicted(bo_base); 2574 continue; 2575 } 2576 2577 if (bo_base->moved) 2578 continue; 2579 bo_base->moved = true; 2580 2581 if (bo->tbo.type == ttm_bo_type_kernel) 2582 amdgpu_vm_bo_relocated(bo_base); 2583 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2584 amdgpu_vm_bo_moved(bo_base); 2585 else 2586 amdgpu_vm_bo_invalidated(bo_base); 2587 } 2588 } 2589 2590 /** 2591 * amdgpu_vm_get_block_size - calculate VM page table size as power of two 2592 * 2593 * @vm_size: VM size 2594 * 2595 * Returns: 2596 * VM page table as power of two 2597 */ 2598 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) 2599 { 2600 /* Total bits covered by PD + PTs */ 2601 unsigned bits = ilog2(vm_size) + 18; 2602 2603 /* Make sure the PD is 4K in size up to 8GB address space. 
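           As an illustrative example, vm_size = 8 gives bits = ilog2(8) + 18 = 21
           and a block size of 21 - 9 = 12, so the PD needs 2^(21 - 12) = 512
           entries of 8 bytes each, i.e. exactly 4KB.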
2604 Above that split equal between PD and PTs */ 2605 if (vm_size <= 8) 2606 return (bits - 9); 2607 else 2608 return ((bits + 3) / 2); 2609 } 2610 2611 /** 2612 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2613 * 2614 * @adev: amdgpu_device pointer 2615 * @min_vm_size: the minimum vm size in GB if it's set auto 2616 * @fragment_size_default: Default PTE fragment size 2617 * @max_level: max VMPT level 2618 * @max_bits: max address space size in bits 2619 * 2620 */ 2621 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 2622 uint32_t fragment_size_default, unsigned max_level, 2623 unsigned max_bits) 2624 { 2625 unsigned int max_size = 1 << (max_bits - 30); 2626 unsigned int vm_size; 2627 uint64_t tmp; 2628 2629 /* adjust vm size first */ 2630 if (amdgpu_vm_size != -1) { 2631 vm_size = amdgpu_vm_size; 2632 if (vm_size > max_size) { 2633 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2634 amdgpu_vm_size, max_size); 2635 vm_size = max_size; 2636 } 2637 } else { 2638 struct sysinfo si; 2639 unsigned int phys_ram_gb; 2640 2641 /* Optimal VM size depends on the amount of physical 2642 * RAM available. Underlying requirements and 2643 * assumptions: 2644 * 2645 * - Need to map system memory and VRAM from all GPUs 2646 * - VRAM from other GPUs not known here 2647 * - Assume VRAM <= system memory 2648 * - On GFX8 and older, VM space can be segmented for 2649 * different MTYPEs 2650 * - Need to allow room for fragmentation, guard pages etc. 2651 * 2652 * This adds up to a rough guess of system memory x3. 2653 * Round up to power of two to maximize the available 2654 * VM size with the given page table size. 2655 */ 2656 si_meminfo(&si); 2657 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + 2658 (1 << 30) - 1) >> 30; 2659 vm_size = roundup_pow_of_two( 2660 min(max(phys_ram_gb * 3, min_vm_size), max_size)); 2661 } 2662 2663 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2664 2665 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2666 if (amdgpu_vm_block_size != -1) 2667 tmp >>= amdgpu_vm_block_size - 9; 2668 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; 2669 adev->vm_manager.num_level = min(max_level, (unsigned)tmp); 2670 switch (adev->vm_manager.num_level) { 2671 case 3: 2672 adev->vm_manager.root_level = AMDGPU_VM_PDB2; 2673 break; 2674 case 2: 2675 adev->vm_manager.root_level = AMDGPU_VM_PDB1; 2676 break; 2677 case 1: 2678 adev->vm_manager.root_level = AMDGPU_VM_PDB0; 2679 break; 2680 default: 2681 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); 2682 } 2683 /* block size depends on vm size and hw setup*/ 2684 if (amdgpu_vm_block_size != -1) 2685 adev->vm_manager.block_size = 2686 min((unsigned)amdgpu_vm_block_size, max_bits 2687 - AMDGPU_GPU_PAGE_SHIFT 2688 - 9 * adev->vm_manager.num_level); 2689 else if (adev->vm_manager.num_level > 1) 2690 adev->vm_manager.block_size = 9; 2691 else 2692 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); 2693 2694 if (amdgpu_vm_fragment_size == -1) 2695 adev->vm_manager.fragment_size = fragment_size_default; 2696 else 2697 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; 2698 2699 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2700 vm_size, adev->vm_manager.num_level + 1, 2701 adev->vm_manager.block_size, 2702 adev->vm_manager.fragment_size); 2703 } 2704 2705 /** 2706 * amdgpu_vm_wait_idle - wait for the VM to become idle 2707 * 2708 * @vm: VM object to wait for 2709 * @timeout: timeout to wait for VM to become idle 2710 
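 * Illustrative usage (a sketch): wait up to 100 ms for outstanding page
 * table work, where a return of 0 means timeout and a negative value an
 * error:
 *
 *   timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));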
*/ 2711 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2712 { 2713 timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2714 true, true, timeout); 2715 if (timeout <= 0) 2716 return timeout; 2717 2718 timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); 2719 if (timeout <= 0) 2720 return timeout; 2721 2722 return dma_fence_wait_timeout(vm->last_delayed, true, timeout); 2723 } 2724 2725 /** 2726 * amdgpu_vm_init - initialize a vm instance 2727 * 2728 * @adev: amdgpu_device pointer 2729 * @vm: requested vm 2730 * @vm_context: Indicates if it GFX or Compute context 2731 * @pasid: Process address space identifier 2732 * 2733 * Init @vm fields. 2734 * 2735 * Returns: 2736 * 0 for success, error for failure. 2737 */ 2738 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, 2739 int vm_context, unsigned int pasid) 2740 { 2741 struct amdgpu_bo_param bp; 2742 struct amdgpu_bo *root; 2743 int r, i; 2744 2745 vm->va = RB_ROOT_CACHED; 2746 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2747 vm->reserved_vmid[i] = NULL; 2748 INIT_LIST_HEAD(&vm->evicted); 2749 INIT_LIST_HEAD(&vm->relocated); 2750 INIT_LIST_HEAD(&vm->moved); 2751 INIT_LIST_HEAD(&vm->idle); 2752 INIT_LIST_HEAD(&vm->invalidated); 2753 spin_lock_init(&vm->invalidated_lock); 2754 INIT_LIST_HEAD(&vm->freed); 2755 2756 2757 /* create scheduler entities for page table updates */ 2758 r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL, 2759 adev->vm_manager.vm_pte_scheds, 2760 adev->vm_manager.vm_pte_num_scheds, NULL); 2761 if (r) 2762 return r; 2763 2764 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, 2765 adev->vm_manager.vm_pte_scheds, 2766 adev->vm_manager.vm_pte_num_scheds, NULL); 2767 if (r) 2768 goto error_free_direct; 2769 2770 vm->pte_support_ats = false; 2771 vm->is_compute_context = false; 2772 2773 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { 2774 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2775 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 2776 2777 if (adev->asic_type == CHIP_RAVEN) 2778 vm->pte_support_ats = true; 2779 } else { 2780 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2781 AMDGPU_VM_USE_CPU_FOR_GFX); 2782 } 2783 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2784 vm->use_cpu_for_update ? 
"CPU" : "SDMA"); 2785 WARN_ONCE((vm->use_cpu_for_update && 2786 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2787 "CPU update of VM recommended only for large BAR system\n"); 2788 2789 if (vm->use_cpu_for_update) 2790 vm->update_funcs = &amdgpu_vm_cpu_funcs; 2791 else 2792 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2793 vm->last_update = NULL; 2794 vm->last_direct = dma_fence_get_stub(); 2795 vm->last_delayed = dma_fence_get_stub(); 2796 2797 mutex_init(&vm->eviction_lock); 2798 vm->evicting = false; 2799 2800 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); 2801 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) 2802 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; 2803 r = amdgpu_bo_create(adev, &bp, &root); 2804 if (r) 2805 goto error_free_delayed; 2806 2807 r = amdgpu_bo_reserve(root, true); 2808 if (r) 2809 goto error_free_root; 2810 2811 r = dma_resv_reserve_shared(root->tbo.base.resv, 1); 2812 if (r) 2813 goto error_unreserve; 2814 2815 amdgpu_vm_bo_base_init(&vm->root.base, vm, root); 2816 2817 r = amdgpu_vm_clear_bo(adev, vm, root, false); 2818 if (r) 2819 goto error_unreserve; 2820 2821 amdgpu_bo_unreserve(vm->root.base.bo); 2822 2823 if (pasid) { 2824 unsigned long flags; 2825 2826 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2827 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, 2828 GFP_ATOMIC); 2829 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2830 if (r < 0) 2831 goto error_free_root; 2832 2833 vm->pasid = pasid; 2834 } 2835 2836 INIT_KFIFO(vm->faults); 2837 2838 return 0; 2839 2840 error_unreserve: 2841 amdgpu_bo_unreserve(vm->root.base.bo); 2842 2843 error_free_root: 2844 amdgpu_bo_unref(&vm->root.base.bo->shadow); 2845 amdgpu_bo_unref(&vm->root.base.bo); 2846 vm->root.base.bo = NULL; 2847 2848 error_free_delayed: 2849 dma_fence_put(vm->last_direct); 2850 dma_fence_put(vm->last_delayed); 2851 drm_sched_entity_destroy(&vm->delayed); 2852 2853 error_free_direct: 2854 drm_sched_entity_destroy(&vm->direct); 2855 2856 return r; 2857 } 2858 2859 /** 2860 * amdgpu_vm_check_clean_reserved - check if a VM is clean 2861 * 2862 * @adev: amdgpu_device pointer 2863 * @vm: the VM to check 2864 * 2865 * check all entries of the root PD, if any subsequent PDs are allocated, 2866 * it means there are page table creating and filling, and is no a clean 2867 * VM 2868 * 2869 * Returns: 2870 * 0 if this VM is clean 2871 */ 2872 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, 2873 struct amdgpu_vm *vm) 2874 { 2875 enum amdgpu_vm_level root = adev->vm_manager.root_level; 2876 unsigned int entries = amdgpu_vm_num_entries(adev, root); 2877 unsigned int i = 0; 2878 2879 if (!(vm->root.entries)) 2880 return 0; 2881 2882 for (i = 0; i < entries; i++) { 2883 if (vm->root.entries[i].base.bo) 2884 return -EINVAL; 2885 } 2886 2887 return 0; 2888 } 2889 2890 /** 2891 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM 2892 * 2893 * @adev: amdgpu_device pointer 2894 * @vm: requested vm 2895 * @pasid: pasid to use 2896 * 2897 * This only works on GFX VMs that don't have any BOs added and no 2898 * page tables allocated yet. 2899 * 2900 * Changes the following VM parameters: 2901 * - use_cpu_for_update 2902 * - pte_supports_ats 2903 * - pasid (old PASID is released, because compute manages its own PASIDs) 2904 * 2905 * Reinitializes the page directory to reflect the changed ATS 2906 * setting. 2907 * 2908 * Returns: 2909 * 0 for success, -errno for errors. 
2910  */
2911 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2912                            unsigned int pasid)
2913 {
2914         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2915         int r;
2916 
2917         r = amdgpu_bo_reserve(vm->root.base.bo, true);
2918         if (r)
2919                 return r;
2920 
2921         /* Sanity checks */
2922         r = amdgpu_vm_check_clean_reserved(adev, vm);
2923         if (r)
2924                 goto unreserve_bo;
2925 
2926         if (pasid) {
2927                 unsigned long flags;
2928 
2929                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2930                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2931                               GFP_ATOMIC);
2932                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2933 
2934                 if (r == -ENOSPC)
2935                         goto unreserve_bo;
2936                 r = 0;
2937         }
2938 
2939         /* Check if PD needs to be reinitialized and do it before
2940          * changing any other state, in case it fails.
2941          */
2942         if (pte_support_ats != vm->pte_support_ats) {
2943                 vm->pte_support_ats = pte_support_ats;
2944                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2945                 if (r)
2946                         goto free_idr;
2947         }
2948 
2949         /* Update VM state */
2950         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2951                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2952         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2953                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2954         WARN_ONCE((vm->use_cpu_for_update &&
2955                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2956                   "CPU update of VM recommended only for large BAR system\n");
2957 
2958         if (vm->use_cpu_for_update)
2959                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2960         else
2961                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2962         dma_fence_put(vm->last_update);
2963         vm->last_update = NULL;
2964         vm->is_compute_context = true;
2965 
2966         if (vm->pasid) {
2967                 unsigned long flags;
2968 
2969                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2970                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2971                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2972 
2973                 /* Free the original amdgpu allocated pasid
2974                  * Will be replaced with kfd allocated pasid
2975                  */
2976                 amdgpu_pasid_free(vm->pasid);
2977                 vm->pasid = 0;
2978         }
2979 
2980         /* Free the shadow bo for compute VM */
2981         amdgpu_bo_unref(&vm->root.base.bo->shadow);
2982 
2983         if (pasid)
2984                 vm->pasid = pasid;
2985 
2986         goto unreserve_bo;
2987 
2988 free_idr:
2989         if (pasid) {
2990                 unsigned long flags;
2991 
2992                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2993                 idr_remove(&adev->vm_manager.pasid_idr, pasid);
2994                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2995         }
2996 unreserve_bo:
2997         amdgpu_bo_unreserve(vm->root.base.bo);
2998         return r;
2999 }
3000 
3001 /**
3002  * amdgpu_vm_release_compute - release a compute vm
3003  * @adev: amdgpu_device pointer
3004  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3005  *
3006  * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
3007  * pasid from the vm. Compute should stop using the vm after this call.
3008  */
3009 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3010 {
3011         if (vm->pasid) {
3012                 unsigned long flags;
3013 
3014                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3015                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3016                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3017         }
3018         vm->pasid = 0;
3019         vm->is_compute_context = false;
3020 }
3021 
3022 /**
3023  * amdgpu_vm_fini - tear down a vm instance
3024  *
3025  * @adev: amdgpu_device pointer
3026  * @vm: requested vm
3027  *
3028  * Tear down @vm.
3029 * Unbind the VM and remove all bos from the vm bo list 3030 */ 3031 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 3032 { 3033 struct amdgpu_bo_va_mapping *mapping, *tmp; 3034 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; 3035 struct amdgpu_bo *root; 3036 int i; 3037 3038 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 3039 3040 root = amdgpu_bo_ref(vm->root.base.bo); 3041 amdgpu_bo_reserve(root, true); 3042 if (vm->pasid) { 3043 unsigned long flags; 3044 3045 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3046 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 3047 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3048 vm->pasid = 0; 3049 } 3050 3051 dma_fence_wait(vm->last_direct, false); 3052 dma_fence_put(vm->last_direct); 3053 dma_fence_wait(vm->last_delayed, false); 3054 dma_fence_put(vm->last_delayed); 3055 3056 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 3057 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { 3058 amdgpu_vm_prt_fini(adev, vm); 3059 prt_fini_needed = false; 3060 } 3061 3062 list_del(&mapping->list); 3063 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 3064 } 3065 3066 amdgpu_vm_free_pts(adev, vm, NULL); 3067 amdgpu_bo_unreserve(root); 3068 amdgpu_bo_unref(&root); 3069 WARN_ON(vm->root.base.bo); 3070 3071 drm_sched_entity_destroy(&vm->direct); 3072 drm_sched_entity_destroy(&vm->delayed); 3073 3074 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 3075 dev_err(adev->dev, "still active bo inside vm\n"); 3076 } 3077 rbtree_postorder_for_each_entry_safe(mapping, tmp, 3078 &vm->va.rb_root, rb) { 3079 /* Don't remove the mapping here, we don't want to trigger a 3080 * rebalance and the tree is about to be destroyed anyway. 3081 */ 3082 list_del(&mapping->list); 3083 kfree(mapping); 3084 } 3085 3086 dma_fence_put(vm->last_update); 3087 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 3088 amdgpu_vmid_free_reserved(adev, vm, i); 3089 } 3090 3091 /** 3092 * amdgpu_vm_manager_init - init the VM manager 3093 * 3094 * @adev: amdgpu_device pointer 3095 * 3096 * Initialize the VM manager structures 3097 */ 3098 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 3099 { 3100 unsigned i; 3101 3102 amdgpu_vmid_mgr_init(adev); 3103 3104 adev->vm_manager.fence_context = 3105 dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3106 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 3107 adev->vm_manager.seqno[i] = 0; 3108 3109 spin_lock_init(&adev->vm_manager.prt_lock); 3110 atomic_set(&adev->vm_manager.num_prt_users, 0); 3111 3112 /* If not overridden by the user, by default, only in large BAR systems 3113 * Compute VM tables will be updated by CPU 3114 */ 3115 #ifdef CONFIG_X86_64 3116 if (amdgpu_vm_update_mode == -1) { 3117 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) 3118 adev->vm_manager.vm_update_mode = 3119 AMDGPU_VM_USE_CPU_FOR_COMPUTE; 3120 else 3121 adev->vm_manager.vm_update_mode = 0; 3122 } else 3123 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; 3124 #else 3125 adev->vm_manager.vm_update_mode = 0; 3126 #endif 3127 3128 idr_init(&adev->vm_manager.pasid_idr); 3129 spin_lock_init(&adev->vm_manager.pasid_lock); 3130 3131 adev->vm_manager.xgmi_map_counter = 0; 3132 mutex_init(&adev->vm_manager.lock_pstate); 3133 } 3134 3135 /** 3136 * amdgpu_vm_manager_fini - cleanup VM manager 3137 * 3138 * @adev: amdgpu_device pointer 3139 * 3140 * Cleanup the VM manager and free resources. 
3141 */ 3142 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 3143 { 3144 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); 3145 idr_destroy(&adev->vm_manager.pasid_idr); 3146 3147 amdgpu_vmid_mgr_fini(adev); 3148 } 3149 3150 /** 3151 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. 3152 * 3153 * @dev: drm device pointer 3154 * @data: drm_amdgpu_vm 3155 * @filp: drm file pointer 3156 * 3157 * Returns: 3158 * 0 for success, -errno for errors. 3159 */ 3160 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 3161 { 3162 union drm_amdgpu_vm *args = data; 3163 struct amdgpu_device *adev = dev->dev_private; 3164 struct amdgpu_fpriv *fpriv = filp->driver_priv; 3165 int r; 3166 3167 switch (args->in.op) { 3168 case AMDGPU_VM_OP_RESERVE_VMID: 3169 /* We only have requirement to reserve vmid from gfxhub */ 3170 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, 3171 AMDGPU_GFXHUB_0); 3172 if (r) 3173 return r; 3174 break; 3175 case AMDGPU_VM_OP_UNRESERVE_VMID: 3176 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3177 break; 3178 default: 3179 return -EINVAL; 3180 } 3181 3182 return 0; 3183 } 3184 3185 /** 3186 * amdgpu_vm_get_task_info - Extracts task info for a PASID. 3187 * 3188 * @adev: drm device pointer 3189 * @pasid: PASID identifier for VM 3190 * @task_info: task_info to fill. 3191 */ 3192 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, 3193 struct amdgpu_task_info *task_info) 3194 { 3195 struct amdgpu_vm *vm; 3196 unsigned long flags; 3197 3198 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3199 3200 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3201 if (vm) 3202 *task_info = vm->task_info; 3203 3204 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3205 } 3206 3207 /** 3208 * amdgpu_vm_set_task_info - Sets VMs task info. 3209 * 3210 * @vm: vm for which to set the info 3211 */ 3212 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) 3213 { 3214 if (vm->task_info.pid) 3215 return; 3216 3217 vm->task_info.pid = current->pid; 3218 get_task_comm(vm->task_info.task_name, current); 3219 3220 if (current->group_leader->mm != current->mm) 3221 return; 3222 3223 vm->task_info.tgid = current->group_leader->pid; 3224 get_task_comm(vm->task_info.process_name, current->group_leader); 3225 } 3226 3227 /** 3228 * amdgpu_vm_handle_fault - graceful handling of VM faults. 3229 * @adev: amdgpu device pointer 3230 * @pasid: PASID of the VM 3231 * @addr: Address of the fault 3232 * 3233 * Try to gracefully handle a VM fault. Return true if the fault was handled and 3234 * shouldn't be reported any more. 
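 *
 * Illustrative call from a GMC fault interrupt handler (a sketch; the
 * surrounding handler and the names entry and addr are assumptions):
 *
 *   if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
 *           return 1;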
3235 */ 3236 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, 3237 uint64_t addr) 3238 { 3239 struct amdgpu_bo *root; 3240 uint64_t value, flags; 3241 struct amdgpu_vm *vm; 3242 long r; 3243 3244 spin_lock(&adev->vm_manager.pasid_lock); 3245 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3246 if (vm) 3247 root = amdgpu_bo_ref(vm->root.base.bo); 3248 else 3249 root = NULL; 3250 spin_unlock(&adev->vm_manager.pasid_lock); 3251 3252 if (!root) 3253 return false; 3254 3255 r = amdgpu_bo_reserve(root, true); 3256 if (r) 3257 goto error_unref; 3258 3259 /* Double check that the VM still exists */ 3260 spin_lock(&adev->vm_manager.pasid_lock); 3261 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3262 if (vm && vm->root.base.bo != root) 3263 vm = NULL; 3264 spin_unlock(&adev->vm_manager.pasid_lock); 3265 if (!vm) 3266 goto error_unlock; 3267 3268 addr /= AMDGPU_GPU_PAGE_SIZE; 3269 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | 3270 AMDGPU_PTE_SYSTEM; 3271 3272 if (vm->is_compute_context) { 3273 /* Intentionally setting invalid PTE flag 3274 * combination to force a no-retry-fault 3275 */ 3276 flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | 3277 AMDGPU_PTE_TF; 3278 value = 0; 3279 3280 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { 3281 /* Redirect the access to the dummy page */ 3282 value = adev->dummy_page_addr; 3283 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | 3284 AMDGPU_PTE_WRITEABLE; 3285 3286 } else { 3287 /* Let the hw retry silently on the PTE */ 3288 value = 0; 3289 } 3290 3291 r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, 3292 flags, value, NULL, NULL); 3293 if (r) 3294 goto error_unlock; 3295 3296 r = amdgpu_vm_update_pdes(adev, vm, true); 3297 3298 error_unlock: 3299 amdgpu_bo_unreserve(root); 3300 if (r < 0) 3301 DRM_ERROR("Can't handle page fault (%ld)\n", r); 3302 3303 error_unref: 3304 amdgpu_bo_unref(&root); 3305 3306 return false; 3307 } 3308