/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring which VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
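 *
 * For example, with a 9-bit block_size this evaluates to 0 for
 * AMDGPU_VM_PTB, 9 for AMDGPU_VM_PDB0, 18 for AMDGPU_VM_PDB1 and 27 for
 * AMDGPU_VM_PDB2, i.e. each directory level then addresses 512 times the
 * range of the level below it.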
93 */ 94 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, 95 unsigned level) 96 { 97 unsigned shift = 0xff; 98 99 switch (level) { 100 case AMDGPU_VM_PDB2: 101 case AMDGPU_VM_PDB1: 102 case AMDGPU_VM_PDB0: 103 shift = 9 * (AMDGPU_VM_PDB0 - level) + 104 adev->vm_manager.block_size; 105 break; 106 case AMDGPU_VM_PTB: 107 shift = 0; 108 break; 109 default: 110 dev_err(adev->dev, "the level%d isn't supported.\n", level); 111 } 112 113 return shift; 114 } 115 116 /** 117 * amdgpu_vm_num_entries - return the number of entries in a PD/PT 118 * 119 * @adev: amdgpu_device pointer 120 * @level: VMPT level 121 * 122 * Returns: 123 * The number of entries in a page directory or page table. 124 */ 125 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, 126 unsigned level) 127 { 128 unsigned shift = amdgpu_vm_level_shift(adev, 129 adev->vm_manager.root_level); 130 131 if (level == adev->vm_manager.root_level) 132 /* For the root directory */ 133 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) 134 >> shift; 135 else if (level != AMDGPU_VM_PTB) 136 /* Everything in between */ 137 return 512; 138 else 139 /* For the page tables on the leaves */ 140 return AMDGPU_VM_PTE_COUNT(adev); 141 } 142 143 /** 144 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD 145 * 146 * @adev: amdgpu_device pointer 147 * 148 * Returns: 149 * The number of entries in the root page directory which needs the ATS setting. 150 */ 151 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev) 152 { 153 unsigned shift; 154 155 shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level); 156 return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT); 157 } 158 159 /** 160 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT 161 * 162 * @adev: amdgpu_device pointer 163 * @level: VMPT level 164 * 165 * Returns: 166 * The mask to extract the entry number of a PD/PT from an address. 167 */ 168 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev, 169 unsigned int level) 170 { 171 if (level <= adev->vm_manager.root_level) 172 return 0xffffffff; 173 else if (level != AMDGPU_VM_PTB) 174 return 0x1ff; 175 else 176 return AMDGPU_VM_PTE_COUNT(adev) - 1; 177 } 178 179 /** 180 * amdgpu_vm_bo_size - returns the size of the BOs in bytes 181 * 182 * @adev: amdgpu_device pointer 183 * @level: VMPT level 184 * 185 * Returns: 186 * The size of the BO for a page directory or page table in bytes. 187 */ 188 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) 189 { 190 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); 191 } 192 193 /** 194 * amdgpu_vm_bo_evicted - vm_bo is evicted 195 * 196 * @vm_bo: vm_bo which is evicted 197 * 198 * State for PDs/PTs and per VM BOs which are not at the location they should 199 * be. 200 */ 201 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) 202 { 203 struct amdgpu_vm *vm = vm_bo->vm; 204 struct amdgpu_bo *bo = vm_bo->bo; 205 206 vm_bo->moved = true; 207 if (bo->tbo.type == ttm_bo_type_kernel) 208 list_move(&vm_bo->vm_status, &vm->evicted); 209 else 210 list_move_tail(&vm_bo->vm_status, &vm->evicted); 211 } 212 213 /** 214 * amdgpu_vm_bo_relocated - vm_bo is reloacted 215 * 216 * @vm_bo: vm_bo which is relocated 217 * 218 * State for PDs/PTs which needs to update their parent PD. 
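 *
 * Page tables typically enter this state from amdgpu_vm_validate_pt_bos()
 * after they have been validated again, and leave it through
 * amdgpu_vm_update_pdes() once their entry in the parent PD has been
 * rewritten and they become idle.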
219 */ 220 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) 221 { 222 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); 223 } 224 225 /** 226 * amdgpu_vm_bo_moved - vm_bo is moved 227 * 228 * @vm_bo: vm_bo which is moved 229 * 230 * State for per VM BOs which are moved, but that change is not yet reflected 231 * in the page tables. 232 */ 233 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) 234 { 235 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); 236 } 237 238 /** 239 * amdgpu_vm_bo_idle - vm_bo is idle 240 * 241 * @vm_bo: vm_bo which is now idle 242 * 243 * State for PDs/PTs and per VM BOs which have gone through the state machine 244 * and are now idle. 245 */ 246 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) 247 { 248 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); 249 vm_bo->moved = false; 250 } 251 252 /** 253 * amdgpu_vm_bo_invalidated - vm_bo is invalidated 254 * 255 * @vm_bo: vm_bo which is now invalidated 256 * 257 * State for normal BOs which are invalidated and that change not yet reflected 258 * in the PTs. 259 */ 260 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) 261 { 262 spin_lock(&vm_bo->vm->invalidated_lock); 263 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); 264 spin_unlock(&vm_bo->vm->invalidated_lock); 265 } 266 267 /** 268 * amdgpu_vm_bo_done - vm_bo is done 269 * 270 * @vm_bo: vm_bo which is now done 271 * 272 * State for normal BOs which are invalidated and that change has been updated 273 * in the PTs. 274 */ 275 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) 276 { 277 spin_lock(&vm_bo->vm->invalidated_lock); 278 list_del_init(&vm_bo->vm_status); 279 spin_unlock(&vm_bo->vm->invalidated_lock); 280 } 281 282 /** 283 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm 284 * 285 * @base: base structure for tracking BO usage in a VM 286 * @vm: vm to which bo is to be added 287 * @bo: amdgpu buffer object 288 * 289 * Initialize a bo_va_base structure and add it to the appropriate lists 290 * 291 */ 292 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, 293 struct amdgpu_vm *vm, 294 struct amdgpu_bo *bo) 295 { 296 base->vm = vm; 297 base->bo = bo; 298 base->next = NULL; 299 INIT_LIST_HEAD(&base->vm_status); 300 301 if (!bo) 302 return; 303 base->next = bo->vm_bo; 304 bo->vm_bo = base; 305 306 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 307 return; 308 309 vm->bulk_moveable = false; 310 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) 311 amdgpu_vm_bo_relocated(base); 312 else 313 amdgpu_vm_bo_idle(base); 314 315 if (bo->preferred_domains & 316 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) 317 return; 318 319 /* 320 * we checked all the prerequisites, but it looks like this per vm bo 321 * is currently evicted. add the bo to the evicted list to make sure it 322 * is validated on next vm use to avoid fault. 323 * */ 324 amdgpu_vm_bo_evicted(base); 325 } 326 327 /** 328 * amdgpu_vm_pt_parent - get the parent page directory 329 * 330 * @pt: child page table 331 * 332 * Helper to get the parent entry for the child page table. NULL if we are at 333 * the root page directory. 
334 */ 335 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) 336 { 337 struct amdgpu_bo *parent = pt->base.bo->parent; 338 339 if (!parent) 340 return NULL; 341 342 return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); 343 } 344 345 /* 346 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt 347 */ 348 struct amdgpu_vm_pt_cursor { 349 uint64_t pfn; 350 struct amdgpu_vm_pt *parent; 351 struct amdgpu_vm_pt *entry; 352 unsigned level; 353 }; 354 355 /** 356 * amdgpu_vm_pt_start - start PD/PT walk 357 * 358 * @adev: amdgpu_device pointer 359 * @vm: amdgpu_vm structure 360 * @start: start address of the walk 361 * @cursor: state to initialize 362 * 363 * Initialize a amdgpu_vm_pt_cursor to start a walk. 364 */ 365 static void amdgpu_vm_pt_start(struct amdgpu_device *adev, 366 struct amdgpu_vm *vm, uint64_t start, 367 struct amdgpu_vm_pt_cursor *cursor) 368 { 369 cursor->pfn = start; 370 cursor->parent = NULL; 371 cursor->entry = &vm->root; 372 cursor->level = adev->vm_manager.root_level; 373 } 374 375 /** 376 * amdgpu_vm_pt_descendant - go to child node 377 * 378 * @adev: amdgpu_device pointer 379 * @cursor: current state 380 * 381 * Walk to the child node of the current node. 382 * Returns: 383 * True if the walk was possible, false otherwise. 384 */ 385 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, 386 struct amdgpu_vm_pt_cursor *cursor) 387 { 388 unsigned mask, shift, idx; 389 390 if (!cursor->entry->entries) 391 return false; 392 393 BUG_ON(!cursor->entry->base.bo); 394 mask = amdgpu_vm_entries_mask(adev, cursor->level); 395 shift = amdgpu_vm_level_shift(adev, cursor->level); 396 397 ++cursor->level; 398 idx = (cursor->pfn >> shift) & mask; 399 cursor->parent = cursor->entry; 400 cursor->entry = &cursor->entry->entries[idx]; 401 return true; 402 } 403 404 /** 405 * amdgpu_vm_pt_sibling - go to sibling node 406 * 407 * @adev: amdgpu_device pointer 408 * @cursor: current state 409 * 410 * Walk to the sibling node of the current node. 411 * Returns: 412 * True if the walk was possible, false otherwise. 413 */ 414 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, 415 struct amdgpu_vm_pt_cursor *cursor) 416 { 417 unsigned shift, num_entries; 418 419 /* Root doesn't have a sibling */ 420 if (!cursor->parent) 421 return false; 422 423 /* Go to our parents and see if we got a sibling */ 424 shift = amdgpu_vm_level_shift(adev, cursor->level - 1); 425 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); 426 427 if (cursor->entry == &cursor->parent->entries[num_entries - 1]) 428 return false; 429 430 cursor->pfn += 1ULL << shift; 431 cursor->pfn &= ~((1ULL << shift) - 1); 432 ++cursor->entry; 433 return true; 434 } 435 436 /** 437 * amdgpu_vm_pt_ancestor - go to parent node 438 * 439 * @cursor: current state 440 * 441 * Walk to the parent node of the current node. 442 * Returns: 443 * True if the walk was possible, false otherwise. 444 */ 445 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor) 446 { 447 if (!cursor->parent) 448 return false; 449 450 --cursor->level; 451 cursor->entry = cursor->parent; 452 cursor->parent = amdgpu_vm_pt_parent(cursor->parent); 453 return true; 454 } 455 456 /** 457 * amdgpu_vm_pt_next - get next PD/PT in hieratchy 458 * 459 * @adev: amdgpu_device pointer 460 * @cursor: current state 461 * 462 * Walk the PD/PT tree to the next node. 
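 *
 * The walk first tries to descend to a child, then advances to a sibling
 * and otherwise climbs up until a parent with a remaining sibling is
 * found. When the tree is exhausted cursor->pfn is set to ~0.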
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_pt *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.base.bo->tbo;
	/* One for the VM updates, one for TTM and one for the CS job */
	entry->tv.num_shared = 3;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 * LRU.
578 */ 579 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) 580 { 581 struct amdgpu_bo *abo; 582 struct amdgpu_vm_bo_base *bo_base; 583 584 if (!amdgpu_bo_is_amdgpu_bo(bo)) 585 return; 586 587 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) 588 return; 589 590 abo = ttm_to_amdgpu_bo(bo); 591 if (!abo->parent) 592 return; 593 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { 594 struct amdgpu_vm *vm = bo_base->vm; 595 596 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 597 vm->bulk_moveable = false; 598 } 599 600 } 601 /** 602 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU 603 * 604 * @adev: amdgpu device pointer 605 * @vm: vm providing the BOs 606 * 607 * Move all BOs to the end of LRU and remember their positions to put them 608 * together. 609 */ 610 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, 611 struct amdgpu_vm *vm) 612 { 613 struct ttm_bo_global *glob = adev->mman.bdev.glob; 614 struct amdgpu_vm_bo_base *bo_base; 615 616 if (vm->bulk_moveable) { 617 spin_lock(&glob->lru_lock); 618 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 619 spin_unlock(&glob->lru_lock); 620 return; 621 } 622 623 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 624 625 spin_lock(&glob->lru_lock); 626 list_for_each_entry(bo_base, &vm->idle, vm_status) { 627 struct amdgpu_bo *bo = bo_base->bo; 628 629 if (!bo->parent) 630 continue; 631 632 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); 633 if (bo->shadow) 634 ttm_bo_move_to_lru_tail(&bo->shadow->tbo, 635 &vm->lru_bulk_move); 636 } 637 spin_unlock(&glob->lru_lock); 638 639 vm->bulk_moveable = true; 640 } 641 642 /** 643 * amdgpu_vm_validate_pt_bos - validate the page table BOs 644 * 645 * @adev: amdgpu device pointer 646 * @vm: vm providing the BOs 647 * @validate: callback to do the validation 648 * @param: parameter for the validation callback 649 * 650 * Validate the page table BOs on command submission if neccessary. 651 * 652 * Returns: 653 * Validation result. 654 */ 655 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, 656 int (*validate)(void *p, struct amdgpu_bo *bo), 657 void *param) 658 { 659 struct amdgpu_vm_bo_base *bo_base, *tmp; 660 int r = 0; 661 662 vm->bulk_moveable &= list_empty(&vm->evicted); 663 664 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 665 struct amdgpu_bo *bo = bo_base->bo; 666 667 r = validate(param, bo); 668 if (r) 669 break; 670 671 if (bo->tbo.type != ttm_bo_type_kernel) { 672 amdgpu_vm_bo_moved(bo_base); 673 } else { 674 vm->update_funcs->map_table(bo); 675 if (bo->parent) 676 amdgpu_vm_bo_relocated(bo_base); 677 else 678 amdgpu_vm_bo_idle(bo_base); 679 } 680 } 681 682 return r; 683 } 684 685 /** 686 * amdgpu_vm_ready - check VM is ready for updates 687 * 688 * @vm: VM to check 689 * 690 * Check if all VM PDs/PTs are ready for updates 691 * 692 * Returns: 693 * True if eviction list is empty. 694 */ 695 bool amdgpu_vm_ready(struct amdgpu_vm *vm) 696 { 697 return list_empty(&vm->evicted); 698 } 699 700 /** 701 * amdgpu_vm_clear_bo - initially clear the PDs/PTs 702 * 703 * @adev: amdgpu_device pointer 704 * @vm: VM to clear BO from 705 * @bo: BO to clear 706 * @direct: use a direct update 707 * 708 * Root PD needs to be reserved when calling this. 709 * 710 * Returns: 711 * 0 on success, errno otherwise. 
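 *
 * ATS entries (if any) are initialized to AMDGPU_PTE_DEFAULT_ATC, all
 * other entries are cleared to zero; on Vega10 and later the cleared PTEs
 * additionally get AMDGPU_PTE_EXECUTABLE set as workaround for the fault
 * priority problem.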
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo,
			      bool direct)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = bo;
	unsigned entries, ats_entries;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_pt *pt;

		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - vm->root.entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (bo->shadow) {
		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
				    &ctx);
		if (r)
			return r;
	}

	r = vm->update_funcs->map_table(bo);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
	if (r)
		return r;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
					     value, flags);
		if (r)
			return r;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
					     value, flags);
		if (r)
			return r;
	}

	return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @direct: use a direct update
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, bool direct,
			       struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
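	/*
	 * Page tables are kernel allocations, direct updates (used in the
	 * page fault path) must not wait for the GPU, and all PDs/PTs share
	 * the reservation object of the root PD.
	 */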
849 bp->type = ttm_bo_type_kernel; 850 bp->no_wait_gpu = direct; 851 if (vm->root.base.bo) 852 bp->resv = vm->root.base.bo->tbo.base.resv; 853 } 854 855 /** 856 * amdgpu_vm_alloc_pts - Allocate a specific page table 857 * 858 * @adev: amdgpu_device pointer 859 * @vm: VM to allocate page tables for 860 * @cursor: Which page table to allocate 861 * @direct: use a direct update 862 * 863 * Make sure a specific page table or directory is allocated. 864 * 865 * Returns: 866 * 1 if page table needed to be allocated, 0 if page table was already 867 * allocated, negative errno if an error occurred. 868 */ 869 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 870 struct amdgpu_vm *vm, 871 struct amdgpu_vm_pt_cursor *cursor, 872 bool direct) 873 { 874 struct amdgpu_vm_pt *entry = cursor->entry; 875 struct amdgpu_bo_param bp; 876 struct amdgpu_bo *pt; 877 int r; 878 879 if (cursor->level < AMDGPU_VM_PTB && !entry->entries) { 880 unsigned num_entries; 881 882 num_entries = amdgpu_vm_num_entries(adev, cursor->level); 883 entry->entries = kvmalloc_array(num_entries, 884 sizeof(*entry->entries), 885 GFP_KERNEL | __GFP_ZERO); 886 if (!entry->entries) 887 return -ENOMEM; 888 } 889 890 if (entry->base.bo) 891 return 0; 892 893 amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); 894 895 r = amdgpu_bo_create(adev, &bp, &pt); 896 if (r) 897 return r; 898 899 /* Keep a reference to the root directory to avoid 900 * freeing them up in the wrong order. 901 */ 902 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); 903 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 904 905 r = amdgpu_vm_clear_bo(adev, vm, pt, direct); 906 if (r) 907 goto error_free_pt; 908 909 return 0; 910 911 error_free_pt: 912 amdgpu_bo_unref(&pt->shadow); 913 amdgpu_bo_unref(&pt); 914 return r; 915 } 916 917 /** 918 * amdgpu_vm_free_table - fre one PD/PT 919 * 920 * @entry: PDE to free 921 */ 922 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) 923 { 924 if (entry->base.bo) { 925 entry->base.bo->vm_bo = NULL; 926 list_del(&entry->base.vm_status); 927 amdgpu_bo_unref(&entry->base.bo->shadow); 928 amdgpu_bo_unref(&entry->base.bo); 929 } 930 kvfree(entry->entries); 931 entry->entries = NULL; 932 } 933 934 /** 935 * amdgpu_vm_free_pts - free PD/PT levels 936 * 937 * @adev: amdgpu device structure 938 * @vm: amdgpu vm structure 939 * @start: optional cursor where to start freeing PDs/PTs 940 * 941 * Free the page directory or page table level and all sub levels. 942 */ 943 static void amdgpu_vm_free_pts(struct amdgpu_device *adev, 944 struct amdgpu_vm *vm, 945 struct amdgpu_vm_pt_cursor *start) 946 { 947 struct amdgpu_vm_pt_cursor cursor; 948 struct amdgpu_vm_pt *entry; 949 950 vm->bulk_moveable = false; 951 952 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) 953 amdgpu_vm_free_table(entry); 954 955 if (start) 956 amdgpu_vm_free_table(start->entry); 957 } 958 959 /** 960 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug 961 * 962 * @adev: amdgpu_device pointer 963 */ 964 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) 965 { 966 const struct amdgpu_ip_block *ip_block; 967 bool has_compute_vm_bug; 968 struct amdgpu_ring *ring; 969 int i; 970 971 has_compute_vm_bug = false; 972 973 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); 974 if (ip_block) { 975 /* Compute has a VM bug for GFX version < 7. 
976 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ 977 if (ip_block->version->major <= 7) 978 has_compute_vm_bug = true; 979 else if (ip_block->version->major == 8) 980 if (adev->gfx.mec_fw_version < 673) 981 has_compute_vm_bug = true; 982 } 983 984 for (i = 0; i < adev->num_rings; i++) { 985 ring = adev->rings[i]; 986 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) 987 /* only compute rings */ 988 ring->has_compute_vm_bug = has_compute_vm_bug; 989 else 990 ring->has_compute_vm_bug = false; 991 } 992 } 993 994 /** 995 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job. 996 * 997 * @ring: ring on which the job will be submitted 998 * @job: job to submit 999 * 1000 * Returns: 1001 * True if sync is needed. 1002 */ 1003 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, 1004 struct amdgpu_job *job) 1005 { 1006 struct amdgpu_device *adev = ring->adev; 1007 unsigned vmhub = ring->funcs->vmhub; 1008 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1009 struct amdgpu_vmid *id; 1010 bool gds_switch_needed; 1011 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; 1012 1013 if (job->vmid == 0) 1014 return false; 1015 id = &id_mgr->ids[job->vmid]; 1016 gds_switch_needed = ring->funcs->emit_gds_switch && ( 1017 id->gds_base != job->gds_base || 1018 id->gds_size != job->gds_size || 1019 id->gws_base != job->gws_base || 1020 id->gws_size != job->gws_size || 1021 id->oa_base != job->oa_base || 1022 id->oa_size != job->oa_size); 1023 1024 if (amdgpu_vmid_had_gpu_reset(adev, id)) 1025 return true; 1026 1027 return vm_flush_needed || gds_switch_needed; 1028 } 1029 1030 /** 1031 * amdgpu_vm_flush - hardware flush the vm 1032 * 1033 * @ring: ring to use for flush 1034 * @job: related job 1035 * @need_pipe_sync: is pipe sync needed 1036 * 1037 * Emit a VM flush when it is necessary. 1038 * 1039 * Returns: 1040 * 0 on success, errno otherwise. 
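 *
 * Depending on what changed this emits a pipeline sync, the VM flush
 * itself, a PASID mapping update and/or a GDS switch, and remembers the
 * resulting fence in the VMID so subsequent submissions can wait for it.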
1041 */ 1042 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, 1043 bool need_pipe_sync) 1044 { 1045 struct amdgpu_device *adev = ring->adev; 1046 unsigned vmhub = ring->funcs->vmhub; 1047 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 1048 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; 1049 bool gds_switch_needed = ring->funcs->emit_gds_switch && ( 1050 id->gds_base != job->gds_base || 1051 id->gds_size != job->gds_size || 1052 id->gws_base != job->gws_base || 1053 id->gws_size != job->gws_size || 1054 id->oa_base != job->oa_base || 1055 id->oa_size != job->oa_size); 1056 bool vm_flush_needed = job->vm_needs_flush; 1057 struct dma_fence *fence = NULL; 1058 bool pasid_mapping_needed = false; 1059 unsigned patch_offset = 0; 1060 int r; 1061 1062 if (amdgpu_vmid_had_gpu_reset(adev, id)) { 1063 gds_switch_needed = true; 1064 vm_flush_needed = true; 1065 pasid_mapping_needed = true; 1066 } 1067 1068 mutex_lock(&id_mgr->lock); 1069 if (id->pasid != job->pasid || !id->pasid_mapping || 1070 !dma_fence_is_signaled(id->pasid_mapping)) 1071 pasid_mapping_needed = true; 1072 mutex_unlock(&id_mgr->lock); 1073 1074 gds_switch_needed &= !!ring->funcs->emit_gds_switch; 1075 vm_flush_needed &= !!ring->funcs->emit_vm_flush && 1076 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; 1077 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && 1078 ring->funcs->emit_wreg; 1079 1080 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) 1081 return 0; 1082 1083 if (ring->funcs->init_cond_exec) 1084 patch_offset = amdgpu_ring_init_cond_exec(ring); 1085 1086 if (need_pipe_sync) 1087 amdgpu_ring_emit_pipeline_sync(ring); 1088 1089 if (vm_flush_needed) { 1090 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); 1091 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); 1092 } 1093 1094 if (pasid_mapping_needed) 1095 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); 1096 1097 if (vm_flush_needed || pasid_mapping_needed) { 1098 r = amdgpu_fence_emit(ring, &fence, 0); 1099 if (r) 1100 return r; 1101 } 1102 1103 if (vm_flush_needed) { 1104 mutex_lock(&id_mgr->lock); 1105 dma_fence_put(id->last_flush); 1106 id->last_flush = dma_fence_get(fence); 1107 id->current_gpu_reset_count = 1108 atomic_read(&adev->gpu_reset_counter); 1109 mutex_unlock(&id_mgr->lock); 1110 } 1111 1112 if (pasid_mapping_needed) { 1113 mutex_lock(&id_mgr->lock); 1114 id->pasid = job->pasid; 1115 dma_fence_put(id->pasid_mapping); 1116 id->pasid_mapping = dma_fence_get(fence); 1117 mutex_unlock(&id_mgr->lock); 1118 } 1119 dma_fence_put(fence); 1120 1121 if (ring->funcs->emit_gds_switch && gds_switch_needed) { 1122 id->gds_base = job->gds_base; 1123 id->gds_size = job->gds_size; 1124 id->gws_base = job->gws_base; 1125 id->gws_size = job->gws_size; 1126 id->oa_base = job->oa_base; 1127 id->oa_size = job->oa_size; 1128 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, 1129 job->gds_size, job->gws_base, 1130 job->gws_size, job->oa_base, 1131 job->oa_size); 1132 } 1133 1134 if (ring->funcs->patch_cond_exec) 1135 amdgpu_ring_patch_cond_exec(ring, patch_offset); 1136 1137 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ 1138 if (ring->funcs->emit_switch_buffer) { 1139 amdgpu_ring_emit_switch_buffer(ring); 1140 amdgpu_ring_emit_switch_buffer(ring); 1141 } 1142 return 0; 1143 } 1144 1145 /** 1146 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 1147 * 1148 * @vm: requested vm 1149 * @bo: requested buffer object 1150 * 1151 * Find @bo inside the 
requested vm. 1152 * Search inside the @bos vm list for the requested vm 1153 * Returns the found bo_va or NULL if none is found 1154 * 1155 * Object has to be reserved! 1156 * 1157 * Returns: 1158 * Found bo_va or NULL. 1159 */ 1160 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 1161 struct amdgpu_bo *bo) 1162 { 1163 struct amdgpu_vm_bo_base *base; 1164 1165 for (base = bo->vm_bo; base; base = base->next) { 1166 if (base->vm != vm) 1167 continue; 1168 1169 return container_of(base, struct amdgpu_bo_va, base); 1170 } 1171 return NULL; 1172 } 1173 1174 /** 1175 * amdgpu_vm_map_gart - Resolve gart mapping of addr 1176 * 1177 * @pages_addr: optional DMA address to use for lookup 1178 * @addr: the unmapped addr 1179 * 1180 * Look up the physical address of the page that the pte resolves 1181 * to. 1182 * 1183 * Returns: 1184 * The pointer for the page table entry. 1185 */ 1186 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) 1187 { 1188 uint64_t result; 1189 1190 /* page table offset */ 1191 result = pages_addr[addr >> PAGE_SHIFT]; 1192 1193 /* in case cpu page size != gpu page size*/ 1194 result |= addr & (~PAGE_MASK); 1195 1196 result &= 0xFFFFFFFFFFFFF000ULL; 1197 1198 return result; 1199 } 1200 1201 /** 1202 * amdgpu_vm_update_pde - update a single level in the hierarchy 1203 * 1204 * @params: parameters for the update 1205 * @vm: requested vm 1206 * @entry: entry to update 1207 * 1208 * Makes sure the requested entry in parent is up to date. 1209 */ 1210 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, 1211 struct amdgpu_vm *vm, 1212 struct amdgpu_vm_pt *entry) 1213 { 1214 struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); 1215 struct amdgpu_bo *bo = parent->base.bo, *pbo; 1216 uint64_t pde, pt, flags; 1217 unsigned level; 1218 1219 for (level = 0, pbo = bo->parent; pbo; ++level) 1220 pbo = pbo->parent; 1221 1222 level += params->adev->vm_manager.root_level; 1223 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); 1224 pde = (entry - parent->entries) * 8; 1225 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); 1226 } 1227 1228 /** 1229 * amdgpu_vm_invalidate_pds - mark all PDs as invalid 1230 * 1231 * @adev: amdgpu_device pointer 1232 * @vm: related vm 1233 * 1234 * Mark all PD level as invalid after an error. 1235 */ 1236 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, 1237 struct amdgpu_vm *vm) 1238 { 1239 struct amdgpu_vm_pt_cursor cursor; 1240 struct amdgpu_vm_pt *entry; 1241 1242 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) 1243 if (entry->base.bo && !entry->base.moved) 1244 amdgpu_vm_bo_relocated(&entry->base); 1245 } 1246 1247 /** 1248 * amdgpu_vm_update_pdes - make sure that all directories are valid 1249 * 1250 * @adev: amdgpu_device pointer 1251 * @vm: requested vm 1252 * @direct: submit directly to the paging queue 1253 * 1254 * Makes sure all directories are up to date. 1255 * 1256 * Returns: 1257 * 0 for success, error for failure. 
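 *
 * Entries on the relocated list are moved to the idle state, their parent
 * directory entries are rewritten and all updates are committed with a
 * single fence stored in vm->last_update. On failure all PDs are marked
 * invalid again.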
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct)
{
	struct amdgpu_vm_update_params params;
	int r;

	if (list_empty(&vm->relocated))
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;

	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_pt *entry;

		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;
	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	return r;
}

/*
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
				   struct amdgpu_bo *bo, unsigned level,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
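	 *
	 * For example, a mapping whose start is aligned to 16 GPU pages and
	 * which covers at least 16 pages can use frag = 4, i.e. 64KB
	 * granularity (1 << (12 + 4)); both the alignment (ffs(start) - 1)
	 * and the remaining size (fls64(end - start) - 1) limit the chosen
	 * fragment below.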
1363 */ 1364 unsigned max_frag; 1365 1366 if (params->adev->asic_type < CHIP_VEGA10) 1367 max_frag = params->adev->vm_manager.fragment_size; 1368 else 1369 max_frag = 31; 1370 1371 /* system pages are non continuously */ 1372 if (params->pages_addr) { 1373 *frag = 0; 1374 *frag_end = end; 1375 return; 1376 } 1377 1378 /* This intentionally wraps around if no bit is set */ 1379 *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); 1380 if (*frag >= max_frag) { 1381 *frag = max_frag; 1382 *frag_end = end & ~((1ULL << max_frag) - 1); 1383 } else { 1384 *frag_end = start + (1 << *frag); 1385 } 1386 } 1387 1388 /** 1389 * amdgpu_vm_update_ptes - make sure that page tables are valid 1390 * 1391 * @params: see amdgpu_vm_update_params definition 1392 * @start: start of GPU address range 1393 * @end: end of GPU address range 1394 * @dst: destination address to map to, the next dst inside the function 1395 * @flags: mapping flags 1396 * 1397 * Update the page tables in the range @start - @end. 1398 * 1399 * Returns: 1400 * 0 for success, -EINVAL for failure. 1401 */ 1402 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, 1403 uint64_t start, uint64_t end, 1404 uint64_t dst, uint64_t flags) 1405 { 1406 struct amdgpu_device *adev = params->adev; 1407 struct amdgpu_vm_pt_cursor cursor; 1408 uint64_t frag_start = start, frag_end; 1409 unsigned int frag; 1410 int r; 1411 1412 /* figure out the initial fragment */ 1413 amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end); 1414 1415 /* walk over the address space and update the PTs */ 1416 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); 1417 while (cursor.pfn < end) { 1418 unsigned shift, parent_shift, mask; 1419 uint64_t incr, entry_end, pe_start; 1420 struct amdgpu_bo *pt; 1421 1422 r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, 1423 params->direct); 1424 if (r) 1425 return r; 1426 1427 pt = cursor.entry->base.bo; 1428 1429 /* The root level can't be a huge page */ 1430 if (cursor.level == adev->vm_manager.root_level) { 1431 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1432 return -ENOENT; 1433 continue; 1434 } 1435 1436 shift = amdgpu_vm_level_shift(adev, cursor.level); 1437 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); 1438 if (adev->asic_type < CHIP_VEGA10 && 1439 (flags & AMDGPU_PTE_VALID)) { 1440 /* No huge page support before GMC v9 */ 1441 if (cursor.level != AMDGPU_VM_PTB) { 1442 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1443 return -ENOENT; 1444 continue; 1445 } 1446 } else if (frag < shift) { 1447 /* We can't use this level when the fragment size is 1448 * smaller than the address shift. Go to the next 1449 * child entry and try again. 1450 */ 1451 if (!amdgpu_vm_pt_descendant(adev, &cursor)) 1452 return -ENOENT; 1453 continue; 1454 } else if (frag >= parent_shift && 1455 cursor.level - 1 != adev->vm_manager.root_level) { 1456 /* If the fragment size is even larger than the parent 1457 * shift we should go up one level and check it again 1458 * unless one level up is the root level. 
1459 */ 1460 if (!amdgpu_vm_pt_ancestor(&cursor)) 1461 return -ENOENT; 1462 continue; 1463 } 1464 1465 /* Looks good so far, calculate parameters for the update */ 1466 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; 1467 mask = amdgpu_vm_entries_mask(adev, cursor.level); 1468 pe_start = ((cursor.pfn >> shift) & mask) * 8; 1469 entry_end = (uint64_t)(mask + 1) << shift; 1470 entry_end += cursor.pfn & ~(entry_end - 1); 1471 entry_end = min(entry_end, end); 1472 1473 do { 1474 uint64_t upd_end = min(entry_end, frag_end); 1475 unsigned nptes = (upd_end - frag_start) >> shift; 1476 1477 amdgpu_vm_update_flags(params, pt, cursor.level, 1478 pe_start, dst, nptes, incr, 1479 flags | AMDGPU_PTE_FRAG(frag)); 1480 1481 pe_start += nptes * 8; 1482 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; 1483 1484 frag_start = upd_end; 1485 if (frag_start >= frag_end) { 1486 /* figure out the next fragment */ 1487 amdgpu_vm_fragment(params, frag_start, end, 1488 flags, &frag, &frag_end); 1489 if (frag < shift) 1490 break; 1491 } 1492 } while (frag_start < entry_end); 1493 1494 if (amdgpu_vm_pt_descendant(adev, &cursor)) { 1495 /* Free all child entries */ 1496 while (cursor.pfn < frag_start) { 1497 amdgpu_vm_free_pts(adev, params->vm, &cursor); 1498 amdgpu_vm_pt_next(adev, &cursor); 1499 } 1500 1501 } else if (frag >= shift) { 1502 /* or just move on to the next on the same level. */ 1503 amdgpu_vm_pt_next(adev, &cursor); 1504 } 1505 } 1506 1507 return 0; 1508 } 1509 1510 /** 1511 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 1512 * 1513 * @adev: amdgpu_device pointer 1514 * @vm: requested vm 1515 * @direct: direct submission in a page fault 1516 * @exclusive: fence we need to sync to 1517 * @start: start of mapped range 1518 * @last: last mapped entry 1519 * @flags: flags for the entries 1520 * @addr: addr to set the area to 1521 * @pages_addr: DMA addresses to use for mapping 1522 * @fence: optional resulting fence 1523 * 1524 * Fill in the page table entries between @start and @last. 1525 * 1526 * Returns: 1527 * 0 for success, -EINVAL for failure. 
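 *
 * When entries are invalidated (@flags has no AMDGPU_PTE_VALID bit) the
 * update syncs to everything except the eviction fences, otherwise it is
 * treated as a normal VM update.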
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm, bool direct,
				       struct dma_fence *exclusive,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       dma_addr_t *pages_addr,
				       struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = direct;
	params.pages_addr = pages_addr;

	/* sync to everything except eviction fences on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_KFD;

	r = vm->update_funcs->prepare(&params, owner, exclusive);
	if (r)
		return r;

	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
	if (r)
		return r;

	return vm->update_funcs->commit(&params, fence);
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @bo_adev: amdgpu_device pointer that the BO was actually allocated on
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into an SDMA IB.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct amdgpu_device *bo_adev,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
	uint64_t pfn, start = mapping->start;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits when we get here, but filter them explicitly just in case.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	/* Apply ASIC specific mapping flags */
	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		dma_addr_t *dma_addr = NULL;
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			uint64_t count;

			for (count = 1;
			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			     ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

		} else if (flags &
AMDGPU_PTE_VALID) { 1653 addr += bo_adev->vm_manager.vram_base_offset; 1654 addr += pfn << PAGE_SHIFT; 1655 } 1656 1657 last = min((uint64_t)mapping->last, start + max_entries - 1); 1658 r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, 1659 start, last, flags, addr, 1660 dma_addr, fence); 1661 if (r) 1662 return r; 1663 1664 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE; 1665 if (nodes && nodes->size == pfn) { 1666 pfn = 0; 1667 ++nodes; 1668 } 1669 start = last + 1; 1670 1671 } while (unlikely(start != mapping->last + 1)); 1672 1673 return 0; 1674 } 1675 1676 /** 1677 * amdgpu_vm_bo_update - update all BO mappings in the vm page table 1678 * 1679 * @adev: amdgpu_device pointer 1680 * @bo_va: requested BO and VM object 1681 * @clear: if true clear the entries 1682 * 1683 * Fill in the page table entries for @bo_va. 1684 * 1685 * Returns: 1686 * 0 for success, -EINVAL for failure. 1687 */ 1688 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, 1689 bool clear) 1690 { 1691 struct amdgpu_bo *bo = bo_va->base.bo; 1692 struct amdgpu_vm *vm = bo_va->base.vm; 1693 struct amdgpu_bo_va_mapping *mapping; 1694 dma_addr_t *pages_addr = NULL; 1695 struct ttm_mem_reg *mem; 1696 struct drm_mm_node *nodes; 1697 struct dma_fence *exclusive, **last_update; 1698 uint64_t flags; 1699 struct amdgpu_device *bo_adev = adev; 1700 int r; 1701 1702 if (clear || !bo) { 1703 mem = NULL; 1704 nodes = NULL; 1705 exclusive = NULL; 1706 } else { 1707 struct ttm_dma_tt *ttm; 1708 1709 mem = &bo->tbo.mem; 1710 nodes = mem->mm_node; 1711 if (mem->mem_type == TTM_PL_TT) { 1712 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1713 pages_addr = ttm->dma_address; 1714 } 1715 exclusive = bo->tbo.moving; 1716 } 1717 1718 if (bo) { 1719 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); 1720 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); 1721 } else { 1722 flags = 0x0; 1723 } 1724 1725 if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) 1726 last_update = &vm->last_update; 1727 else 1728 last_update = &bo_va->last_pt_update; 1729 1730 if (!clear && bo_va->base.moved) { 1731 bo_va->base.moved = false; 1732 list_splice_init(&bo_va->valids, &bo_va->invalids); 1733 1734 } else if (bo_va->cleared != clear) { 1735 list_splice_init(&bo_va->valids, &bo_va->invalids); 1736 } 1737 1738 list_for_each_entry(mapping, &bo_va->invalids, list) { 1739 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, 1740 mapping, flags, bo_adev, nodes, 1741 last_update); 1742 if (r) 1743 return r; 1744 } 1745 1746 /* If the BO is not in its preferred location add it back to 1747 * the evicted list so that it gets validated again on the 1748 * next command submission. 
1749 */ 1750 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1751 uint32_t mem_type = bo->tbo.mem.mem_type; 1752 1753 if (!(bo->preferred_domains & 1754 amdgpu_mem_type_to_domain(mem_type))) 1755 amdgpu_vm_bo_evicted(&bo_va->base); 1756 else 1757 amdgpu_vm_bo_idle(&bo_va->base); 1758 } else { 1759 amdgpu_vm_bo_done(&bo_va->base); 1760 } 1761 1762 list_splice_init(&bo_va->invalids, &bo_va->valids); 1763 bo_va->cleared = clear; 1764 1765 if (trace_amdgpu_vm_bo_mapping_enabled()) { 1766 list_for_each_entry(mapping, &bo_va->valids, list) 1767 trace_amdgpu_vm_bo_mapping(mapping); 1768 } 1769 1770 return 0; 1771 } 1772 1773 /** 1774 * amdgpu_vm_update_prt_state - update the global PRT state 1775 * 1776 * @adev: amdgpu_device pointer 1777 */ 1778 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) 1779 { 1780 unsigned long flags; 1781 bool enable; 1782 1783 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); 1784 enable = !!atomic_read(&adev->vm_manager.num_prt_users); 1785 adev->gmc.gmc_funcs->set_prt(adev, enable); 1786 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); 1787 } 1788 1789 /** 1790 * amdgpu_vm_prt_get - add a PRT user 1791 * 1792 * @adev: amdgpu_device pointer 1793 */ 1794 static void amdgpu_vm_prt_get(struct amdgpu_device *adev) 1795 { 1796 if (!adev->gmc.gmc_funcs->set_prt) 1797 return; 1798 1799 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) 1800 amdgpu_vm_update_prt_state(adev); 1801 } 1802 1803 /** 1804 * amdgpu_vm_prt_put - drop a PRT user 1805 * 1806 * @adev: amdgpu_device pointer 1807 */ 1808 static void amdgpu_vm_prt_put(struct amdgpu_device *adev) 1809 { 1810 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) 1811 amdgpu_vm_update_prt_state(adev); 1812 } 1813 1814 /** 1815 * amdgpu_vm_prt_cb - callback for updating the PRT status 1816 * 1817 * @fence: fence for the callback 1818 * @_cb: the callback function 1819 */ 1820 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) 1821 { 1822 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); 1823 1824 amdgpu_vm_prt_put(cb->adev); 1825 kfree(cb); 1826 } 1827 1828 /** 1829 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status 1830 * 1831 * @adev: amdgpu_device pointer 1832 * @fence: fence for the callback 1833 */ 1834 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, 1835 struct dma_fence *fence) 1836 { 1837 struct amdgpu_prt_cb *cb; 1838 1839 if (!adev->gmc.gmc_funcs->set_prt) 1840 return; 1841 1842 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); 1843 if (!cb) { 1844 /* Last resort when we are OOM */ 1845 if (fence) 1846 dma_fence_wait(fence, false); 1847 1848 amdgpu_vm_prt_put(adev); 1849 } else { 1850 cb->adev = adev; 1851 if (!fence || dma_fence_add_callback(fence, &cb->cb, 1852 amdgpu_vm_prt_cb)) 1853 amdgpu_vm_prt_cb(fence, &cb->cb); 1854 } 1855 } 1856 1857 /** 1858 * amdgpu_vm_free_mapping - free a mapping 1859 * 1860 * @adev: amdgpu_device pointer 1861 * @vm: requested vm 1862 * @mapping: mapping to be freed 1863 * @fence: fence of the unmap operation 1864 * 1865 * Free a mapping and make sure we decrease the PRT usage count if applicable. 
1866 */ 1867 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, 1868 struct amdgpu_vm *vm, 1869 struct amdgpu_bo_va_mapping *mapping, 1870 struct dma_fence *fence) 1871 { 1872 if (mapping->flags & AMDGPU_PTE_PRT) 1873 amdgpu_vm_add_prt_cb(adev, fence); 1874 kfree(mapping); 1875 } 1876 1877 /** 1878 * amdgpu_vm_prt_fini - finish all prt mappings 1879 * 1880 * @adev: amdgpu_device pointer 1881 * @vm: requested vm 1882 * 1883 * Register a cleanup callback to disable PRT support after VM dies. 1884 */ 1885 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1886 { 1887 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 1888 struct dma_fence *excl, **shared; 1889 unsigned i, shared_count; 1890 int r; 1891 1892 r = dma_resv_get_fences_rcu(resv, &excl, 1893 &shared_count, &shared); 1894 if (r) { 1895 /* Not enough memory to grab the fence list, as last resort 1896 * block for all the fences to complete. 1897 */ 1898 dma_resv_wait_timeout_rcu(resv, true, false, 1899 MAX_SCHEDULE_TIMEOUT); 1900 return; 1901 } 1902 1903 /* Add a callback for each fence in the reservation object */ 1904 amdgpu_vm_prt_get(adev); 1905 amdgpu_vm_add_prt_cb(adev, excl); 1906 1907 for (i = 0; i < shared_count; ++i) { 1908 amdgpu_vm_prt_get(adev); 1909 amdgpu_vm_add_prt_cb(adev, shared[i]); 1910 } 1911 1912 kfree(shared); 1913 } 1914 1915 /** 1916 * amdgpu_vm_clear_freed - clear freed BOs in the PT 1917 * 1918 * @adev: amdgpu_device pointer 1919 * @vm: requested vm 1920 * @fence: optional resulting fence (unchanged if no work needed to be done 1921 * or if an error occurred) 1922 * 1923 * Make sure all freed BOs are cleared in the PT. 1924 * PTs have to be reserved and mutex must be locked! 1925 * 1926 * Returns: 1927 * 0 for success. 1928 * 1929 */ 1930 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 1931 struct amdgpu_vm *vm, 1932 struct dma_fence **fence) 1933 { 1934 struct amdgpu_bo_va_mapping *mapping; 1935 uint64_t init_pte_value = 0; 1936 struct dma_fence *f = NULL; 1937 int r; 1938 1939 while (!list_empty(&vm->freed)) { 1940 mapping = list_first_entry(&vm->freed, 1941 struct amdgpu_bo_va_mapping, list); 1942 list_del(&mapping->list); 1943 1944 if (vm->pte_support_ats && 1945 mapping->start < AMDGPU_GMC_HOLE_START) 1946 init_pte_value = AMDGPU_PTE_DEFAULT_ATC; 1947 1948 r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, 1949 mapping->start, mapping->last, 1950 init_pte_value, 0, NULL, &f); 1951 amdgpu_vm_free_mapping(adev, vm, mapping, f); 1952 if (r) { 1953 dma_fence_put(f); 1954 return r; 1955 } 1956 } 1957 1958 if (fence && f) { 1959 dma_fence_put(*fence); 1960 *fence = f; 1961 } else { 1962 dma_fence_put(f); 1963 } 1964 1965 return 0; 1966 1967 } 1968 1969 /** 1970 * amdgpu_vm_handle_moved - handle moved BOs in the PT 1971 * 1972 * @adev: amdgpu_device pointer 1973 * @vm: requested vm 1974 * 1975 * Make sure all BOs which are moved are updated in the PTs. 1976 * 1977 * Returns: 1978 * 0 for success. 1979 * 1980 * PTs have to be reserved! 
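 *
 * Per VM BOs on the moved list never need their mappings cleared, while
 * BOs on the invalidated list are reserved with a trylock and get their
 * page tables cleared instead when somebody else currently holds the
 * reservation.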
1981 */ 1982 int amdgpu_vm_handle_moved(struct amdgpu_device *adev, 1983 struct amdgpu_vm *vm) 1984 { 1985 struct amdgpu_bo_va *bo_va, *tmp; 1986 struct dma_resv *resv; 1987 bool clear; 1988 int r; 1989 1990 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { 1991 /* Per VM BOs never need to bo cleared in the page tables */ 1992 r = amdgpu_vm_bo_update(adev, bo_va, false); 1993 if (r) 1994 return r; 1995 } 1996 1997 spin_lock(&vm->invalidated_lock); 1998 while (!list_empty(&vm->invalidated)) { 1999 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, 2000 base.vm_status); 2001 resv = bo_va->base.bo->tbo.base.resv; 2002 spin_unlock(&vm->invalidated_lock); 2003 2004 /* Try to reserve the BO to avoid clearing its ptes */ 2005 if (!amdgpu_vm_debug && dma_resv_trylock(resv)) 2006 clear = false; 2007 /* Somebody else is using the BO right now */ 2008 else 2009 clear = true; 2010 2011 r = amdgpu_vm_bo_update(adev, bo_va, clear); 2012 if (r) 2013 return r; 2014 2015 if (!clear) 2016 dma_resv_unlock(resv); 2017 spin_lock(&vm->invalidated_lock); 2018 } 2019 spin_unlock(&vm->invalidated_lock); 2020 2021 return 0; 2022 } 2023 2024 /** 2025 * amdgpu_vm_bo_add - add a bo to a specific vm 2026 * 2027 * @adev: amdgpu_device pointer 2028 * @vm: requested vm 2029 * @bo: amdgpu buffer object 2030 * 2031 * Add @bo into the requested vm. 2032 * Add @bo to the list of bos associated with the vm 2033 * 2034 * Returns: 2035 * Newly added bo_va or NULL for failure 2036 * 2037 * Object has to be reserved! 2038 */ 2039 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 2040 struct amdgpu_vm *vm, 2041 struct amdgpu_bo *bo) 2042 { 2043 struct amdgpu_bo_va *bo_va; 2044 2045 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); 2046 if (bo_va == NULL) { 2047 return NULL; 2048 } 2049 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); 2050 2051 bo_va->ref_count = 1; 2052 INIT_LIST_HEAD(&bo_va->valids); 2053 INIT_LIST_HEAD(&bo_va->invalids); 2054 2055 if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && 2056 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) { 2057 bo_va->is_xgmi = true; 2058 mutex_lock(&adev->vm_manager.lock_pstate); 2059 /* Power up XGMI if it can be potentially used */ 2060 if (++adev->vm_manager.xgmi_map_counter == 1) 2061 amdgpu_xgmi_set_pstate(adev, 1); 2062 mutex_unlock(&adev->vm_manager.lock_pstate); 2063 } 2064 2065 return bo_va; 2066 } 2067 2068 2069 /** 2070 * amdgpu_vm_bo_insert_mapping - insert a new mapping 2071 * 2072 * @adev: amdgpu_device pointer 2073 * @bo_va: bo_va to store the address 2074 * @mapping: the mapping to insert 2075 * 2076 * Insert a new mapping into all structures. 
2077  */
2078 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2079 				    struct amdgpu_bo_va *bo_va,
2080 				    struct amdgpu_bo_va_mapping *mapping)
2081 {
2082 	struct amdgpu_vm *vm = bo_va->base.vm;
2083 	struct amdgpu_bo *bo = bo_va->base.bo;
2084
2085 	mapping->bo_va = bo_va;
2086 	list_add(&mapping->list, &bo_va->invalids);
2087 	amdgpu_vm_it_insert(mapping, &vm->va);
2088
2089 	if (mapping->flags & AMDGPU_PTE_PRT)
2090 		amdgpu_vm_prt_get(adev);
2091
2092 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2093 	    !bo_va->base.moved) {
2094 		list_move(&bo_va->base.vm_status, &vm->moved);
2095 	}
2096 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2097 }
2098
2099 /**
2100  * amdgpu_vm_bo_map - map bo inside a vm
2101  *
2102  * @adev: amdgpu_device pointer
2103  * @bo_va: bo_va to store the address
2104  * @saddr: where to map the BO
2105  * @offset: requested offset in the BO
2106  * @size: BO size in bytes
2107  * @flags: attributes of pages (read/write/valid/etc.)
2108  *
2109  * Add a mapping of the BO at the specified addr into the VM.
2110  *
2111  * Returns:
2112  * 0 for success, error for failure.
2113  *
2114  * Object has to be reserved and unreserved outside!
2115  */
2116 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2117 		     struct amdgpu_bo_va *bo_va,
2118 		     uint64_t saddr, uint64_t offset,
2119 		     uint64_t size, uint64_t flags)
2120 {
2121 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2122 	struct amdgpu_bo *bo = bo_va->base.bo;
2123 	struct amdgpu_vm *vm = bo_va->base.vm;
2124 	uint64_t eaddr;
2125
2126 	/* validate the parameters */
2127 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2128 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2129 		return -EINVAL;
2130
2131 	/* make sure object fits at this offset */
2132 	eaddr = saddr + size - 1;
2133 	if (saddr >= eaddr ||
2134 	    (bo && offset + size > amdgpu_bo_size(bo)))
2135 		return -EINVAL;
2136
2137 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2138 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2139
2140 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2141 	if (tmp) {
2142 		/* bo and tmp overlap, invalid addr */
2143 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2144 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2145 			tmp->start, tmp->last + 1);
2146 		return -EINVAL;
2147 	}
2148
2149 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2150 	if (!mapping)
2151 		return -ENOMEM;
2152
2153 	mapping->start = saddr;
2154 	mapping->last = eaddr;
2155 	mapping->offset = offset;
2156 	mapping->flags = flags;
2157
2158 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2159
2160 	return 0;
2161 }
2162
2163 /**
2164  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2165  *
2166  * @adev: amdgpu_device pointer
2167  * @bo_va: bo_va to store the address
2168  * @saddr: where to map the BO
2169  * @offset: requested offset in the BO
2170  * @size: BO size in bytes
2171  * @flags: attributes of pages (read/write/valid/etc.)
2172  *
2173  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2174  * mappings as we do so.
2175  *
2176  * Returns:
2177  * 0 for success, error for failure.
2178  *
2179  * Object has to be reserved and unreserved outside!
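 *
 * Rough usage sketch (hypothetical, page-aligned address and size; flags are
 * illustrative only):
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *				     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);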
2180  */
2181 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2182 			     struct amdgpu_bo_va *bo_va,
2183 			     uint64_t saddr, uint64_t offset,
2184 			     uint64_t size, uint64_t flags)
2185 {
2186 	struct amdgpu_bo_va_mapping *mapping;
2187 	struct amdgpu_bo *bo = bo_va->base.bo;
2188 	uint64_t eaddr;
2189 	int r;
2190
2191 	/* validate the parameters */
2192 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2193 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2194 		return -EINVAL;
2195
2196 	/* make sure object fits at this offset */
2197 	eaddr = saddr + size - 1;
2198 	if (saddr >= eaddr ||
2199 	    (bo && offset + size > amdgpu_bo_size(bo)))
2200 		return -EINVAL;
2201
2202 	/* Allocate all the needed memory */
2203 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2204 	if (!mapping)
2205 		return -ENOMEM;
2206
2207 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2208 	if (r) {
2209 		kfree(mapping);
2210 		return r;
2211 	}
2212
2213 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2214 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2215
2216 	mapping->start = saddr;
2217 	mapping->last = eaddr;
2218 	mapping->offset = offset;
2219 	mapping->flags = flags;
2220
2221 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2222
2223 	return 0;
2224 }
2225
2226 /**
2227  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2228  *
2229  * @adev: amdgpu_device pointer
2230  * @bo_va: bo_va to remove the address from
2231  * @saddr: where the BO is mapped
2232  *
2233  * Remove a mapping of the BO at the specified addr from the VM.
2234  *
2235  * Returns:
2236  * 0 for success, error for failure.
2237  *
2238  * Object has to be reserved and unreserved outside!
2239  */
2240 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2241 		       struct amdgpu_bo_va *bo_va,
2242 		       uint64_t saddr)
2243 {
2244 	struct amdgpu_bo_va_mapping *mapping;
2245 	struct amdgpu_vm *vm = bo_va->base.vm;
2246 	bool valid = true;
2247
2248 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2249
2250 	list_for_each_entry(mapping, &bo_va->valids, list) {
2251 		if (mapping->start == saddr)
2252 			break;
2253 	}
2254
2255 	if (&mapping->list == &bo_va->valids) {
2256 		valid = false;
2257
2258 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2259 			if (mapping->start == saddr)
2260 				break;
2261 		}
2262
2263 		if (&mapping->list == &bo_va->invalids)
2264 			return -ENOENT;
2265 	}
2266
2267 	list_del(&mapping->list);
2268 	amdgpu_vm_it_remove(mapping, &vm->va);
2269 	mapping->bo_va = NULL;
2270 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2271
2272 	if (valid)
2273 		list_add(&mapping->list, &vm->freed);
2274 	else
2275 		amdgpu_vm_free_mapping(adev, vm, mapping,
2276 				       bo_va->last_pt_update);
2277
2278 	return 0;
2279 }
2280
2281 /**
2282  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2283  *
2284  * @adev: amdgpu_device pointer
2285  * @vm: VM structure to use
2286  * @saddr: start of the range
2287  * @size: size of the range
2288  *
2289  * Remove all mappings in a range, split them as appropriate.
2290  *
2291  * Returns:
2292  * 0 for success, error for failure.
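 *
 * For example, clearing the middle of one larger mapping leaves a "before"
 * and an "after" remainder mapping; mappings completely inside the range are
 * moved to the VM's freed list instead.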
2293 */ 2294 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, 2295 struct amdgpu_vm *vm, 2296 uint64_t saddr, uint64_t size) 2297 { 2298 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; 2299 LIST_HEAD(removed); 2300 uint64_t eaddr; 2301 2302 eaddr = saddr + size - 1; 2303 saddr /= AMDGPU_GPU_PAGE_SIZE; 2304 eaddr /= AMDGPU_GPU_PAGE_SIZE; 2305 2306 /* Allocate all the needed memory */ 2307 before = kzalloc(sizeof(*before), GFP_KERNEL); 2308 if (!before) 2309 return -ENOMEM; 2310 INIT_LIST_HEAD(&before->list); 2311 2312 after = kzalloc(sizeof(*after), GFP_KERNEL); 2313 if (!after) { 2314 kfree(before); 2315 return -ENOMEM; 2316 } 2317 INIT_LIST_HEAD(&after->list); 2318 2319 /* Now gather all removed mappings */ 2320 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); 2321 while (tmp) { 2322 /* Remember mapping split at the start */ 2323 if (tmp->start < saddr) { 2324 before->start = tmp->start; 2325 before->last = saddr - 1; 2326 before->offset = tmp->offset; 2327 before->flags = tmp->flags; 2328 before->bo_va = tmp->bo_va; 2329 list_add(&before->list, &tmp->bo_va->invalids); 2330 } 2331 2332 /* Remember mapping split at the end */ 2333 if (tmp->last > eaddr) { 2334 after->start = eaddr + 1; 2335 after->last = tmp->last; 2336 after->offset = tmp->offset; 2337 after->offset += after->start - tmp->start; 2338 after->flags = tmp->flags; 2339 after->bo_va = tmp->bo_va; 2340 list_add(&after->list, &tmp->bo_va->invalids); 2341 } 2342 2343 list_del(&tmp->list); 2344 list_add(&tmp->list, &removed); 2345 2346 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); 2347 } 2348 2349 /* And free them up */ 2350 list_for_each_entry_safe(tmp, next, &removed, list) { 2351 amdgpu_vm_it_remove(tmp, &vm->va); 2352 list_del(&tmp->list); 2353 2354 if (tmp->start < saddr) 2355 tmp->start = saddr; 2356 if (tmp->last > eaddr) 2357 tmp->last = eaddr; 2358 2359 tmp->bo_va = NULL; 2360 list_add(&tmp->list, &vm->freed); 2361 trace_amdgpu_vm_bo_unmap(NULL, tmp); 2362 } 2363 2364 /* Insert partial mapping before the range */ 2365 if (!list_empty(&before->list)) { 2366 amdgpu_vm_it_insert(before, &vm->va); 2367 if (before->flags & AMDGPU_PTE_PRT) 2368 amdgpu_vm_prt_get(adev); 2369 } else { 2370 kfree(before); 2371 } 2372 2373 /* Insert partial mapping after the range */ 2374 if (!list_empty(&after->list)) { 2375 amdgpu_vm_it_insert(after, &vm->va); 2376 if (after->flags & AMDGPU_PTE_PRT) 2377 amdgpu_vm_prt_get(adev); 2378 } else { 2379 kfree(after); 2380 } 2381 2382 return 0; 2383 } 2384 2385 /** 2386 * amdgpu_vm_bo_lookup_mapping - find mapping by address 2387 * 2388 * @vm: the requested VM 2389 * @addr: the address 2390 * 2391 * Find a mapping by it's address. 2392 * 2393 * Returns: 2394 * The amdgpu_bo_va_mapping matching for addr or NULL 2395 * 2396 */ 2397 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, 2398 uint64_t addr) 2399 { 2400 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); 2401 } 2402 2403 /** 2404 * amdgpu_vm_bo_trace_cs - trace all reserved mappings 2405 * 2406 * @vm: the requested vm 2407 * @ticket: CS ticket 2408 * 2409 * Trace all mappings of BOs reserved during a command submission. 
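 * This is a no-op unless the amdgpu_vm_bo_cs tracepoint is enabled.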
2410  */
2411 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2412 {
2413 	struct amdgpu_bo_va_mapping *mapping;
2414
2415 	if (!trace_amdgpu_vm_bo_cs_enabled())
2416 		return;
2417
2418 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2419 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2420 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2421 			struct amdgpu_bo *bo;
2422
2423 			bo = mapping->bo_va->base.bo;
2424 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2425 			    ticket)
2426 				continue;
2427 		}
2428
2429 		trace_amdgpu_vm_bo_cs(mapping);
2430 	}
2431 }
2432
2433 /**
2434  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2435  *
2436  * @adev: amdgpu_device pointer
2437  * @bo_va: requested bo_va
2438  *
2439  * Remove @bo_va->bo from the requested vm.
2440  *
2441  * Object has to be reserved!
2442  */
2443 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2444 		      struct amdgpu_bo_va *bo_va)
2445 {
2446 	struct amdgpu_bo_va_mapping *mapping, *next;
2447 	struct amdgpu_bo *bo = bo_va->base.bo;
2448 	struct amdgpu_vm *vm = bo_va->base.vm;
2449 	struct amdgpu_vm_bo_base **base;
2450
2451 	if (bo) {
2452 		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2453 			vm->bulk_moveable = false;
2454
2455 		for (base = &bo_va->base.bo->vm_bo; *base;
2456 		     base = &(*base)->next) {
2457 			if (*base != &bo_va->base)
2458 				continue;
2459
2460 			*base = bo_va->base.next;
2461 			break;
2462 		}
2463 	}
2464
2465 	spin_lock(&vm->invalidated_lock);
2466 	list_del(&bo_va->base.vm_status);
2467 	spin_unlock(&vm->invalidated_lock);
2468
2469 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2470 		list_del(&mapping->list);
2471 		amdgpu_vm_it_remove(mapping, &vm->va);
2472 		mapping->bo_va = NULL;
2473 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2474 		list_add(&mapping->list, &vm->freed);
2475 	}
2476 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2477 		list_del(&mapping->list);
2478 		amdgpu_vm_it_remove(mapping, &vm->va);
2479 		amdgpu_vm_free_mapping(adev, vm, mapping,
2480 				       bo_va->last_pt_update);
2481 	}
2482
2483 	dma_fence_put(bo_va->last_pt_update);
2484
2485 	if (bo && bo_va->is_xgmi) {
2486 		mutex_lock(&adev->vm_manager.lock_pstate);
2487 		if (--adev->vm_manager.xgmi_map_counter == 0)
2488 			amdgpu_xgmi_set_pstate(adev, 0);
2489 		mutex_unlock(&adev->vm_manager.lock_pstate);
2490 	}
2491
2492 	kfree(bo_va);
2493 }
2494
2495 /**
2496  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2497  *
2498  * @adev: amdgpu_device pointer
2499  * @bo: amdgpu buffer object
2500  * @evicted: is the BO evicted
2501  *
2502  * Mark @bo as invalid.
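 * Depending on the BO type and where its reservation object comes from, this
 * moves the BO to the evicted, relocated, moved or invalidated per-VM state
 * list so the next VM update can handle it accordingly.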
2503 */ 2504 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2505 struct amdgpu_bo *bo, bool evicted) 2506 { 2507 struct amdgpu_vm_bo_base *bo_base; 2508 2509 /* shadow bo doesn't have bo base, its validation needs its parent */ 2510 if (bo->parent && bo->parent->shadow == bo) 2511 bo = bo->parent; 2512 2513 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2514 struct amdgpu_vm *vm = bo_base->vm; 2515 2516 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 2517 amdgpu_vm_bo_evicted(bo_base); 2518 continue; 2519 } 2520 2521 if (bo_base->moved) 2522 continue; 2523 bo_base->moved = true; 2524 2525 if (bo->tbo.type == ttm_bo_type_kernel) 2526 amdgpu_vm_bo_relocated(bo_base); 2527 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2528 amdgpu_vm_bo_moved(bo_base); 2529 else 2530 amdgpu_vm_bo_invalidated(bo_base); 2531 } 2532 } 2533 2534 /** 2535 * amdgpu_vm_get_block_size - calculate VM page table size as power of two 2536 * 2537 * @vm_size: VM size 2538 * 2539 * Returns: 2540 * VM page table as power of two 2541 */ 2542 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) 2543 { 2544 /* Total bits covered by PD + PTs */ 2545 unsigned bits = ilog2(vm_size) + 18; 2546 2547 /* Make sure the PD is 4K in size up to 8GB address space. 2548 Above that split equal between PD and PTs */ 2549 if (vm_size <= 8) 2550 return (bits - 9); 2551 else 2552 return ((bits + 3) / 2); 2553 } 2554 2555 /** 2556 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2557 * 2558 * @adev: amdgpu_device pointer 2559 * @min_vm_size: the minimum vm size in GB if it's set auto 2560 * @fragment_size_default: Default PTE fragment size 2561 * @max_level: max VMPT level 2562 * @max_bits: max address space size in bits 2563 * 2564 */ 2565 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 2566 uint32_t fragment_size_default, unsigned max_level, 2567 unsigned max_bits) 2568 { 2569 unsigned int max_size = 1 << (max_bits - 30); 2570 unsigned int vm_size; 2571 uint64_t tmp; 2572 2573 /* adjust vm size first */ 2574 if (amdgpu_vm_size != -1) { 2575 vm_size = amdgpu_vm_size; 2576 if (vm_size > max_size) { 2577 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2578 amdgpu_vm_size, max_size); 2579 vm_size = max_size; 2580 } 2581 } else { 2582 struct sysinfo si; 2583 unsigned int phys_ram_gb; 2584 2585 /* Optimal VM size depends on the amount of physical 2586 * RAM available. Underlying requirements and 2587 * assumptions: 2588 * 2589 * - Need to map system memory and VRAM from all GPUs 2590 * - VRAM from other GPUs not known here 2591 * - Assume VRAM <= system memory 2592 * - On GFX8 and older, VM space can be segmented for 2593 * different MTYPEs 2594 * - Need to allow room for fragmentation, guard pages etc. 2595 * 2596 * This adds up to a rough guess of system memory x3. 2597 * Round up to power of two to maximize the available 2598 * VM size with the given page table size. 
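 *
 * Worked example (hypothetical numbers): 12 GB of system RAM and a
 * min_vm_size of 32 GB give max(12 * 3, 32) = 36 GB, which is rounded up
 * to a 64 GB VM size (subject to the max_size clamp).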
2599 */ 2600 si_meminfo(&si); 2601 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + 2602 (1 << 30) - 1) >> 30; 2603 vm_size = roundup_pow_of_two( 2604 min(max(phys_ram_gb * 3, min_vm_size), max_size)); 2605 } 2606 2607 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2608 2609 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2610 if (amdgpu_vm_block_size != -1) 2611 tmp >>= amdgpu_vm_block_size - 9; 2612 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; 2613 adev->vm_manager.num_level = min(max_level, (unsigned)tmp); 2614 switch (adev->vm_manager.num_level) { 2615 case 3: 2616 adev->vm_manager.root_level = AMDGPU_VM_PDB2; 2617 break; 2618 case 2: 2619 adev->vm_manager.root_level = AMDGPU_VM_PDB1; 2620 break; 2621 case 1: 2622 adev->vm_manager.root_level = AMDGPU_VM_PDB0; 2623 break; 2624 default: 2625 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); 2626 } 2627 /* block size depends on vm size and hw setup*/ 2628 if (amdgpu_vm_block_size != -1) 2629 adev->vm_manager.block_size = 2630 min((unsigned)amdgpu_vm_block_size, max_bits 2631 - AMDGPU_GPU_PAGE_SHIFT 2632 - 9 * adev->vm_manager.num_level); 2633 else if (adev->vm_manager.num_level > 1) 2634 adev->vm_manager.block_size = 9; 2635 else 2636 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); 2637 2638 if (amdgpu_vm_fragment_size == -1) 2639 adev->vm_manager.fragment_size = fragment_size_default; 2640 else 2641 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; 2642 2643 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2644 vm_size, adev->vm_manager.num_level + 1, 2645 adev->vm_manager.block_size, 2646 adev->vm_manager.fragment_size); 2647 } 2648 2649 /** 2650 * amdgpu_vm_wait_idle - wait for the VM to become idle 2651 * 2652 * @vm: VM object to wait for 2653 * @timeout: timeout to wait for VM to become idle 2654 */ 2655 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2656 { 2657 return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2658 true, true, timeout); 2659 } 2660 2661 /** 2662 * amdgpu_vm_init - initialize a vm instance 2663 * 2664 * @adev: amdgpu_device pointer 2665 * @vm: requested vm 2666 * @vm_context: Indicates if it GFX or Compute context 2667 * @pasid: Process address space identifier 2668 * 2669 * Init @vm fields. 2670 * 2671 * Returns: 2672 * 0 for success, error for failure. 
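 *
 * Minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);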
2673  */
2674 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2675 		   int vm_context, unsigned int pasid)
2676 {
2677 	struct amdgpu_bo_param bp;
2678 	struct amdgpu_bo *root;
2679 	int r, i;
2680
2681 	vm->va = RB_ROOT_CACHED;
2682 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2683 		vm->reserved_vmid[i] = NULL;
2684 	INIT_LIST_HEAD(&vm->evicted);
2685 	INIT_LIST_HEAD(&vm->relocated);
2686 	INIT_LIST_HEAD(&vm->moved);
2687 	INIT_LIST_HEAD(&vm->idle);
2688 	INIT_LIST_HEAD(&vm->invalidated);
2689 	spin_lock_init(&vm->invalidated_lock);
2690 	INIT_LIST_HEAD(&vm->freed);
2691
2692 	/* create scheduler entities for page table updates */
2693 	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
2694 				  adev->vm_manager.vm_pte_num_rqs, NULL);
2695 	if (r)
2696 		return r;
2697
2698 	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
2699 				  adev->vm_manager.vm_pte_num_rqs, NULL);
2700 	if (r)
2701 		goto error_free_direct;
2702
2703 	vm->pte_support_ats = false;
2704
2705 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2706 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2707 					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2708
2709 		if (adev->asic_type == CHIP_RAVEN)
2710 			vm->pte_support_ats = true;
2711 	} else {
2712 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2713 					    AMDGPU_VM_USE_CPU_FOR_GFX);
2714 	}
2715 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2716 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2717 	WARN_ONCE((vm->use_cpu_for_update &&
2718 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2719 		  "CPU update of VM recommended only for large BAR system\n");
2720
2721 	if (vm->use_cpu_for_update)
2722 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2723 	else
2724 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2725 	vm->last_update = NULL;
2726
2727 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
2728 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2729 		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2730 	r = amdgpu_bo_create(adev, &bp, &root);
2731 	if (r)
2732 		goto error_free_delayed;
2733
2734 	r = amdgpu_bo_reserve(root, true);
2735 	if (r)
2736 		goto error_free_root;
2737
2738 	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2739 	if (r)
2740 		goto error_unreserve;
2741
2742 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2743
2744 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
2745 	if (r)
2746 		goto error_unreserve;
2747
2748 	amdgpu_bo_unreserve(vm->root.base.bo);
2749
2750 	if (pasid) {
2751 		unsigned long flags;
2752
2753 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2754 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2755 			      GFP_ATOMIC);
2756 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2757 		if (r < 0)
2758 			goto error_free_root;
2759
2760 		vm->pasid = pasid;
2761 	}
2762
2763 	INIT_KFIFO(vm->faults);
2764
2765 	return 0;
2766
2767 error_unreserve:
2768 	amdgpu_bo_unreserve(vm->root.base.bo);
2769
2770 error_free_root:
2771 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2772 	amdgpu_bo_unref(&vm->root.base.bo);
2773 	vm->root.base.bo = NULL;
2774
2775 error_free_delayed:
2776 	drm_sched_entity_destroy(&vm->delayed);
2777
2778 error_free_direct:
2779 	drm_sched_entity_destroy(&vm->direct);
2780
2781 	return r;
2782 }
2783
2784 /**
2785  * amdgpu_vm_check_clean_reserved - check if a VM is clean
2786  *
2787  * @adev: amdgpu_device pointer
2788  * @vm: the VM to check
2789  *
2790  * Check all entries of the root PD. If any subsequent PDs are allocated,
2791  * page tables are already being created and filled, so this is not a clean
2792  * VM.
2793  *
2794  * 
Returns: 2795 * 0 if this VM is clean 2796 */ 2797 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, 2798 struct amdgpu_vm *vm) 2799 { 2800 enum amdgpu_vm_level root = adev->vm_manager.root_level; 2801 unsigned int entries = amdgpu_vm_num_entries(adev, root); 2802 unsigned int i = 0; 2803 2804 if (!(vm->root.entries)) 2805 return 0; 2806 2807 for (i = 0; i < entries; i++) { 2808 if (vm->root.entries[i].base.bo) 2809 return -EINVAL; 2810 } 2811 2812 return 0; 2813 } 2814 2815 /** 2816 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM 2817 * 2818 * @adev: amdgpu_device pointer 2819 * @vm: requested vm 2820 * @pasid: pasid to use 2821 * 2822 * This only works on GFX VMs that don't have any BOs added and no 2823 * page tables allocated yet. 2824 * 2825 * Changes the following VM parameters: 2826 * - use_cpu_for_update 2827 * - pte_supports_ats 2828 * - pasid (old PASID is released, because compute manages its own PASIDs) 2829 * 2830 * Reinitializes the page directory to reflect the changed ATS 2831 * setting. 2832 * 2833 * Returns: 2834 * 0 for success, -errno for errors. 2835 */ 2836 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, 2837 unsigned int pasid) 2838 { 2839 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); 2840 int r; 2841 2842 r = amdgpu_bo_reserve(vm->root.base.bo, true); 2843 if (r) 2844 return r; 2845 2846 /* Sanity checks */ 2847 r = amdgpu_vm_check_clean_reserved(adev, vm); 2848 if (r) 2849 goto unreserve_bo; 2850 2851 if (pasid) { 2852 unsigned long flags; 2853 2854 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2855 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, 2856 GFP_ATOMIC); 2857 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2858 2859 if (r == -ENOSPC) 2860 goto unreserve_bo; 2861 r = 0; 2862 } 2863 2864 /* Check if PD needs to be reinitialized and do it before 2865 * changing any other state, in case it fails. 2866 */ 2867 if (pte_support_ats != vm->pte_support_ats) { 2868 vm->pte_support_ats = pte_support_ats; 2869 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); 2870 if (r) 2871 goto free_idr; 2872 } 2873 2874 /* Update VM state */ 2875 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2876 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 2877 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2878 vm->use_cpu_for_update ? 
"CPU" : "SDMA"); 2879 WARN_ONCE((vm->use_cpu_for_update && 2880 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2881 "CPU update of VM recommended only for large BAR system\n"); 2882 2883 if (vm->use_cpu_for_update) 2884 vm->update_funcs = &amdgpu_vm_cpu_funcs; 2885 else 2886 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2887 dma_fence_put(vm->last_update); 2888 vm->last_update = NULL; 2889 2890 if (vm->pasid) { 2891 unsigned long flags; 2892 2893 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2894 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 2895 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2896 2897 /* Free the original amdgpu allocated pasid 2898 * Will be replaced with kfd allocated pasid 2899 */ 2900 amdgpu_pasid_free(vm->pasid); 2901 vm->pasid = 0; 2902 } 2903 2904 /* Free the shadow bo for compute VM */ 2905 amdgpu_bo_unref(&vm->root.base.bo->shadow); 2906 2907 if (pasid) 2908 vm->pasid = pasid; 2909 2910 goto unreserve_bo; 2911 2912 free_idr: 2913 if (pasid) { 2914 unsigned long flags; 2915 2916 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2917 idr_remove(&adev->vm_manager.pasid_idr, pasid); 2918 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2919 } 2920 unreserve_bo: 2921 amdgpu_bo_unreserve(vm->root.base.bo); 2922 return r; 2923 } 2924 2925 /** 2926 * amdgpu_vm_release_compute - release a compute vm 2927 * @adev: amdgpu_device pointer 2928 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute 2929 * 2930 * This is a correspondant of amdgpu_vm_make_compute. It decouples compute 2931 * pasid from vm. Compute should stop use of vm after this call. 2932 */ 2933 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2934 { 2935 if (vm->pasid) { 2936 unsigned long flags; 2937 2938 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2939 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 2940 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2941 } 2942 vm->pasid = 0; 2943 } 2944 2945 /** 2946 * amdgpu_vm_fini - tear down a vm instance 2947 * 2948 * @adev: amdgpu_device pointer 2949 * @vm: requested vm 2950 * 2951 * Tear down @vm. 
2952 * Unbind the VM and remove all bos from the vm bo list 2953 */ 2954 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2955 { 2956 struct amdgpu_bo_va_mapping *mapping, *tmp; 2957 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; 2958 struct amdgpu_bo *root; 2959 int i; 2960 2961 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 2962 2963 root = amdgpu_bo_ref(vm->root.base.bo); 2964 amdgpu_bo_reserve(root, true); 2965 if (vm->pasid) { 2966 unsigned long flags; 2967 2968 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 2969 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); 2970 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2971 vm->pasid = 0; 2972 } 2973 2974 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 2975 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { 2976 amdgpu_vm_prt_fini(adev, vm); 2977 prt_fini_needed = false; 2978 } 2979 2980 list_del(&mapping->list); 2981 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 2982 } 2983 2984 amdgpu_vm_free_pts(adev, vm, NULL); 2985 amdgpu_bo_unreserve(root); 2986 amdgpu_bo_unref(&root); 2987 WARN_ON(vm->root.base.bo); 2988 2989 drm_sched_entity_destroy(&vm->direct); 2990 drm_sched_entity_destroy(&vm->delayed); 2991 2992 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 2993 dev_err(adev->dev, "still active bo inside vm\n"); 2994 } 2995 rbtree_postorder_for_each_entry_safe(mapping, tmp, 2996 &vm->va.rb_root, rb) { 2997 /* Don't remove the mapping here, we don't want to trigger a 2998 * rebalance and the tree is about to be destroyed anyway. 2999 */ 3000 list_del(&mapping->list); 3001 kfree(mapping); 3002 } 3003 3004 dma_fence_put(vm->last_update); 3005 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 3006 amdgpu_vmid_free_reserved(adev, vm, i); 3007 } 3008 3009 /** 3010 * amdgpu_vm_manager_init - init the VM manager 3011 * 3012 * @adev: amdgpu_device pointer 3013 * 3014 * Initialize the VM manager structures 3015 */ 3016 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 3017 { 3018 unsigned i; 3019 3020 amdgpu_vmid_mgr_init(adev); 3021 3022 adev->vm_manager.fence_context = 3023 dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3024 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 3025 adev->vm_manager.seqno[i] = 0; 3026 3027 spin_lock_init(&adev->vm_manager.prt_lock); 3028 atomic_set(&adev->vm_manager.num_prt_users, 0); 3029 3030 /* If not overridden by the user, by default, only in large BAR systems 3031 * Compute VM tables will be updated by CPU 3032 */ 3033 #ifdef CONFIG_X86_64 3034 if (amdgpu_vm_update_mode == -1) { 3035 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) 3036 adev->vm_manager.vm_update_mode = 3037 AMDGPU_VM_USE_CPU_FOR_COMPUTE; 3038 else 3039 adev->vm_manager.vm_update_mode = 0; 3040 } else 3041 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; 3042 #else 3043 adev->vm_manager.vm_update_mode = 0; 3044 #endif 3045 3046 idr_init(&adev->vm_manager.pasid_idr); 3047 spin_lock_init(&adev->vm_manager.pasid_lock); 3048 3049 adev->vm_manager.xgmi_map_counter = 0; 3050 mutex_init(&adev->vm_manager.lock_pstate); 3051 } 3052 3053 /** 3054 * amdgpu_vm_manager_fini - cleanup VM manager 3055 * 3056 * @adev: amdgpu_device pointer 3057 * 3058 * Cleanup the VM manager and free resources. 3059 */ 3060 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 3061 { 3062 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); 3063 idr_destroy(&adev->vm_manager.pasid_idr); 3064 3065 amdgpu_vmid_mgr_fini(adev); 3066 } 3067 3068 /** 3069 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. 
3070 * 3071 * @dev: drm device pointer 3072 * @data: drm_amdgpu_vm 3073 * @filp: drm file pointer 3074 * 3075 * Returns: 3076 * 0 for success, -errno for errors. 3077 */ 3078 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 3079 { 3080 union drm_amdgpu_vm *args = data; 3081 struct amdgpu_device *adev = dev->dev_private; 3082 struct amdgpu_fpriv *fpriv = filp->driver_priv; 3083 int r; 3084 3085 switch (args->in.op) { 3086 case AMDGPU_VM_OP_RESERVE_VMID: 3087 /* We only have requirement to reserve vmid from gfxhub */ 3088 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, 3089 AMDGPU_GFXHUB_0); 3090 if (r) 3091 return r; 3092 break; 3093 case AMDGPU_VM_OP_UNRESERVE_VMID: 3094 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3095 break; 3096 default: 3097 return -EINVAL; 3098 } 3099 3100 return 0; 3101 } 3102 3103 /** 3104 * amdgpu_vm_get_task_info - Extracts task info for a PASID. 3105 * 3106 * @adev: drm device pointer 3107 * @pasid: PASID identifier for VM 3108 * @task_info: task_info to fill. 3109 */ 3110 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, 3111 struct amdgpu_task_info *task_info) 3112 { 3113 struct amdgpu_vm *vm; 3114 unsigned long flags; 3115 3116 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); 3117 3118 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3119 if (vm) 3120 *task_info = vm->task_info; 3121 3122 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3123 } 3124 3125 /** 3126 * amdgpu_vm_set_task_info - Sets VMs task info. 3127 * 3128 * @vm: vm for which to set the info 3129 */ 3130 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) 3131 { 3132 if (vm->task_info.pid) 3133 return; 3134 3135 vm->task_info.pid = current->pid; 3136 get_task_comm(vm->task_info.task_name, current); 3137 3138 if (current->group_leader->mm != current->mm) 3139 return; 3140 3141 vm->task_info.tgid = current->group_leader->pid; 3142 get_task_comm(vm->task_info.process_name, current->group_leader); 3143 } 3144 3145 /** 3146 * amdgpu_vm_handle_fault - graceful handling of VM faults. 3147 * @adev: amdgpu device pointer 3148 * @pasid: PASID of the VM 3149 * @addr: Address of the fault 3150 * 3151 * Try to gracefully handle a VM fault. Return true if the fault was handled and 3152 * shouldn't be reported any more. 
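 * Depending on amdgpu_vm_fault_stop this either redirects the faulting
 * address to the dummy page or writes an empty PTE so the hardware retries
 * silently.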
3153 */ 3154 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, 3155 uint64_t addr) 3156 { 3157 struct amdgpu_bo *root; 3158 uint64_t value, flags; 3159 struct amdgpu_vm *vm; 3160 long r; 3161 3162 spin_lock(&adev->vm_manager.pasid_lock); 3163 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3164 if (vm) 3165 root = amdgpu_bo_ref(vm->root.base.bo); 3166 else 3167 root = NULL; 3168 spin_unlock(&adev->vm_manager.pasid_lock); 3169 3170 if (!root) 3171 return false; 3172 3173 r = amdgpu_bo_reserve(root, true); 3174 if (r) 3175 goto error_unref; 3176 3177 /* Double check that the VM still exists */ 3178 spin_lock(&adev->vm_manager.pasid_lock); 3179 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3180 if (vm && vm->root.base.bo != root) 3181 vm = NULL; 3182 spin_unlock(&adev->vm_manager.pasid_lock); 3183 if (!vm) 3184 goto error_unlock; 3185 3186 addr /= AMDGPU_GPU_PAGE_SIZE; 3187 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | 3188 AMDGPU_PTE_SYSTEM; 3189 3190 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { 3191 /* Redirect the access to the dummy page */ 3192 value = adev->dummy_page_addr; 3193 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | 3194 AMDGPU_PTE_WRITEABLE; 3195 } else { 3196 /* Let the hw retry silently on the PTE */ 3197 value = 0; 3198 } 3199 3200 r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, 3201 flags, value, NULL, NULL); 3202 if (r) 3203 goto error_unlock; 3204 3205 r = amdgpu_vm_update_pdes(adev, vm, true); 3206 3207 error_unlock: 3208 amdgpu_bo_unreserve(root); 3209 if (r < 0) 3210 DRM_ERROR("Can't handle page fault (%ld)\n", r); 3211 3212 error_unref: 3213 amdgpu_bo_unref(&root); 3214 3215 return false; 3216 } 3217