/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, but rather than
 * there being a single global GART table for the entire GPU, there are
 * multiple VM page tables active at any given time. The VM page tables
 * can contain a mix of VRAM pages and system memory pages, and system
 * memory pages can be mapped as snooped (cached system pages) or
 * unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
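
/*
 * Worked example (illustrative only, the numbers are assumptions and not
 * taken from this file): with the 4KB GPU page size, a max_pfn of 1 << 20
 * covers 4GB of VM address space.  Assuming amdgpu_vm_block_size = 9
 * (512 PTEs per page table), amdgpu_vm_num_pdes() returns
 * (1 << 20) >> 9 = 2048 PDEs, and amdgpu_vm_directory_size() returns
 * 2048 * 8 = 16KB for the page directory BO.
 */
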
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the VM PT BOs to a duplicates list
 *
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
	unsigned i;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}

}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence)
{
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_manager_id *id;
	int r;

	mutex_lock(&adev->vm_manager.lock);

	/* check if the id is still valid */
	if (vm_id->id) {
		long owner;

		id = &adev->vm_manager.ids[vm_id->id];
		owner = atomic_long_read(&id->owner);
		if (owner == (long)vm) {
			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

			fence_put(id->active);
			id->active = fence_get(fence);

			mutex_unlock(&adev->vm_manager.lock);
			return 0;
		}
	}

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	id = list_first_entry(&adev->vm_manager.ids_lru,
			      struct amdgpu_vm_manager_id,
			      list);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic_long_set(&id->owner, (long)vm);

	vm_id->id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

	r = amdgpu_sync_fence(ring->adev, sync, id->active);

	if (!r) {
		fence_put(id->active);
		id->active = fence_get(fence);
	}

	mutex_unlock(&adev->vm_manager.lock);
	return r;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm.
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct fence *flushed_updates = vm_id->flushed_updates;
	bool is_later;

	if (!flushed_updates)
		is_later = true;
	else if (!updates)
		is_later = false;
	else
		is_later = fence_is_later(updates, flushed_updates);

	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		if (is_later) {
			vm_id->flushed_updates = fence_get(updates);
			fence_put(flushed_updates);
		}
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw access flags
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to set up the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_gart *gtt,
				   uint32_t gtt_flags,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if (gtt) {
		dma_addr_t *pages_addr = gtt->pages_addr;
		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
				    count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

int amdgpu_vm_free_job(struct amdgpu_job *job)
{
	int i;

	for (i = 0; i < job->num_ibs; i++)
		amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 *
 * The BO must be reserved before calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct fence *fence = NULL;
	struct amdgpu_ib *ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto error;
	}

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
	if (r)
		goto error_free;

	ib->length_dw = 0;

	amdgpu_vm_update_pages(adev, NULL, 0, ib, addr, 0, entries, 0, 0);

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > 64);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (!r)
		amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
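
/*
 * Illustrative note (an aside, not derived from hardware docs): with 4KB
 * CPU pages the final mask above makes the ORed-in page offset a no-op,
 * but when the CPU page size is larger than the 4KB GPU page size (e.g.
 * 64KB CPU pages), pages_addr[] holds one DMA address per CPU page and
 * the "addr & ~PAGE_MASK" part selects the 4KB GPU page inside it before
 * the low 12 bits are cleared.
 */
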
/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;

	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}
	ib->length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, NULL, 0, ib,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (ib->length_dw != 0) {
		amdgpu_vm_pad_ib(adev, ib);
		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
							 &amdgpu_vm_free_job,
							 AMDGPU_FENCE_OWNER_VM,
							 &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);
	}

	if (ib->length_dw == 0) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}

	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_gart *gtt,
				uint32_t gtt_flags,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* Abort early if there isn't anything to do */
	if (pe_start == pe_end)
		return;

	/* system pages are not contiguous */
	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				  struct amdgpu_gart *gtt,
				  uint32_t gtt_flags,
				  struct amdgpu_vm *vm,
				  struct amdgpu_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
		unsigned nptes;
		uint64_t pe_start;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pe_start = amdgpu_bo_gpu_offset(pt);
		pe_start += (addr & mask) * 8;

		if (last_pe_end != pe_start) {

			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
					    last_pe_start, last_pe_end,
					    last_dst, flags);

			last_pe_start = pe_start;
			last_pe_end = pe_start + 8 * nptes;
			last_dst = dst;
		} else {
			last_pe_end += 8 * nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
			    last_pe_start, last_pe_end,
			    last_dst, flags);
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
					struct amdgpu_gart *gtt,
					uint32_t gtt_flags,
					struct amdgpu_vm *vm,
					uint64_t start, uint64_t last,
					uint32_t flags, uint64_t addr,
					struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (gtt) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}

	r = amdgpu_sync_resv(adev, &ib->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
			      addr, flags);

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct amdgpu_gart *gtt,
				      uint32_t gtt_flags,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t addr, struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

	uint64_t start = mapping->it.start;
	uint32_t flags = gtt_flags;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just to be on the safe side, filter the flags here
	 * before they are used
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	addr += mapping->offset;

	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size);
		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size;
	}

	return 0;
}
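
/*
 * Sizing note (illustrative): with AMDGPU_GPU_PAGE_SIZE = 4096, max_size
 * above works out to 64MB / 4KB = 16384 page table entries per chunk, so
 * a large system memory mapping is updated 64MB of address space at a
 * time, keeping each individual update small enough for a single SDMA IB.
 */
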
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_gart *gtt = NULL;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			gtt = &bo_va->bo->adev->gart;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	spin_lock(&vm->freed_lock);
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		spin_unlock(&vm->freed_lock);
		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
					       0, NULL);
		kfree(mapping);
		if (r)
			return r;

		spin_lock(&vm->freed_lock);
	}
	spin_unlock(&vm->freed_lock);

	return 0;

}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last PT update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);
		mutex_lock(&bo_va->mutex);
		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		mutex_unlock(&bo_va->mutex);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);
	mutex_init(&bo_va->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}
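
/*
 * Typical usage sketch (assumptions about the caller, not mandated by this
 * file): a reserved BO is first attached to a VM and then mapped at a
 * GPU-page-aligned virtual address, e.g.
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (bo_va)
 *		r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0, size, flags);
 *
 * with saddr and size multiples of AMDGPU_GPU_PAGE_SIZE and flags such as
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE.
 */
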
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the area to map, in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	spin_unlock(&vm->it_lock);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	mutex_lock(&bo_va->mutex);
	list_add(&mapping->list, &bo_va->invalids);
	mutex_unlock(&bo_va->mutex);
	spin_lock(&vm->it_lock);
	interval_tree_insert(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	mutex_lock(&bo_va->mutex);
	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			mutex_unlock(&bo_va->mutex);
			return -ENOENT;
		}
	}
	mutex_unlock(&bo_va->mutex);
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid) {
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	} else {
		kfree(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		kfree(mapping);
	}
	fence_put(bo_va->last_pt_update);
	mutex_destroy(&bo_va->mutex);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
	}
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);
	spin_lock_init(&vm->it_lock);
	spin_lock_init(&vm->freed_lock);
	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		return r;
	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}
	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		unsigned id = vm->ids[i].id;

		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
				    (long)vm, 0);
		fence_put(vm->ids[i].flushed_updates);
	}

}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i)
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.ids[i].active);
}