/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix
 * of vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
        return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
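
/*
 * Example (illustrative numbers only, not taken from any particular asic
 * configuration): with amdgpu_vm_block_size == 9 there are 512 PTEs per
 * page table, so a VM covering max_pfn == 0x100000 pages (4GB of GPU
 * address space in 4KB pages) needs 0x100000 >> 9 == 2048 page directory
 * entries. At 8 bytes per PDE the page directory itself is 16KB, which
 * amdgpu_vm_directory_size() then rounds up to GPU page granularity.
 */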

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->robj = vm->page_directory;
        entry->priority = 0;
        entry->tv.bo = &vm->page_directory->tbo;
        entry->tv.shared = true;
        list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list
 *
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the VM page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
        unsigned i;

        /* add the vm page table to the list */
        for (i = 0; i <= vm->max_pde_used; ++i) {
                struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

                if (!entry->robj)
                        continue;

                list_add(&entry->tv.head, duplicates);
        }

}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        unsigned i;

        spin_lock(&glob->lru_lock);
        for (i = 0; i <= vm->max_pde_used; ++i) {
                struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

                if (!entry->robj)
                        continue;

                ttm_bo_move_to_lru_tail(&entry->robj->tbo);
        }
        spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence)
{
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_manager_id *id;
        int r;

        mutex_lock(&adev->vm_manager.lock);

        /* check if the id is still valid */
        if (vm_id->id) {
                long owner;

                id = &adev->vm_manager.ids[vm_id->id];
                owner = atomic_long_read(&id->owner);
                if (owner == (long)vm) {
                        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
                        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

                        fence_put(id->active);
                        id->active = fence_get(fence);

                        mutex_unlock(&adev->vm_manager.lock);
                        return 0;
                }
        }

        /* we definitely need to flush */
        vm_id->pd_gpu_addr = ~0ll;

        id = list_first_entry(&adev->vm_manager.ids_lru,
                              struct amdgpu_vm_manager_id,
                              list);
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
        atomic_long_set(&id->owner, (long)vm);

        vm_id->id = id - adev->vm_manager.ids;
        trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);

        r = amdgpu_sync_fence(ring->adev, sync, id->active);

        if (!r) {
                fence_put(id->active);
                id->active = fence_get(fence);
        }

        mutex_unlock(&adev->vm_manager.lock);
        return r;
}
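
/*
 * Note on the VMID reuse policy implemented above: as long as the LRU entry
 * for vm_id->id is still owned by this VM, the ID is simply refreshed and no
 * flush is forced. Otherwise the least recently used ID is taken over,
 * pd_gpu_addr is poisoned with ~0 so that the next amdgpu_vm_flush()
 * unconditionally emits a flush, and the previous owner's last use is added
 * to @sync so the new job waits for it before touching the page tables.
 */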

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm.
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
                     struct fence *updates)
{
        uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct fence *flushed_updates = vm_id->flushed_updates;
        bool is_later;

        if (!flushed_updates)
                is_later = true;
        else if (!updates)
                is_later = false;
        else
                is_later = fence_is_later(updates, flushed_updates);

        if (pd_addr != vm_id->pd_gpu_addr || is_later) {
                trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
                if (is_later) {
                        vm_id->flushed_updates = fence_get(updates);
                        fence_put(flushed_updates);
                }
                vm_id->pd_gpu_addr = pd_addr;
                amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
        }
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw access flags
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
                                   struct amdgpu_gart *gtt,
                                   uint32_t gtt_flags,
                                   struct amdgpu_ib *ib,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint32_t flags)
{
        trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
                uint64_t src = gtt->table_addr + (addr >> 12) * 8;
                amdgpu_vm_copy_pte(adev, ib, pe, src, count);

        } else if (gtt) {
                dma_addr_t *pages_addr = gtt->pages_addr;
                amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
                                    count, incr, flags);

        } else if (count < 3) {
                amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
                                    count, incr, flags);

        } else {
                amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
                                      count, incr, flags);
        }
}
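
/*
 * Summary of the paths above: GTT pages whose flags match the global GART
 * flags are copied straight out of the GART table (copy_pte), other GTT
 * pages go through write_pte with a per-page DMA address lookup, very small
 * updates (fewer than 3 entries) are also written directly, and larger
 * contiguous ranges use set_pte_pde.
 */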

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 *
 * Need to reserve bo first before calling it.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_bo *bo)
{
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        struct fence *fence = NULL;
        struct amdgpu_job *job;
        unsigned entries;
        uint64_t addr;
        int r;

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto error;

        addr = amdgpu_bo_gpu_offset(bo);
        entries = amdgpu_bo_size(bo) / 8;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto error;

        amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
                               0, 0);
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);

        WARN_ON(job->ibs[0].length_dw > 64);
        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
        if (r)
                goto error_free;

        amdgpu_bo_fence(bo, fence, true);
        fence_put(fence);
        return 0;

error_free:
        amdgpu_job_free(job);

error:
        return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        if (pages_addr) {
                /* page table offset */
                result = pages_addr[addr >> PAGE_SHIFT];

                /* in case cpu page size != gpu page size */
                result |= addr & (~PAGE_MASK);

        } else {
                /* No mapping required */
                result = addr;
        }

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}
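
/*
 * Illustration of the CPU/GPU page size handling above (hypothetical
 * configuration): on a kernel built with 64KB pages (PAGE_SHIFT == 16) a GPU
 * address selects pages_addr[addr >> 16], i.e. the DMA address of the 64KB
 * CPU page, while bits [15:12] of addr pick the 4KB GPU page inside it and
 * survive the final mask; bits [11:0] are always cleared since PTEs address
 * whole 4KB GPU pages.
 */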

/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        struct amdgpu_bo *pd = vm->page_directory;
        uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct fence *fence = NULL;

        int r;

        /* padding, etc. */
        ndw = 64;

        /* assume the worst case */
        ndw += vm->max_pde_used * 6;

        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
                struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
                uint64_t pde, pt;

                if (bo == NULL)
                        continue;

                pt = amdgpu_bo_gpu_offset(bo);
                if (vm->page_tables[pt_idx].addr == pt)
                        continue;
                vm->page_tables[pt_idx].addr = pt;

                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt)) {

                        if (count) {
                                amdgpu_vm_update_pages(adev, NULL, 0, ib,
                                                       last_pde, last_pt,
                                                       count, incr,
                                                       AMDGPU_PTE_VALID);
                        }

                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }

        if (count)
                amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
                                       count, incr, AMDGPU_PTE_VALID);

        if (ib->length_dw != 0) {
                amdgpu_ring_pad_ib(ring, ib);
                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
                WARN_ON(ib->length_dw > ndw);
                r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;

                amdgpu_bo_fence(pd, fence, true);
                fence_put(vm->page_directory_fence);
                vm->page_directory_fence = fence_get(fence);
                fence_put(fence);

        } else {
                amdgpu_job_free(job);
        }

        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}
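
/*
 * The count/last_pde/last_pt bookkeeping above coalesces runs of PDEs whose
 * page tables happen to sit back to back in VRAM into a single
 * amdgpu_vm_update_pages() call, so a fully populated directory can often be
 * rewritten with a handful of commands instead of one per entry.
 */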

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
                                struct amdgpu_gart *gtt,
                                uint32_t gtt_flags,
                                struct amdgpu_ib *ib,
                                uint64_t pe_start, uint64_t pe_end,
                                uint64_t addr, uint32_t flags)
{
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
         */

        /* SI and newer are optimized for 64KB */
        uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
        uint64_t frag_align = 0x80;

        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);

        unsigned count;

        /* Abort early if there isn't anything to do */
        if (pe_start == pe_end)
                return;

        /* system pages are not physically contiguous */
        if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

                count = (pe_end - pe_start) / 8;
                amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
                                       addr, count, AMDGPU_GPU_PAGE_SIZE,
                                       flags);
                return;
        }

        /* handle the 4K area at the beginning */
        if (pe_start != frag_start) {
                count = (frag_start - pe_start) / 8;
                amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
                addr += AMDGPU_GPU_PAGE_SIZE * count;
        }

        /* handle the area in the middle */
        count = (frag_end - frag_start) / 8;
        amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
                               AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

        /* handle the 4K area at the end */
        if (frag_end != pe_end) {
                addr += AMDGPU_GPU_PAGE_SIZE * count;
                count = (pe_end - frag_end) / 8;
                amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
                                       count, AMDGPU_GPU_PAGE_SIZE, flags);
        }
}
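
/*
 * Worked example for the fragment handling above (the addresses are made
 * up): frag_align is 0x80 bytes of PTEs, i.e. 16 PTEs covering 64KB of
 * address space. A request for pe_start == 0x1030 and pe_end == 0x1230 is
 * emitted as a 4KB-granularity head (0x1030-0x1080), a middle section
 * (0x1080-0x1200) written with AMDGPU_PTE_FRAG_64KB set, and a
 * 4KB-granularity tail (0x1200-0x1230).
 */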

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                                  struct amdgpu_gart *gtt,
                                  uint32_t gtt_flags,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_ib *ib,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

        uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
        uint64_t addr;

        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> amdgpu_vm_block_size;
                struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
                unsigned nptes;
                uint64_t pe_start;

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

                pe_start = amdgpu_bo_gpu_offset(pt);
                pe_start += (addr & mask) * 8;

                if (last_pe_end != pe_start) {

                        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
                                            last_pe_start, last_pe_end,
                                            last_dst, flags);

                        last_pe_start = pe_start;
                        last_pe_end = pe_start + 8 * nptes;
                        last_dst = dst;
                } else {
                        last_pe_end += 8 * nptes;
                }

                addr += nptes;
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }

        amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
                            last_pe_start, last_pe_end,
                            last_dst, flags);
}
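
/*
 * Note: the last_pe_* tracking above merges PTE windows across page table
 * boundaries whenever the next table's entries start exactly where the
 * previous window ended in memory, so amdgpu_vm_frag_ptes() gets the largest
 * possible contiguous range to work with.
 */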

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_gart *gtt,
                                       uint32_t gtt_flags,
                                       struct amdgpu_vm *vm,
                                       uint64_t start, uint64_t last,
                                       uint32_t flags, uint64_t addr,
                                       struct fence **fence)
{
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned nptes, ncmds, ndw;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct fence *f = NULL;
        int r;

        /* sync to everything on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
                owner = AMDGPU_FENCE_OWNER_UNDEFINED;

        nptes = last - start + 1;

        /*
         * reserve space for one command every (1 << BLOCK_SIZE)
         * entries or 2k dwords (whatever is smaller)
         */
        ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

        /* padding, etc. */
        ndw = 64;

        if ((gtt == &adev->gart) && (flags == gtt_flags)) {
                /* only copy commands needed */
                ndw += ncmds * 7;

        } else if (gtt) {
                /* header for write data commands */
                ndw += ncmds * 4;

                /* body of write data command */
                ndw += nptes * 2;

        } else {
                /* set page commands needed */
                ndw += ncmds * 10;

                /* two extra commands for begin/end of fragment */
                ndw += 2 * 10;
        }

        r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
                             owner);
        if (r)
                goto error_free;

        r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
        if (r)
                goto error_free;

        amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
                              addr, flags);

        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > ndw);
        r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;

        amdgpu_bo_fence(vm->page_directory, f, true);
        if (fence) {
                fence_put(*fence);
                *fence = fence_get(f);
        }
        fence_put(f);
        return 0;

error_free:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct amdgpu_gart *gtt,
                                      uint32_t gtt_flags,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                                      uint64_t addr, struct fence **fence)
{
        const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

        uint64_t start = mapping->it.start;
        uint32_t flags = gtt_flags;
        int r;

        /* normally, bo_va->flags only contains the READABLE and WRITEABLE
         * bits, but just to be safe filter the flags here as well
         */
        if (!(mapping->flags & AMDGPU_PTE_READABLE))
                flags &= ~AMDGPU_PTE_READABLE;
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                flags &= ~AMDGPU_PTE_WRITEABLE;

        trace_amdgpu_vm_bo_update(mapping);

        addr += mapping->offset;

        if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
                return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
                                                   start, mapping->it.last,
                                                   flags, addr, fence);

        while (start != mapping->it.last + 1) {
                uint64_t last;

                last = min((uint64_t)mapping->it.last, start + max_size);
                r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
                        return r;

                start = last + 1;
                addr += max_size;
        }

        return 0;
}
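
/*
 * With AMDGPU_GPU_PAGE_SIZE == 4096, max_size above corresponds to 16384
 * pages, so GTT mappings that have to take the write path are chopped into
 * chunks of roughly 64MB of address space each; VRAM mappings and GTT
 * mappings eligible for the copy path are handed to
 * amdgpu_vm_bo_update_mapping() in one piece.
 */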

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
{
        struct amdgpu_vm *vm = bo_va->vm;
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_gart *gtt = NULL;
        uint32_t flags;
        uint64_t addr;
        int r;

        if (mem) {
                addr = (u64)mem->start << PAGE_SHIFT;
                switch (mem->mem_type) {
                case TTM_PL_TT:
                        gtt = &bo_va->bo->adev->gart;
                        break;

                case TTM_PL_VRAM:
                        addr += adev->vm_manager.vram_base_offset;
                        break;

                default:
                        break;
                }
        } else {
                addr = 0;
        }

        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->vm_status))
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        spin_unlock(&vm->status_lock);

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
                                               &bo_va->last_pt_update);
                if (r)
                        return r;
        }

        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);

                list_for_each_entry(mapping, &bo_va->invalids, list)
                        trace_amdgpu_vm_bo_mapping(mapping);
        }

        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
        if (!mem)
                list_add(&bo_va->vm_status, &vm->cleared);
        spin_unlock(&vm->status_lock);

        return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        spin_lock(&vm->freed_lock);
        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
                spin_unlock(&vm->freed_lock);
                r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
                                               0, NULL);
                kfree(mapping);
                if (r)
                        return r;

                spin_lock(&vm->freed_lock);
        }
        spin_unlock(&vm->freed_lock);

        return 0;

}
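
/*
 * Note: freed mappings are unmapped by rewriting their range with gtt ==
 * NULL, addr == 0 and no valid flags, i.e. the PTEs become invalid. The
 * freed_lock is dropped around each update so new entries can be queued to
 * vm->freed while the clearing is in progress.
 */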

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                             struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
        struct amdgpu_bo_va *bo_va = NULL;
        int r = 0;

        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated,
                        struct amdgpu_bo_va, vm_status);
                spin_unlock(&vm->status_lock);
                mutex_lock(&bo_va->mutex);
                r = amdgpu_vm_bo_update(adev, bo_va, NULL);
                mutex_unlock(&bo_va->mutex);
                if (r)
                        return r;

                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);

        if (bo_va)
                r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

        return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
        if (bo_va == NULL) {
                return NULL;
        }
        bo_va->vm = vm;
        bo_va->bo = bo;
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
        INIT_LIST_HEAD(&bo_va->vm_status);
        mutex_init(&bo_va->mutex);
        list_add_tail(&bo_va->bo_list, &bo->va);

        return bo_va;
}
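
/*
 * amdgpu_vm_bo_add() is pure bookkeeping: it only creates the bo_va
 * structure and links it to the BO. Page tables are allocated when a
 * mapping is created with amdgpu_vm_bo_map() below, and the PTEs themselves
 * are only written once amdgpu_vm_bo_update() runs.
 */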

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t saddr, uint64_t offset,
                     uint64_t size, uint32_t flags)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = bo_va->vm;
        struct interval_tree_node *it;
        unsigned last_pfn, pt_idx;
        uint64_t eaddr;
        int r;

        /* validate the parameters */
        if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
            size == 0 || size & AMDGPU_GPU_PAGE_MASK)
                return -EINVAL;

        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
                return -EINVAL;

        last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
        if (last_pfn >= adev->vm_manager.max_pfn) {
                dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
                        last_pfn, adev->vm_manager.max_pfn);
                return -EINVAL;
        }

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;

        spin_lock(&vm->it_lock);
        it = interval_tree_iter_first(&vm->va, saddr, eaddr);
        spin_unlock(&vm->it_lock);
        if (it) {
                struct amdgpu_bo_va_mapping *tmp;
                tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
                /* bo and tmp overlap, invalid addr */
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
                        "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
                        tmp->it.start, tmp->it.last + 1);
                r = -EINVAL;
                goto error;
        }

        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping) {
                r = -ENOMEM;
                goto error;
        }

        INIT_LIST_HEAD(&mapping->list);
        mapping->it.start = saddr;
        mapping->it.last = eaddr;
        mapping->offset = offset;
        mapping->flags = flags;

        mutex_lock(&bo_va->mutex);
        list_add(&mapping->list, &bo_va->invalids);
        mutex_unlock(&bo_va->mutex);
        spin_lock(&vm->it_lock);
        interval_tree_insert(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_map(bo_va, mapping);

        /* Make sure the page tables are allocated */
        saddr >>= amdgpu_vm_block_size;
        eaddr >>= amdgpu_vm_block_size;

        BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

        if (eaddr > vm->max_pde_used)
                vm->max_pde_used = eaddr;

        /* walk over the address space and allocate the page tables */
        for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
                struct reservation_object *resv = vm->page_directory->tbo.resv;
                struct amdgpu_bo_list_entry *entry;
                struct amdgpu_bo *pt;

                entry = &vm->page_tables[pt_idx].entry;
                if (entry->robj)
                        continue;

                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                                     NULL, resv, &pt);
                if (r)
                        goto error_free;

                /* Keep a reference to the page table to avoid freeing
                 * them up in the wrong order.
                 */
                pt->parent = amdgpu_bo_ref(vm->page_directory);

                r = amdgpu_vm_clear_bo(adev, pt);
                if (r) {
                        amdgpu_bo_unref(&pt);
                        goto error_free;
                }

                entry->robj = pt;
                entry->priority = 0;
                entry->tv.bo = &entry->robj->tbo;
                entry->tv.shared = true;
                vm->page_tables[pt_idx].addr = 0;
        }

        return 0;

error_free:
        list_del(&mapping->list);
        spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);

error:
        return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t saddr)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = bo_va->vm;
        bool valid = true;

        saddr /= AMDGPU_GPU_PAGE_SIZE;
        mutex_lock(&bo_va->mutex);
        list_for_each_entry(mapping, &bo_va->valids, list) {
                if (mapping->it.start == saddr)
                        break;
        }

        if (&mapping->list == &bo_va->valids) {
                valid = false;

                list_for_each_entry(mapping, &bo_va->invalids, list) {
                        if (mapping->it.start == saddr)
                                break;
                }

                if (&mapping->list == &bo_va->invalids) {
                        mutex_unlock(&bo_va->mutex);
                        return -ENOENT;
                }
        }
        mutex_unlock(&bo_va->mutex);
        list_del(&mapping->list);
        spin_lock(&vm->it_lock);
        interval_tree_remove(&mapping->it, &vm->va);
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);

        if (valid) {
                spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
                spin_unlock(&vm->freed_lock);
        } else {
                kfree(mapping);
        }

        return 0;
}
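
/*
 * Note on the valid/invalid split above: a mapping found on the valids list
 * has PTEs committed to the page tables, so it is queued on vm->freed and
 * the entries are cleared later by amdgpu_vm_clear_freed(). A mapping still
 * on the invalids list has not been (re)written for the BO's current
 * placement and is simply freed.
 */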

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
{
        struct amdgpu_bo_va_mapping *mapping, *next;
        struct amdgpu_vm *vm = bo_va->vm;

        list_del(&bo_va->bo_list);

        spin_lock(&vm->status_lock);
        list_del(&bo_va->vm_status);
        spin_unlock(&vm->status_lock);

        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
                spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
                spin_unlock(&vm->it_lock);
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
                spin_unlock(&vm->freed_lock);
        }
        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                list_del(&mapping->list);
                spin_lock(&vm->it_lock);
                interval_tree_remove(&mapping->it, &vm->va);
                spin_unlock(&vm->it_lock);
                kfree(mapping);
        }
        fence_put(bo_va->last_pt_update);
        mutex_destroy(&bo_va->mutex);
        kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                spin_lock(&bo_va->vm->status_lock);
                if (list_empty(&bo_va->vm_status))
                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
                spin_unlock(&bo_va->vm->status_lock);
        }
}
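
/*
 * amdgpu_vm_bo_invalidate() is typically called when a BO's backing storage
 * changes location: every bo_va attached to the BO is put on its VM's
 * invalidated list, and the stale mappings are rewritten the next time
 * amdgpu_vm_clear_invalids() (and through it amdgpu_vm_bo_update()) runs for
 * that VM.
 */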

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries;
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                vm->ids[i].id = 0;
                vm->ids[i].flushed_updates = NULL;
        }
        vm->va = RB_ROOT;
        spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
        spin_lock_init(&vm->it_lock);
        spin_lock_init(&vm->freed_lock);
        pd_size = amdgpu_vm_directory_size(adev);
        pd_entries = amdgpu_vm_num_pdes(adev);

        /* allocate page table array */
        vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
        }

        vm->page_directory_fence = NULL;

        r = amdgpu_bo_create(adev, pd_size, align, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, NULL, &vm->page_directory);
        if (r)
                return r;
        r = amdgpu_bo_reserve(vm->page_directory, false);
        if (r) {
                amdgpu_bo_unref(&vm->page_directory);
                vm->page_directory = NULL;
                return r;
        }
        r = amdgpu_vm_clear_bo(adev, vm->page_directory);
        amdgpu_bo_unreserve(vm->page_directory);
        if (r) {
                amdgpu_bo_unref(&vm->page_directory);
                vm->page_directory = NULL;
                return r;
        }

        return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        int i;

        if (!RB_EMPTY_ROOT(&vm->va)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }
        rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
                list_del(&mapping->list);
                interval_tree_remove(&mapping->it, &vm->va);
                kfree(mapping);
        }
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                list_del(&mapping->list);
                kfree(mapping);
        }

        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
        drm_free_large(vm->page_tables);

        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                unsigned id = vm->ids[i].id;

                atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
                                    (long)vm, 0);
                fence_put(vm->ids[i].flushed_updates);
        }

}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
        unsigned i;

        INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < adev->vm_manager.num_ids; ++i)
                list_add_tail(&adev->vm_manager.ids[i].list,
                              &adev->vm_manager.ids_lru);
}
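
/*
 * Example: with vm_manager.num_ids == 16, IDs 1-15 end up on ids_lru and are
 * handed out by amdgpu_vm_grab_id(); VMID 0 never joins the LRU because it
 * is reserved for the system VM (kernel GART access).
 */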

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < AMDGPU_NUM_VM; ++i)
                fence_put(adev->vm_manager.ids[i].active);
}