/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU:
	 * pages_addr - DMA addresses to use for mapping
	 * kptr - kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: level of the PD/PT in the hierarchy
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level == 0)
		/* For the root directory */
		return adev->vm_manager.max_pfn >>
			(adev->vm_manager.block_size *
			 adev->vm_manager.num_level);
	else if (level == adev->vm_manager.num_level)
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
	else
		/* Everything in between */
		return 1 << adev->vm_manager.block_size;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: level of the PD/PT in the hierarchy
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
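/*
 * Worked example for the two helpers above (illustrative only; the real
 * values depend on the ASIC configuration): with block_size = 9 and
 * num_level = 3, a leaf page table holds AMDGPU_VM_PTE_COUNT = 1 << 9 =
 * 512 entries, as does every intermediate directory, while the root
 * covers max_pfn >> (9 * 3) entries. At 8 bytes per entry,
 * amdgpu_vm_bo_size() for a leaf page table is 512 * 8 = 4096 bytes,
 * i.e. a single GPU page.
 */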
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
				    int (*validate)(void *, struct amdgpu_bo *),
				    void *param, bool use_cpu_for_update,
				    struct ttm_bo_global *glob)
{
	unsigned i;
	int r = 0;

	if (use_cpu_for_update) {
		r = amdgpu_bo_kmap(parent->bo, NULL);
		if (r)
			return r;
	}

	if (!parent->entries)
		return 0;

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		if (!entry->bo)
			continue;

		r = validate(param, entry->bo);
		if (r)
			return r;

		spin_lock(&glob->lru_lock);
		ttm_bo_move_to_lru_tail(&entry->bo->tbo);
		if (entry->bo->shadow)
			ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
		spin_unlock(&glob->lru_lock);

		/*
		 * Recurse into the sub directory. This is harmless because we
		 * have only a maximum of 5 layers.
		 */
		r = amdgpu_vm_validate_level(entry, validate, param,
					     use_cpu_for_update, glob);
		if (r)
			return r;
	}

	return r;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	uint64_t num_evictions;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return 0;

	return amdgpu_vm_validate_level(&vm->root, validate, param,
					vm->use_cpu_for_update,
					adev->mman.bdev.glob);
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @saddr: start of the address range
 * @eaddr: end of the address range
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level)
{
	unsigned shift = (adev->vm_manager.num_level - level) *
		adev->vm_manager.block_size;
	unsigned pt_idx, from, to;
	int r;
	u64 flags;
	uint64_t init_value = 0;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = kvmalloc_array(num_entries,
						 sizeof(struct amdgpu_vm_pt),
						 GFP_KERNEL | __GFP_ZERO);
		if (!parent->entries)
			return -ENOMEM;
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;

	if (to > parent->last_entry_used)
		parent->last_entry_used = to;

	++level;
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			  AMDGPU_GEM_CREATE_SHADOW);

	if (vm->pte_support_ats) {
		init_value = AMDGPU_PTE_SYSTEM;
		if (level != adev->vm_manager.num_level - 1)
			init_value |= AMDGPU_PDE_PTE;
	}

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.bo->tbo.resv;
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

		if (!entry->bo) {
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     flags,
					     NULL, resv, init_value, &pt);
			if (r)
				return r;

			if (vm->use_cpu_for_update) {
				r = amdgpu_bo_kmap(pt, NULL);
				if (r) {
					amdgpu_bo_unref(&pt);
					return r;
				}
			}

			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
			pt->parent = amdgpu_bo_ref(vm->root.bo);

			entry->bo = pt;
			entry->addr = 0;
		}

		if (level < adev->vm_manager.num_level) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t last_pfn;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
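/*
 * Usage sketch for amdgpu_vm_alloc_pts() above (illustrative only, not
 * taken from a real caller): byte addresses and sizes must be GPU page
 * aligned, e.g. reserving page tables for a 2 MiB range at VA 16 MiB
 * before mapping it:
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, 16ULL << 20, 2ULL << 20);
 *	if (r)
 *		return r;
 */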
/**
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
{
	return !!vm->reserved_vmid[vmhub];
}

/* id_mgr->lock must be held */
static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
					       struct amdgpu_ring *ring,
					       struct amdgpu_sync *sync,
					       struct dma_fence *fence,
					       struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence *updates = sync->last_vm_update;
	int r = 0;
	struct dma_fence *flushed, *tmp;
	bool needs_flush = vm->use_cpu_for_update;

	flushed = id->flushed_updates;
	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
	    (atomic64_read(&id->owner) != vm->client_id) ||
	    (job->vm_pd_addr != id->pd_gpu_addr) ||
	    (updates && (!flushed || updates->context != flushed->context ||
			 dma_fence_is_later(updates, flushed))) ||
	    (!id->last_flush || (id->last_flush->context != fence_context &&
				 !dma_fence_is_signaled(id->last_flush)))) {
		needs_flush = true;
		/* to prevent one context starved by another context */
		id->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&id->active, ring);
		if (tmp) {
			r = amdgpu_sync_fence(adev, sync, tmp);
			return r;
		}
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto out;

	if (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) {
		dma_fence_put(id->flushed_updates);
		id->flushed_updates = dma_fence_get(updates);
	}
	id->pd_gpu_addr = job->vm_pd_addr;
	atomic64_set(&id->owner, vm->client_id);
	job->vm_needs_flush = needs_flush;
	if (needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);
out:
	return r;
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}
	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
		return -ENOMEM;
	}
	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&id_mgr->lock);
		return 0;

	}
	kfree(fences);

	job->vm_needs_flush = vm->use_cpu_for_update;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		struct dma_fence *flushed;
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if (amdgpu_vm_had_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;

		flushed = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}

		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;
	}

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	id->pd_gpu_addr = job->vm_pd_addr;
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
	atomic64_set(&id->owner, vm->client_id);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr;
	struct amdgpu_vm_id *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vm_id == 0)
		return false;
	id = &id_mgr->ids[job->vm_id];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vm_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
	return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: whether a pipeline sync needs to be emitted
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vm_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
	}

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
		struct dma_fence *fence;

		trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub index
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];

	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset VMID to force flush on next use
 */
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vm_reset_id(adev, i, j);
	}
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
		if (bo_va->base.vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
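/*
 * Worked example for amdgpu_vm_map_gart() above (illustrative only):
 * with 64 KiB CPU pages (PAGE_SHIFT == 16) and addr == 0x23456,
 * pages_addr[0x2] supplies the DMA address of the CPU page, the low
 * bits 0x3456 are OR'ed back in to select the offset inside it, and the
 * final mask drops the lowest 12 bits so the result is the 4 KiB
 * aligned GPU page address.  With 4 KiB CPU pages the OR and the mask
 * cancel out and the table lookup alone determines the result.
 */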
/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
	uint64_t value;

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	for (i = 0; i < count; i++) {
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
					i, value, flags);
		addr += incr;
	}
}

static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}

/*
 * amdgpu_vm_update_level - update a single level in the hierarchy
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent directory
 *
 * Makes sure all entries in @parent are up to date.
 * Returns 0 for success, error for failure.
 */
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	struct amdgpu_bo *shadow;
	struct amdgpu_ring *ring = NULL;
	uint64_t pd_addr, shadow_addr = 0;
	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
	unsigned count = 0, pt_idx, ndw = 0;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *fence = NULL;

	int r;

	if (!parent->entries)
		return 0;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	shadow = parent->bo->shadow;

	if (vm->use_cpu_for_update) {
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ring = container_of(vm->entity.sched, struct amdgpu_ring,
				    sched);

		/* padding, etc. */
		ndw = 64;

		/* assume the worst case */
		ndw += parent->last_entry_used * 6;

		pd_addr = amdgpu_bo_gpu_offset(parent->bo);

		if (shadow) {
			shadow_addr = amdgpu_bo_gpu_offset(shadow);
			ndw *= 2;
		} else {
			shadow_addr = 0;
		}

		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

	/* walk over the address space and update the directory */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		pt = amdgpu_gart_get_vm_pde(adev, pt);
		/* Don't update huge pages here */
		if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
		    parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
			continue;

		parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt) ||
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				if (shadow)
					params.func(&params,
						    last_shadow,
						    last_pt, count,
						    incr,
						    AMDGPU_PTE_VALID);

				params.func(&params, last_pde,
					    last_pt, count, incr,
					    AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_shadow = shadow_addr + pt_idx * 8;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		if (vm->root.bo->shadow)
			params.func(&params, last_shadow, last_pt,
				    count, incr, AMDGPU_PTE_VALID);

		params.func(&params, last_pde, last_pt,
			    count, incr, AMDGPU_PTE_VALID);
	}

	if (!vm->use_cpu_for_update) {
		if (params.ib->length_dw == 0) {
			amdgpu_job_free(job);
		} else {
			amdgpu_ring_pad_ib(ring, params.ib);
			amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
					 AMDGPU_FENCE_OWNER_VM);
			if (shadow)
				amdgpu_sync_resv(adev, &job->sync,
						 shadow->tbo.resv,
						 AMDGPU_FENCE_OWNER_VM);

			WARN_ON(params.ib->length_dw > ndw);
			r = amdgpu_job_submit(job, ring, &vm->entity,
					      AMDGPU_FENCE_OWNER_VM, &fence);
			if (r)
				goto error_free;

			amdgpu_bo_fence(parent->bo, fence, true);
			dma_fence_put(vm->last_dir_update);
			vm->last_dir_update = dma_fence_get(fence);
			dma_fence_put(fence);
		}
	}
	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->bo)
			continue;

		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
		if (r)
			return r;
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
 * @parent: parent PD
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
{
	unsigned pt_idx;

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->bo)
			continue;

		entry->addr = ~0ULL;
		amdgpu_vm_invalidate_level(entry);
	}
}

/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
	if (r)
		amdgpu_vm_invalidate_level(&vm->root);

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_gart_flush_gpu_tlb(adev, 0);
	}

	return r;
}

/**
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
{
	unsigned idx, level = p->adev->vm_manager.num_level;

	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
		idx = addr >> (p->adev->vm_manager.block_size * level--);
		idx %= amdgpu_bo_size((*entry)->bo) / 8;
		*parent = *entry;
		*entry = &(*entry)->entries[idx];
	}

	if (level)
		*entry = NULL;
}
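/*
 * Worked example for the walk above (illustrative only; assuming
 * block_size = 9 and num_level = 3): for an address given in GPU pages,
 * the loop selects root entry addr >> 27, then directory entry
 * (addr >> 18) % 512, and finally the page table at (addr >> 9) % 512.
 * The remaining addr % 512 is the PTE index inside that table and is
 * applied later by amdgpu_vm_update_ptes().
 */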
/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
{
	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
	uint64_t pd_addr, pde;

	/* In the case of a mixed PT the PDE must point to it*/
	if (p->adev->asic_type < CHIP_VEGA10 ||
	    nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
	    p->src ||
	    !(flags & AMDGPU_PTE_VALID)) {

		dst = amdgpu_bo_gpu_offset(entry->bo);
		dst = amdgpu_gart_get_vm_pde(p->adev, dst);
		flags = AMDGPU_PTE_VALID;
	} else {
		/* Set the huge page flag to stop scanning at this PDE */
		flags |= AMDGPU_PDE_PTE;
	}

	if (entry->addr == (dst | flags))
		return;

	entry->addr = (dst | flags);

	if (use_cpu_update) {
		/* In case a huge page is replaced with a system
		 * memory mapping, p->pages_addr != NULL and
		 * amdgpu_vm_cpu_set_ptes would try to translate dst
		 * through amdgpu_vm_map_gart. But dst is already a
		 * GPU address (of the page table). Disable
		 * amdgpu_vm_map_gart temporarily.
		 */
		dma_addr_t *tmp;

		tmp = p->pages_addr;
		p->pages_addr = NULL;

		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
		pde = pd_addr + (entry - parent->entries) * 8;
		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);

		p->pages_addr = tmp;
	} else {
		if (parent->bo->shadow) {
			pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
			pde = pd_addr + (entry - parent->entries) * 8;
			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
		}
		pd_addr = amdgpu_bo_gpu_offset(parent->bo);
		pde = pd_addr + (entry - parent->entries) * 8;
		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;

	uint64_t addr, pe_start;
	struct amdgpu_bo *pt;
	unsigned nptes;
	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
		/* We don't need to update PTEs for huge pages */
		if (entry->addr & AMDGPU_PDE_PTE)
			continue;

		pt = entry->bo;
		if (use_cpu_update) {
			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
		} else {
			if (pt->shadow) {
				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
				pe_start += (addr & mask) * 8;
				params->func(params, pe_start, dst, nptes,
					     AMDGPU_GPU_PAGE_SIZE, flags);
			}
			pe_start = amdgpu_bo_gpu_offset(pt);
		}

		pe_start += (addr & mask) * 8;
		params->func(params, pe_start, dst, nptes,
			     AMDGPU_GPU_PAGE_SIZE, flags);
	}

	return 0;
}
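/*
 * Worked example for the chunking above (illustrative only; assuming
 * block_size = 9, i.e. 512 PTEs per page table): an update starting at
 * addr = 0x1f0 first writes 512 - (0x1f0 & 0x1ff) = 0x10 entries to
 * finish the current page table, then continues in chunks of 512
 * entries per table, and the final chunk is trimmed to end - addr once
 * addr and end fall into the same table.
 */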
/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
			       uint64_t start, uint64_t end,
			       uint64_t dst, uint64_t flags)
{
	int r;

	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
	unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
	uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
	uint64_t frag_align = 1 << pages_per_frag;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	/* system pages are not contiguous */
	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end))
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		r = amdgpu_vm_update_ptes(params, start, frag_start,
					  dst, flags);
		if (r)
			return r;
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
				  flags | frag_flags);
	if (r)
		return r;

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
	}
	return r;
}
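/*
 * Worked example for the fragment handling above (illustrative only;
 * assuming fragment_size = 9, i.e. 2 MiB fragments of 512 GPU pages,
 * since 1 << (12 + 9) == 2 MiB): a mapping covering pages [0x1f0, 0x4f0)
 * is split into a 4K head [0x1f0, 0x200), a middle [0x200, 0x400)
 * written with the FRAG(9) flag set, and a 4K tail [0x400, 0x4f0).
 * A mapping whose base address and size are already 2 MiB aligned
 * consists of the middle part only.
 */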
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct dma_fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.src = src;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
		r = amdgpu_vm_wait_pd(adev, vm, owner);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;

	/* padding, etc. */
	ndw = 64;

	/* one PDE write for each huge page */
	ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

		params.func = amdgpu_vm_do_copy_ptes;

	} else if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * 7;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;

		params.func = amdgpu_vm_do_set_ptes;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (!src && pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
	if (r)
		goto error_free;

	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.bo, f, true);
	dma_fence_put(*fence);
	*fence = f;
	return 0;

error_free:
	amdgpu_job_free(job);
	amdgpu_vm_invalidate_level(&vm->root);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	uint64_t pfn, src = 0, start = mapping->start;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			max_entries = min(max_entries, 16ull * 1024ull);
			addr = 0;
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
		}
		addr += pfn << PAGE_SHIFT;

		last = min((uint64_t)mapping->last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->last + 1));

	return 0;
}
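/*
 * Worked example for the cap above (illustrative only): with 4 KiB GPU
 * pages the min(max_entries, 16ull * 1024ull) clamp limits a system
 * memory (GTT) mapping to 16384 PTEs, i.e. 64 MiB, per
 * amdgpu_vm_bo_update_mapping() call; larger mappings simply loop.
 * VRAM mappings are instead split at drm_mm_node boundaries.
 */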
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive;
	uint64_t flags;
	int r;

	if (clear || !bo_va->base.bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo_va->base.bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo_va->base.bo->tbo.ttm,
					   struct ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo->tbo.resv);
	}

	if (bo)
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
	else
		flags = 0x0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->base.vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
					       mapping, flags, nodes,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->base.vm_status);
	if (clear)
		list_add(&bo_va->base.vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_gart_flush_gpu_tlb(adev, 0);
	}

	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gart.gart_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gart.gart_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gart.gart_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	int r;
	uint64_t init_pte_value = 0;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats)
			init_pte_value = AMDGPU_PTE_SYSTEM;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
						mapping->start, mapping->last,
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;
}

/**
 * amdgpu_vm_clear_moved - clear moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all moved BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved,
			struct amdgpu_bo_va, base.vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, true);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->base.vm = vm;
	bo_va->base.bo = bo;
	INIT_LIST_HEAD(&bo_va->base.bo_list);
	INIT_LIST_HEAD(&bo_va->base.vm_status);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (bo)
		list_add_tail(&bo_va->base.bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure the object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->list);
	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
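 *
 * Example (illustrative sketch, same hypothetical values as for
 * amdgpu_vm_bo_map()): unlike amdgpu_vm_bo_map(), this variant first clears
 * anything overlapping the range instead of failing with -EINVAL:
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, 0x100000, 0, 0x100000,
 *				     AMDGPU_PTE_READABLE);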
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure the object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, splitting them as appropriate.
 * Returns 0 for success, error for failure.
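 *
 * Example (illustrative sketch with hypothetical addresses): clearing
 * 0x2000-0x2fff while a single mapping covers 0x1000-0x3fff leaves a
 * "before" remainder at 0x1000-0x1fff and an "after" remainder at
 * 0x3000-0x3fff, and queues the middle part on &vm->freed:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, 0x2000, 0x1000);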
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
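 *
 * Example (illustrative sketch): dropping a binding created with
 * amdgpu_vm_bo_add(); the still-valid mappings land on &vm->freed and are
 * actually cleared by the next amdgpu_vm_clear_freed() call:
 *
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 *	r = amdgpu_vm_clear_freed(adev, vm, NULL);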
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status,
				 &bo_base->vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that, split equally between PD and PTs
	 * (e.g. a 64GB VM needs 24 bits, giving a block size of 13).
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}

/**
 * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
 *
 * @adev: amdgpu_device pointer
 * @fragment_size_default: the default fragment size if it's set to auto
 */
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
{
	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
}

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set to auto
 * @fragment_size_default: the default fragment size if it's set to auto
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
{
	/* adjust vm size first */
	if (amdgpu_vm_size == -1)
		adev->vm_manager.vm_size = vm_size;
	else
		adev->vm_manager.vm_size = amdgpu_vm_size;

	/* block size depends on vm size */
	if (amdgpu_vm_block_size == -1)
		adev->vm_manager.block_size =
			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
	else
		adev->vm_manager.block_size = amdgpu_vm_block_size;

	amdgpu_vm_set_fragment_size(adev, fragment_size_default);

	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
		 adev->vm_manager.vm_size, adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: indicates whether it is a GFX or Compute context
 *
 * Init @vm fields.
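 *
 * Example (illustrative sketch, assuming the usual AMDGPU_VM_CONTEXT_* values):
 * a per-process GFX VM would typically be created and later torn down like
 * this:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX);
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);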
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r, i;
	u64 flags;
	uint64_t init_pde_value = 0;

	vm->va = RB_ROOT_CACHED;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN) {
			vm->pte_support_ats = true;
			init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
		}
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_dir_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			  AMDGPU_GEM_CREATE_SHADOW);

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     flags,
			     NULL, NULL, init_pde_value, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		goto error_free_root;

	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	if (vm->use_cpu_for_update) {
		r = amdgpu_bo_kmap(vm->root.bo, NULL);
		if (r)
			goto error_free_root;
	}

	amdgpu_bo_unreserve(vm->root.bo);

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.bo->shadow);
	amdgpu_bo_unref(&vm->root.bo);
	vm->root.bo = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @level: PD/PT starting level to free
 *
 * Free the page directory or page table level and all sub levels.
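 *
 * Illustration (editorial note): the recursion releases the level's BO (and
 * its shadow) first and then descends into each used entry, so freeing the
 * whole hierarchy is a single call on the root:
 *
 *	amdgpu_vm_free_levels(&vm->root);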
 */
static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
	unsigned i;

	if (level->bo) {
		amdgpu_bo_unref(&level->bo->shadow);
		amdgpu_bo_unref(&level->bo);
	}

	if (level->entries)
		for (i = 0; i <= level->last_entry_used; i++)
			amdgpu_vm_free_levels(&level->entries[i]);

	kvfree(level->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vm_free_reserved_vmid(adev, vm, i);
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, Compute VM tables are updated by the
	 * CPU by default, but only on large BAR systems.
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
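 *
 * Example (illustrative sketch; where exactly these are called from is outside
 * the scope of this file): it is expected to be paired with
 * amdgpu_vm_manager_init() during device setup and teardown:
 *
 *	amdgpu_vm_manager_init(adev);
 *	...
 *	amdgpu_vm_manager_fini(adev);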
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only have the requirement to reserve a VMID
		 * from the gfxhub
		 */
		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
						  AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
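
/*
 * Example (editorial sketch, not part of the driver): from userspace the
 * reserve/unreserve operations handled above are reached through the
 * DRM_AMDGPU_VM command with the same drm_amdgpu_vm union; the exact libdrm
 * call below is an assumption, not ABI documentation:
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */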