/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
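/*
 * Worked example (a sketch, not driver policy): the helpers below index the
 * GART table in GPU pages (t) and the pages[]/pages_addr[] arrays in CPU
 * pages (p), with t = offset / RADEON_GPU_PAGE_SIZE and
 * p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE). Assuming 4 KiB CPU pages and
 * 4 KiB GPU pages, one CPU page maps to exactly one GART entry, so t == p;
 * with 64 KiB CPU pages each CPU page would cover 16 consecutive GART
 * entries, which is why bind/unbind walk an inner loop of
 * PAGE_SIZE / RADEON_GPU_PAGE_SIZE entries per CPU page.
 */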
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}
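/*
 * Sizing sketch (illustrative numbers, not taken from a specific board):
 * with a 512 MiB GTT and 4 KiB CPU/GPU pages, num_cpu_pages and
 * num_gpu_pages above are both 131072, so the pages[] array costs
 * 131072 * sizeof(void *) = 1 MiB on a 64-bit kernel and pages_addr[]
 * up to another 1 MiB, which is why these arrays come from vzalloc()
 * rather than a physically contiguous kmalloc() allocation.
 */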
/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	vfree(rdev->gart.pages);
	vfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
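/*
 * Sizing sketch (assumed figures): with RADEON_VM_BLOCK_SIZE = 9 and a
 * 1 GiB per-VM address space (max_pfn = 262144 4 KiB pages), there are
 * 262144 >> 9 = 512 page directory entries of 8 bytes each, i.e. a 4 KiB
 * page directory, while a fully populated set of page tables needs
 * max_pfn * 8 = 2 MiB. The manager below sizes its SA pool to hold two
 * such complete sets.
 */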
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	struct radeon_vm *vm;
	struct radeon_bo_va *bo_va;
	int r;
	unsigned size;

	if (!rdev->vm_manager.enabled) {
		/* allocate enough for 2 full VM pts */
		size = radeon_vm_directory_size(rdev);
		size += rdev->vm_manager.max_pfn * 8;
		size *= 2;
		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
					      RADEON_GPU_PAGE_ALIGN(size),
					      RADEON_VM_PTB_ALIGN_SIZE,
					      RADEON_GEM_DOMAIN_VRAM);
		if (r) {
			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
				(rdev->vm_manager.max_pfn * 8) >> 10);
			return r;
		}

		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;

		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
		if (r)
			return r;
	}

	/* restore page table */
	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
		if (vm->page_directory == NULL)
			continue;

		list_for_each_entry(bo_va, &vm->va, vm_list) {
			bo_va->valid = false;
		}
	}
	return 0;
}

/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
			      struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int i;

	if (!vm->page_directory)
		return;

	list_del_init(&vm->list);
	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}

	if (vm->page_tables == NULL)
		return;

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

	kfree(vm->page_tables);
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	mutex_lock(&rdev->vm_manager.lock);
	/* free all allocated page tables */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		mutex_lock(&vm->mutex);
		radeon_vm_free_pt(rdev, vm);
		mutex_unlock(&vm->mutex);
	}
	for (i = 0; i < RADEON_NUM_VM; ++i) {
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	}
	radeon_asic_vm_fini(rdev);
	mutex_unlock(&rdev->vm_manager.lock);

	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;

	if (list_empty(&rdev->vm_manager.lru_vm))
		return -ENOMEM;

	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
				    struct radeon_vm, list);
	if (vm_evict == vm)
		return -ENOMEM;

	mutex_lock(&vm_evict->mutex);
	radeon_vm_free_pt(rdev, vm_evict);
	mutex_unlock(&vm_evict->mutex);
	return 0;
}

/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	struct radeon_ib ib;
	int r;

	if (vm == NULL) {
		return -EINVAL;
	}

	if (vm->page_directory != NULL) {
		return 0;
	}

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
			     &vm->page_directory, pd_size,
			     RADEON_VM_PTB_ALIGN_SIZE, false);
	if (r == -ENOMEM) {
		r = radeon_vm_evict(rdev, vm);
		if (r)
			return r;
		goto retry;

	} else if (r) {
		return r;
	}

	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

	/* Initially clear the page directory */
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, pd_entries * 2 + 64);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
				0, pd_entries, 0, 0);

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	/* allocate page table array */
	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return -ENOMEM;
	}

	return 0;
}

/**
 * radeon_vm_add_to_lru - add the VM's page table to the LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
	list_del_init(&vm->list);
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}
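/*
 * Typical flow for mapping a bo into a VM, as a rough sketch only (va_offset,
 * flags and mem are placeholder arguments; callers must hold the reservations
 * and mutexes documented on each helper):
 *
 *   bo_va = radeon_vm_bo_add(rdev, vm, bo);
 *   radeon_vm_bo_set_addr(rdev, bo_va, va_offset, flags);
 *   radeon_vm_bo_update_pte(rdev, vm, bo, mem);   (mem: the bo's placement)
 *   ...
 *   radeon_vm_bo_rmv(rdev, bo_va);
 */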
/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	mutex_unlock(&vm->mutex);
	return 0;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
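/*
 * Worked example (illustrative numbers): for addr = 0x12345 with 4 KiB CPU
 * pages, radeon_vm_map_gart() looks up pages_addr[0x12] and ORs in the
 * in-page offset 0x345, so a GART-space address is translated to the real
 * bus address of the backing system page before it is written into a VM
 * page table entry.
 */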
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;
	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_pdes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end)
{
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0;
	uint64_t pt_idx;
	int r;

	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

	/* walk over the address space and update the page directory */
	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
		uint64_t pde, pt;

		if (vm->page_tables[pt_idx])
			continue;

retry:
		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
				     &vm->page_tables[pt_idx],
				     RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false);

		if (r == -ENOMEM) {
			r = radeon_vm_evict(rdev, vm);
			if (r)
				return r;
			goto retry;
		} else if (r) {
			return r;
		}

		pde = vm->pd_gpu_addr + pt_idx * 8;

		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);

		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);

				count *= RADEON_VM_PTE_COUNT;
				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
							count, 0, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

		count *= RADEON_VM_PTE_COUNT;
		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
					count, 0, 0);
	}

	return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
		unsigned nptes;
		uint64_t pte;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pte,
					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}
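/*
 * Addressing sketch (derived from the two helpers above): a VM address is
 * first converted to a GPU page frame, gpu_pfn = addr / RADEON_GPU_PAGE_SIZE;
 * the page directory slot (pt_idx in the helpers) is then
 * gpu_pfn >> RADEON_VM_BLOCK_SIZE and the entry within that page table is
 * gpu_pfn & (RADEON_VM_PTE_COUNT - 1). Each PDE/PTE is 8 bytes, which is
 * why both helpers step their write addresses in multiples of 8.
 */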
/**
 * radeon_vm_bo_update_pte - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and global and local mutex must be locked!
 */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, npdes, ndw;
	uint64_t addr;
	int r;

	/* nothing to do if vm isn't bound */
	if (vm->page_directory == NULL)
		return 0;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	nptes = radeon_bo_ngpu_pages(bo);

	/* assume two extra pdes in case the mapping overlaps the borders */
	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;

	/* padding, etc. */
	ndw = 64;

	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* reserve space for one header for every 2k dwords */
	ndw += (npdes >> 11) * 4;

	/* reserve space for pde addresses */
	ndw += npdes * 2;

	/* reserve space for clearing new page tables */
	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset) {
		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
	}
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	vm->id = 0;
	vm->fence = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_free_pt(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	mutex_unlock(&vm->mutex);
}