/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
{
	struct amdgpu_mman *mman;
	struct amdgpu_device *adev;

	mman = container_of(bdev, struct amdgpu_mman, bdev);
	adev = container_of(mman, struct amdgpu_device, mman);
	return adev;
}
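
/*
 * TTM callbacks only get a ttm_bo_device pointer; amdgpu_get_adev() above
 * recovers the owning device because mman.bdev is embedded in amdgpu_mman,
 * which is itself embedded in amdgpu_device.  Typical use in this file
 * (sketch only, no new symbols assumed):
 *
 *	struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
 */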

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		amd_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_get_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_bo *abo;
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};
	unsigned i;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = container_of(bo, struct amdgpu_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
			for (i = 0; i < abo->placement.num_placement; ++i) {
				if (!(abo->placements[i].flags &
				      TTM_PL_FLAG_TT))
					continue;

				if (abo->placements[i].lpfn)
					continue;

				/* set an upper limit to force directly
				 * allocating address space for the BO.
				 */
				abo->placements[i].lpfn =
					abo->adev->mc.gtt_size >> PAGE_SHIFT;
			}
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_ring *ring;
	uint64_t old_start, new_start;
	struct fence *fence;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	ring = adev->mman.buffer_funcs_ring;

	switch (old_mem->mem_type) {
	case TTM_PL_TT:
		r = amdgpu_ttm_bind(bo, old_mem);
		if (r)
			return r;

		/* fall through */
	case TTM_PL_VRAM:
		old_start = (u64)old_mem->start << PAGE_SHIFT;
		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_TT:
		r = amdgpu_ttm_bind(bo, new_mem);
		if (r)
			return r;

		/* fall through */
	case TTM_PL_VRAM:
		new_start = (u64)new_mem->start << PAGE_SHIFT;
		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

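	/*
	 * Both offsets are now GPU addresses: the manager's gpu_offset (GTT
	 * or VRAM base) plus the node's page offset.  The SDMA copy below is
	 * synchronized against the BO's reservation object and returns the
	 * fence used for the pipelined move.
	 */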
	r = amdgpu_copy_buffer(ring, old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE, /* bytes */
			       bo->resv, &fence, false);
	if (r)
		return r;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
}

static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = container_of(bo, struct amdgpu_bo, tbo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_get_adev(bo->bdev);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
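	/*
	 * Remaining cases: GTT <-> system only needs a GART (un)bind, VRAM
	 * <-> system is bounced through a temporary GTT placement
	 * (amdgpu_move_vram_ram()/amdgpu_move_ram_vram()), anything else is
	 * a direct blit; if no copy ring is available we fall back to
	 * ttm_bo_move_memcpy().
	 */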
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_get_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = adev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			adev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
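
/*
 * amdgpu_ttm_io_mem_reserve() above only describes where a mapping would
 * live: for CPU-visible VRAM it reports bus.base = adev->mc.aper_base and
 * bus.offset as the BO's byte offset inside the aperture.  TTM (and the
 * alpha-specific code above) then builds the actual CPU mapping roughly as
 * follows (illustrative sketch, not a helper defined here):
 *
 *	void __iomem *vaddr = ioremap_wc(mem->bus.base + mem->bus.offset,
 *					 mem->bus.size);
 */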

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	struct list_head	list;
};

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	unsigned pinned = 0;
	int r;

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	return 0;

release_pages:
	release_pages(pages, pinned, 0);
	return r;
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
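	/* Tear the mapping down with the same DMA direction that
	 * amdgpu_ttm_tt_pin_userptr() used to create it.
	 */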
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	return 0;
}

bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	return gtt && !list_empty(&gtt->list);
}

int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	uint32_t flags;
	int r;

	if (!ttm || amdgpu_ttm_is_bound(ttm))
		return 0;

	r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
				 NULL, bo_mem);
	if (r) {
		DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
		return r;
	}

	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
		return r;
	}
	spin_lock(&gtt->adev->gtt_list_lock);
	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
	spin_unlock(&gtt->adev->gtt_list_lock);
	return 0;
}

int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
	struct amdgpu_ttm_tt *gtt, *tmp;
	struct ttm_mem_reg bo_mem;
	uint32_t flags;
	int r;

	bo_mem.mem_type = TTM_PL_TT;
	spin_lock(&adev->gtt_list_lock);
	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
				     flags);
		if (r) {
			spin_unlock(&adev->gtt_list_lock);
			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
				  gtt->ttm.ttm.num_pages, gtt->offset);
			return r;
		}
	}
	spin_unlock(&adev->gtt_list_lock);
	return 0;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (!amdgpu_ttm_is_bound(ttm))
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	if (gtt->adev->gart.ready)
		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);

	spin_lock(&gtt->adev->gtt_list_lock);
	list_del_init(&gtt->list);
	spin_unlock(&gtt->adev->gtt_list_lock);

	return 0;
}
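
/*
 * BOs bound through amdgpu_ttm_bind() stay on adev->gtt_list so that
 * amdgpu_ttm_recover_gart() can rewrite their GART entries after the table
 * contents are lost, e.g. across a GPU reset; amdgpu_ttm_backend_unbind()
 * above removes them again.  The reset path is expected to simply call
 * (sketch of the external caller, which lives outside this file):
 *
 *	amdgpu_ttm_recover_gart(adev);
 */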

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_get_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	INIT_LIST_HEAD(&gtt->list);
	return &gtt->ttm.ttm;
}

static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint32_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (adev->asic_type >= CHIP_TONGA)
		flags |= AMDGPU_PTE_EXECUTABLE;

	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
	unsigned i, j;

	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
			if (&tbo->lru == lru->lru[j])
				lru->lru[j] = tbo->lru.prev;

		if (&tbo->swap == lru->swap_lru)
			lru->swap_lru = tbo->swap.prev;
	}
}

static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
	unsigned log2_size = min(ilog2(tbo->num_pages),
				 AMDGPU_TTM_LRU_SIZE - 1);

	return &adev->mman.log2_size[log2_size];
}

static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
{
	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
	struct list_head *res = lru->lru[tbo->mem.mem_type];

	lru->lru[tbo->mem.mem_type] = &tbo->lru;
	while ((++lru)->lru[tbo->mem.mem_type] == res)
		lru->lru[tbo->mem.mem_type] = &tbo->lru;

	return res;
}

static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
{
	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
	struct list_head *res = lru->swap_lru;

	lru->swap_lru = &tbo->swap;
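	/*
	 * Propagate the new tail into every following bucket that still
	 * pointed at the old tail; the guard entry (initialized to NULL in
	 * amdgpu_ttm_init()) terminates the walk.
	 */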
	while ((++lru)->swap_lru == res)
		lru->swap_lru = &tbo->swap;

	return res;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.lru_removal = &amdgpu_ttm_lru_removal,
	.lru_tail = &amdgpu_ttm_lru_tail,
	.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};

int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
			lru->lru[j] = &adev->mman.bdev.man[j].lru;
		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
	}

	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
		adev->mman.guard.lru[j] = NULL;
	adev->mman.guard.swap_lru = NULL;

	adev->mman.initialized = true;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
	amdgpu_bo_unreserve(adev->stollen_vga_memory);
	if (r) {
		amdgpu_bo_unref(&adev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
			   adev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.mem.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	/* GWS */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	/* OA */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int r;

	if (!adev->mman.initialized)
		return;
	amdgpu_ttm_debugfs_fini(adev);
	if (adev->stollen_vga_memory) {
		r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
		if (r == 0) {
			amdgpu_bo_unpin(adev->stollen_vga_memory);
			amdgpu_bo_unreserve(adev->stollen_vga_memory);
		}
		amdgpu_bo_unref(&adev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_gart_fini(adev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!adev->mman.initialized)
		return;

	man = &adev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence, bool direct_submit)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;
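
	/*
	 * When the caller hands in a reservation object, make the job wait
	 * for all fences already attached to it before the copy runs.
	 */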
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, NULL, fence);
		job->fence = fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct fence **fence)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_job *job;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	uint32_t max_bytes, byte_count;
	uint64_t dst_offset;
	unsigned int num_loops, num_dw;
	unsigned int i;
	int r;

	byte_count = bo->tbo.num_pages << PAGE_SHIFT;
	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
					dst_offset, cur_size_in_bytes);

		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = adev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	if (ttm_pl == TTM_PL_VRAM)
		seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
			   adev->mman.bdev.man[ttm_pl].size,
			   (u64)atomic64_read(&adev->vram_usage) >> 20,
			   (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.llseek = default_llseek
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
	adev->mman.vram = ent;

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	i_size_write(ent->d_inode, adev->mc.gtt_size);
	adev->mman.gtt = ent;

#endif
	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
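
	/*
	 * ttm_dma_page_pool is the last entry of amdgpu_ttm_debugfs_list;
	 * drop it from the count when SWIOTLB is compiled in but not in use.
	 */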
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(adev->mman.vram);
	adev->mman.vram = NULL;

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	debugfs_remove(adev->mman.gtt);
	adev->mman.gtt = NULL;
#endif

#endif
}

u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
{
	return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
}