/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		amd_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
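
/**
 * amdgpu_init_mem_type - set up a ttm_mem_type_manager for one memory
 * type (system, GTT, VRAM, GDS, GWS or OA), selecting its manager
 * functions, GPU offset, caching options and flags.
 */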
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->mc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (adev->mman.buffer_funcs &&
		    adev->mman.buffer_funcs_ring &&
		    adev->mman.buffer_funcs_ring->ready == false) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
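
/**
 * amdgpu_mm_node_addr - compute the GPU address of a drm_mm_node;
 * returns 0 for GTT memory that has not been assigned a GART address
 * yet.
 */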
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
 * corresponding to @offset. It also modifies the offset to be
 * within the drm_mm_node returned
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);
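
	/* Copy the data node by node; GTT-backed nodes that have no GART
	 * address yet are mapped through GART window 0 (src) and window 1
	 * (dst) before each partial copy.
	 */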
	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}


static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->resv, &fence);
	if (r)
		goto error;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
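
/**
 * amdgpu_move_vram_ram - evict a BO from VRAM to system memory by
 * blitting it into a temporary GTT placement first and then letting
 * TTM move the pages out to system RAM.
 */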
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
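
/**
 * amdgpu_bo_move - fulfill a TTM move request, preferring the SDMA
 * blit path and falling back to a CPU memcpy when the buffer
 * functions or their ring are not available.
 */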
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx->interruptible,
				       ctx->no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = adev->mc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
};

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned);
	up_read(&current->mm->mmap_sem);
	return r;
}
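
/**
 * amdgpu_ttm_tt_set_user_pages - replace the page array of a userptr
 * TT with @pages (or clear it when @pages is NULL), dropping the
 * references held on the old pages.
 */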
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}
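
/* Unmap the user pages backing a userptr TT and release the sg table;
 * the pages are marked accessed (and dirty unless the mapping is
 * read-only) so the MM can write them back.
 */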
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}

static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t flags;
	int r;

	if (bo->mem.mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
		TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
			     bo->ttm->pages, gtt->ttm.dma_address, flags);
	if (unlikely(r)) {
		ttm_bo_mem_put(bo, &tmp);
		return r;
	}

	ttm_bo_mem_put(bo, &bo->mem);
	bo->mem = tmp;
	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;
	int r;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}
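
/* Unbind a TT from the GART; userptr pages are unpinned first, and
 * TTs that never got a GART address (AMDGPU_BO_INVALID_OFFSET) are a
 * no-op.
 */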
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev);
	}
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}
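
/**
 * amdgpu_ttm_tt_affect_userptr - check whether an invalidated address
 * range overlaps a userptr TT; bumps the invalidation counter unless
 * the invalidation was triggered by our own get_user_pages call.
 */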
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}
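
/**
 * amdgpu_ttm_access_memory - read or write a VRAM BO through the
 * MM_INDEX/MM_DATA register aperture, handling unaligned head and
 * tail bytes with a read-modify-write.
 */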
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->mc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};
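
/**
 * amdgpu_ttm_init - set up the TTM device and its memory managers,
 * reserve the firmware and stolen VGA regions of VRAM, and size the
 * VRAM, GTT, GDS, GWS and OA heaps.
 */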
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->mc.visible_vram_size)
		adev->mc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	/*
	 * The reserved VRAM for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_fw_reserve_vram_init(adev);
	if (r) {
		return r;
	}

	r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->mc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	amdgpu_fw_reserve_vram_fini(adev);

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!adev->mman.initialized)
		return;

	man = &adev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->mc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
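
/**
 * amdgpu_copy_buffer - schedule an SDMA copy of @byte_count bytes from
 * @src_offset to @dst_offset, split into chunks of at most
 * copy_max_bytes; with @direct_submit the IB is placed on the ring
 * immediately instead of going through the scheduler entity.
 */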
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
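
/**
 * amdgpu_fill_buffer - fill a BO with @src_data using SDMA PTE-PDE
 * writes, walking the drm_mm nodes backing the buffer one by one.
 */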
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint64_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = 8 *
			adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!ring->ready) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}

	/* num of dwords for each SDMA_OP_PTEPDE cmd */
	num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
					      dst_addr, 0,
					      cur_size_in_bytes >> 3, 0,
					      src_data);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->mc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->mc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
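
/* Dump the CPU-visible content of the GART page by page, returning
 * zeroes for entries that have no page assigned.
 */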
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	uint64_t phys;
	struct iommu_domain *dom;

	/* always return 8 bytes */
	if (size != 8)
		return -EINVAL;

	/* only accept page addresses */
	if (*pos & 0xFFF)
		return -EINVAL;

	dom = iommu_get_domain_for_dev(adev->dev);
	if (dom)
		phys = iommu_iova_to_phys(dom, *pos);
	else
		phys = *pos;

	r = copy_to_user(buf, &phys, 8);
	if (r)
		return -EFAULT;

	return 8;
}

static const struct file_operations amdgpu_ttm_iova_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iova_to_phys_read,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
};

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->mc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->mc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}