/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}
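
/*
 * Example (illustrative sketch, not part of the driver): TTM callbacks see
 * plain &ttm_buffer_object pointers, which may belong to another driver when
 * buffers are shared, so this check guards the downcast. "tbo" below is a
 * hypothetical callback argument.
 *
 *	if (amdgpu_ttm_bo_is_amdgpu_bo(tbo)) {
 *		struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
 *		(void)abo;	hypothetical: amdgpu-specific fields are now safe to use
 *	}
 */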

/**
 * amdgpu_ttm_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
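
/*
 * Example (illustrative sketch, not part of the driver): requesting VRAM
 * placement for a BO that the CPU must be able to access. The flag keeps the
 * placement inside the CPU-visible part of VRAM instead of allowing
 * TTM_PL_FLAG_TOPDOWN. "abo" and "ctx" are hypothetical: an already-created,
 * reserved BO and a &ttm_operation_ctx.
 *
 *	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */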

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
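
/*
 * Example (illustrative sketch, not part of the driver): a typical
 * kernel-internal allocation that stays pinned and CPU mapped for its whole
 * lifetime. The size, domain and "adev" pointer are hypothetical.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...use gpu_addr from the GPU and cpu_ptr from the CPU...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */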

/* Validate that the BO size fits within the bounds of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_ttm_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
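
/*
 * Example (illustrative sketch, not part of the driver): creating a GEM
 * backed BO in VRAM, showing roughly how callers fill in &amdgpu_bo_param.
 * The "size" and "adev" values are hypothetical.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */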

/**
 * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be backed up
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies an &amdgpu_bo buffer object to its shadow object.
 * Not used for now.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be restored
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where VRAM context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
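
/*
 * Example (illustrative sketch, not part of the driver): CPU access to a
 * kernel BO. amdgpu_bo_kmap() waits for pending fences before mapping and is
 * typically called with the BO reserved. "bo" is a hypothetical pinned BO.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *		amdgpu_bo_kunmap(bo);
 *	}
 */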

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}
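
/*
 * Example (illustrative sketch, not part of the driver): keeping a BO alive
 * across an asynchronous operation by taking an extra reference and dropping
 * it when the work is done. "job" and its "bo" field are hypothetical.
 *
 *	job->bo = amdgpu_bo_ref(bo);
 *	...submit work that uses the BO...
 *	amdgpu_bo_unref(&job->bo);
 */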

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
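
/*
 * Example (illustrative sketch, not part of the driver): pinning a scanout
 * buffer. The BO must be reserved around the pin call. "rbo" is a
 * hypothetical framebuffer BO.
 *
 *	r = amdgpu_bo_reserve(rbo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(rbo);
 */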

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting VRAM at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
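
/*
 * Example (illustrative sketch, not part of the driver): propagating tiling
 * flags from one BO to another. The reservation lock must be held for the
 * read, as the lockdep assertion above documents. "src" and "dst" are
 * hypothetical reserved BOs.
 *
 *	u64 tiling_flags;
 *
 *	amdgpu_bo_get_tiling_flags(src, &tiling_flags);
 *	r = amdgpu_bo_set_tiling_flags(dst, tiling_flags);
 */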

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
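
/*
 * Example (illustrative sketch, not part of the driver): round-tripping
 * opaque UMD metadata through a BO. The buffer contents, size and flags are
 * hypothetical; the driver only stores them.
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	char data[64];
 *
 *	r = amdgpu_bo_set_metadata(bo, data, sizeof(data), 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, data, sizeof(data), &size, &flags);
 */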

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}
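
/*
 * Example (illustrative sketch, not part of the driver): attaching the fence
 * of a copy operation to a BO so that TTM will not move or free the memory
 * until the copy has completed. "fence" is a hypothetical &dma_fence returned
 * by the copy submission.
 *
 *	amdgpu_bo_fence(bo, fence, true);
 *	dma_fence_put(fence);
 */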