/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        struct drm_printer p = drm_debug_printer(TTM_PFX);
        struct ttm_resource_manager *man;
        int i, mem_type;

        drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
                   bo, bo->resource->num_pages, bo->base.size >> 10,
                   bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
                           i, placement->placement[i].flags, mem_type);
                man = ttm_manager_type(bo->bdev, mem_type);
                ttm_resource_manager_debug(man, &p);
        }
}

static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        list_move_tail(&bo->lru, &bdev->pinned);

        if (bdev->funcs->del_from_lru_notify)
                bdev->funcs->del_from_lru_notify(bo);
}

static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        list_del_init(&bo->lru);

        if (bdev->funcs->del_from_lru_notify)
                bdev->funcs->del_from_lru_notify(bo);
}

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
                                     struct ttm_buffer_object *bo)
{
        if (!pos->first)
                pos->first = bo;
        pos->last = bo;
}

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                             struct ttm_resource *mem,
                             struct ttm_lru_bulk_move *bulk)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;

        if (!bo->deleted)
                dma_resv_assert_held(bo->base.resv);

        if (bo->pin_count) {
                ttm_bo_move_to_pinned(bo);
                return;
        }

        if (!mem)
                return;

        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);

        if (bdev->funcs->del_from_lru_notify)
                bdev->funcs->del_from_lru_notify(bo);

        if (bulk && !bo->pin_count) {
                switch (bo->resource->mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
                        break;

                case TTM_PL_VRAM:
                        ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
                        break;
                }
        }
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

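/*
 * ttm_bo_bulk_move_lru_tail - bulk move ranges of BOs to the end of their LRUs.
 *
 * Moves the BO ranges recorded in @bulk (per memory type and priority) to
 * the tail of the corresponding LRU lists in one go. The first and last BO
 * of each range must still be reserved; the LRU lists themselves are
 * normally protected by the device's lru_lock, which the caller is
 * expected to hold.
 */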
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
        unsigned i;

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
{
        struct ttm_resource_manager *old_man, *new_man;
        struct ttm_device *bdev = bo->bdev;
        int ret;

        old_man = ttm_manager_type(bdev, bo->resource->mem_type);
        new_man = ttm_manager_type(bdev, mem->mem_type);

        ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (new_man->use_tt) {
                /* Zero init the new TTM structure if the old location should
                 * have used one as well.
                 */
                ret = ttm_tt_create(bo, old_man->use_tt);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
                }
        }

        ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
                        return ret;
                goto out_err;
        }

        ctx->bytes_moved += bo->base.size;
        return 0;

out_err:
        new_man = ttm_manager_type(bdev, bo->resource->mem_type);
        if (!new_man->use_tt)
                ttm_bo_tt_destroy(bo);

        return ret;
}

/*
 * Release the GPU memory type usage of a buffer object.
 *
 * Notifies the driver, destroys the backing TTM and frees the resource.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 *
 * Called with bo::resv held; the caller is responsible for releasing
 * the reservation lock afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->funcs->delete_mem_notify)
                bo->bdev->funcs->delete_mem_notify(bo);

        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
}

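/*
 * ttm_bo_individualize_resv - give a dying BO its own reservation object.
 *
 * Copies the current fences into the BO's embedded reservation object
 * (&bo->base._resv) and, except for sg BOs, switches bo->base.resv over to
 * it, so that later cleanup only has to wait for fences that actually
 * concern this BO.
 */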
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->base.resv == &bo->base._resv)
                return 0;

        BUG_ON(!dma_resv_trylock(&bo->base._resv));

        r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        dma_resv_unlock(&bo->base._resv);
        if (r)
                return r;

        if (bo->type != ttm_bo_type_sg) {
                /* This works because the BO is about to be destroyed and nobody
                 * references it any more. The only tricky case is the trylock on
                 * the resv object while holding the lru_lock.
                 */
                spin_lock(&bo->bdev->lru_lock);
                bo->base.resv = &bo->base._resv;
                spin_unlock(&bo->bdev->lru_lock);
        }

        return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct dma_resv *resv = &bo->base._resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int i;

        rcu_read_lock();
        fobj = dma_resv_shared_list(resv);
        fence = dma_resv_excl_fence(resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);

        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference(fobj->shared[i]);

                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
        rcu_read_unlock();
}

/**
 * ttm_bo_cleanup_refs - clean up a BO queued for delayed destruction.
 * If the bo is idle, remove it from the lru lists and unreference it.
 * If it is not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct dma_resv *resv = &bo->base._resv;
        int ret;

        if (dma_resv_test_signaled(resv, true))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);

                lret = dma_resv_wait_timeout(resv, true, interruptible,
                                             30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&bo->bdev->lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced and lost: someone else holds the reservation
                         * now and is probably busy in ttm_bo_cleanup_memtype_use.
                         *
                         * Even if that's not the case, any delayed destruction
                         * would now succeed since we finished waiting, so just
                         * return success here.
                         */
                        spin_unlock(&bo->bdev->lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
                return ret;
        }

        ttm_bo_move_to_pinned(bo);
        list_del_init(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);

        ttm_bo_put(bo);

        return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
        struct list_head removed;
        bool empty;

        INIT_LIST_HEAD(&removed);

        spin_lock(&bdev->lru_lock);
        while (!list_empty(&bdev->ddestroy)) {
                struct ttm_buffer_object *bo;

                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
                                      ddestroy);
                list_move_tail(&bo->ddestroy, &removed);
                if (!ttm_bo_get_unless_zero(bo))
                        continue;

                if (remove_all || bo->base.resv != &bo->base._resv) {
                        spin_unlock(&bdev->lru_lock);
                        dma_resv_lock(bo->base.resv, NULL);

                        spin_lock(&bdev->lru_lock);
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);

                } else if (dma_resv_trylock(bo->base.resv)) {
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
                } else {
                        spin_unlock(&bdev->lru_lock);
                }

                ttm_bo_put(bo);
                spin_lock(&bdev->lru_lock);
        }
        list_splice_tail(&removed, &bdev->ddestroy);
        empty = list_empty(&bdev->ddestroy);
        spin_unlock(&bdev->lru_lock);

        return empty;
}

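/*
 * ttm_bo_release - final unreference of a buffer object.
 *
 * Called when the last kref is dropped. If the BO is still busy, it is
 * queued on the device's delayed-destroy list and freed later by the
 * delayed work; otherwise its resources are released immediately and the
 * driver's destroy callback is invoked.
 */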
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_device *bdev = bo->bdev;
        int ret;

        WARN_ON_ONCE(bo->pin_count);

        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort: if we fail to allocate memory for the
                         * fences, block until the BO becomes idle.
                         */
                        dma_resv_wait_timeout(bo->base.resv, true, false,
                                              30 * HZ);
                }

                if (bo->bdev->funcs->release_notify)
                        bo->bdev->funcs->release_notify(bo);

                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, bo->resource);
        }

        if (!dma_resv_test_signaled(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;

                spin_lock(&bo->bdev->lru_lock);

                /*
                 * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 *
                 * FIXME: QXL is triggering this. Can be removed when the
                 * driver is fixed.
                 */
                if (bo->pin_count) {
                        bo->pin_count = 0;
                        ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
                }

                kref_init(&bo->kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&bo->bdev->lru_lock);

                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                return;
        }

        spin_lock(&bo->bdev->lru_lock);
        ttm_bo_del_from_lru(bo);
        list_del(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);

        ttm_bo_cleanup_memtype_use(bo);
        dma_resv_unlock(bo->base.resv);

        atomic_dec(&ttm_glob.bo_count);
        dma_fence_put(bo->moving);
        bo->destroy(bo);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

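/*
 * ttm_bo_bounce_temp_buffer - move a BO to the intermediate "hop" domain.
 *
 * Used when the driver signals -EMULTIHOP: allocate space in the bounce
 * placement described by @hop and move the BO there, so that the caller
 * can retry the move to the final placement afterwards.
 */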
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource **mem,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_place *hop)
{
        struct ttm_placement hop_placement;
        struct ttm_resource *hop_mem;
        int ret;

        hop_placement.num_placement = hop_placement.num_busy_placement = 1;
        hop_placement.placement = hop_placement.busy_placement = hop;

        /* find space in the bounce domain */
        ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
        if (ret)
                return ret;
        /* move to the bounce domain */
        ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
        if (ret) {
                ttm_resource_free(bo, &hop_mem);
                return ret;
        }
        return 0;
}

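/*
 * ttm_bo_evict - evict a buffer object from its current placement.
 *
 * Asks the driver for its eviction placements, finds space there and
 * moves the BO, bouncing through a temporary placement if the driver
 * requests a multihop move. If the driver returns no placements at all,
 * the backing store is simply dropped after waiting for idle.
 */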
static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource *evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;

        memset(&hop, 0, sizeof(hop));

        dma_resv_assert_held(bo->base.resv);

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->funcs->evict_flags(bo, &placement);

        if (!placement.num_placement && !placement.num_busy_placement) {
                ret = ttm_bo_wait(bo, true, false);
                if (ret)
                        return ret;

                /*
                 * Since we've already synced, this frees backing store
                 * immediately.
                 */
                return ttm_bo_pipeline_gutting(bo);
        }

        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

bounce:
        ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
                if (ret) {
                        pr_err("Buffer eviction failed\n");
                        ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
                /* try to move to the final place now. */
                goto bounce;
        }
out:
        return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        dma_resv_assert_held(bo->base.resv);
        if (bo->resource->mem_type == TTM_PL_SYSTEM)
                return true;

        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
            (place->lpfn && place->lpfn <= bo->resource->start))
                return false;

        return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * covering two cases:
 *
 * a. if the bo shares its reservation object with ctx->resv, the object is
 *    assumed to be locked already, so don't lock it again; return true
 *    directly when the operation allows reserved eviction
 *    (ctx->allow_res_evict) or the bo is already on the delayed free list;
 *
 * b. otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                                           struct ttm_operation_ctx *ctx,
                                           const struct ttm_place *place,
                                           bool *locked, bool *busy)
{
        bool ret = false;

        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
                        ret = true;
                *locked = false;
                if (busy)
                        *busy = false;
        } else {
                ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
        }

        if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
                ret = false;
                if (*locked) {
                        dma_resv_unlock(bo->base.resv);
                        *locked = false;
                }
        }

        return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
                                   struct ttm_operation_ctx *ctx,
                                   struct ww_acquire_ctx *ticket)
{
        int r;

        if (!busy_bo || !ticket)
                return -EBUSY;

        if (ctx->interruptible)
                r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                ticket);
        else
                r = dma_resv_lock(busy_bo->base.resv, ticket);

        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * least tried one more time, but that would mean a much larger rework
         * of TTM.
         */
        if (!r)
                dma_resv_unlock(busy_bo->base.resv);

        return r == -EDEADLK ? -EBUSY : r;
}

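/*
 * ttm_mem_evict_first - evict the first evictable BO on a manager's LRU.
 *
 * Walks @man's LRU lists in priority order, picks the first BO that can be
 * locked and is considered valuable to evict for @place, and evicts it (or
 * finishes its delayed destruction). If only busy BOs are found, one of
 * them is briefly locked via @ticket (see ttm_mem_evict_wait_busy()) before
 * returning, so that the caller can retry.
 */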
int ttm_mem_evict_first(struct ttm_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
                        struct ttm_operation_ctx *ctx,
                        struct ww_acquire_ctx *ticket)
{
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
        bool locked = false;
        unsigned i;
        int ret;

        spin_lock(&bdev->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &man->lru[i], lru) {
                        bool busy;

                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
                                                            &locked, &busy)) {
                                if (busy && !busy_bo && ticket !=
                                    dma_resv_locking_ctx(bo->base.resv))
                                        busy_bo = bo;
                                continue;
                        }

                        if (!ttm_bo_get_unless_zero(bo)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        break;
                }

                /* If the inner loop terminated early, we have our candidate */
                if (&bo->lru != &man->lru[i])
                        break;

                bo = NULL;
        }

        if (!bo) {
                if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
                        busy_bo = NULL;
                spin_unlock(&bdev->lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        ttm_bo_put(busy_bo);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                ttm_bo_put(bo);
                return ret;
        }

        spin_unlock(&bdev->lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked)
                ttm_bo_unreserve(bo);

        ttm_bo_put(bo);
        return ret;
}

/*
 * Add the last move fence to the BO and reserve a new shared slot. We only use
 * a shared slot to avoid unnecessary sync and rely on the subsequent bo move to
 * either stall or use an exclusive fence and set bo->moving, respectively.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
                                 struct ttm_resource *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (!fence)
                return 0;

        if (no_wait_gpu) {
                ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
                dma_fence_put(fence);
                return ret;
        }

        dma_resv_add_shared_fence(bo->base.resv, fence);

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret)) {
                dma_fence_put(fence);
                return ret;
        }

        dma_fence_put(bo->moving);
        bo->moving = fence;
        return 0;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource **mem,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
        struct ww_acquire_ctx *ticket;
        int ret;

        man = ttm_manager_type(bdev, place->mem_type);
        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
                if (likely(!ret))
                        break;
                if (unlikely(ret != -ENOSPC))
                        return ret;
                ret = ttm_mem_evict_first(bdev, man, place, ctx,
                                          ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);

        return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}

/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource **mem,
                     struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        bool type_found = false;
        int i, ret;

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_resource_alloc(bo, place, mem);
                if (ret == -ENOSPC)
                        continue;
                if (unlikely(ret))
                        goto error;

                ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
                                continue;

                        goto error;
                }
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
                if (likely(!ret))
                        return 0;

                if (ret && ret != -EBUSY)
                        goto error;
        }

        ret = -ENOMEM;
        if (!type_found) {
                pr_err(TTM_PFX "No compatible memory type found\n");
                ret = -EINVAL;
        }

error:
        if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
                ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_resource *mem;
        struct ttm_place hop;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Determine where to move the buffer.
         *
         * If the driver determines that the move needs an extra step, it
         * returns -EMULTIHOP: the buffer is first moved to the temporary
         * placement, and the driver is then called again to make the
         * second hop.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
bounce:
        ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        goto out;
                /* try to move to the final place now. */
                goto bounce;
        }
out:
        if (ret)
                ttm_resource_free(bo, &mem);
        return ret;
}

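/*
 * ttm_bo_validate - make a buffer object match the given placement.
 *
 * If the BO's current resource is not compatible with @placement, the BO
 * is moved to a new placement (possibly evicting other BOs). An empty
 * placement drops the backing store entirely. A TT is created when the BO
 * ends up in system memory.
 *
 * Must be called with the BO reserved. Returns 0 on success or a negative
 * error code.
 */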
int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Remove the backing store if no placement is given.
         */
        if (!placement->num_placement && !placement->num_busy_placement)
                return ttm_bo_pipeline_gutting(bo);

        /*
         * Check whether we need to move the buffer.
         */
        if (!ttm_resource_compat(bo->resource, placement)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->resource->mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

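/*
 * ttm_bo_init_reserved - initialize a buffer object and leave it reserved.
 *
 * Sets up the BO's reference count, lists and reservation object, gives it
 * an initial system-memory resource, adds a mmap offset for device/sg BOs
 * and validates it against @placement. On success the BO is returned
 * reserved (locked) and on the LRU; on failure the reference is dropped
 * again.
 */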
int ttm_bo_init_reserved(struct ttm_device *bdev,
                         struct ttm_buffer_object *bo,
                         size_t size,
                         enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
                         struct sg_table *sg,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        bool locked;
        int ret;

        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

        kref_init(&bo->kref);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
        bo->page_alignment = page_alignment;
        bo->moving = NULL;
        bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
                dma_resv_assert_held(bo->base.resv);
        } else {
                bo->base.resv = &bo->base._resv;
        }
        atomic_inc(&ttm_glob.bo_count);

        ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
        if (unlikely(ret)) {
                ttm_bo_put(bo);
                return ret;
        }

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
                                         bo->resource->num_pages);

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
                locked = dma_resv_trylock(bo->base.resv);
                WARN_ON(!locked);
        }

        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, ctx);

        if (unlikely(ret)) {
                if (!resv)
                        ttm_bo_unreserve(bo);

                ttm_bo_put(bo);
                return ret;
        }

        ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_device *bdev,
                struct ttm_buffer_object *bo,
                size_t size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct sg_table *sg,
                struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
                                   page_alignment, &ctx, sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
        ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
{
        long timeout = 15 * HZ;

        if (no_wait) {
                if (dma_resv_test_signaled(bo->base.resv, true))
                        return 0;
                else
                        return -EBUSY;
        }

        timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
                                        timeout);
        if (timeout < 0)
                return timeout;

        if (timeout == 0)
                return -EBUSY;

        dma_resv_add_excl_fence(bo->base.resv, NULL);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

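/*
 * ttm_bo_swapout - try to swap out a single buffer object.
 *
 * If the BO can be locked and is swappable, move it to cached system
 * memory if necessary, wait for it to become idle, unmap it from user
 * space and swap its TT pages out via ttm_tt_swapout().
 */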
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
{
        struct ttm_place place;
        bool locked;
        int ret;

        /*
         * While the bo may already reside in SYSTEM placement, set
         * SYSTEM as new placement to cover also the move further below.
         * The driver may use the fact that we're moving from SYSTEM
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
        place.mem_type = TTM_PL_SYSTEM;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;

        if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
            bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
            bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
            !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
                return ret == -EBUSY ? -ENOSPC : ret;
        }

        ttm_bo_move_to_pinned(bo);
        /* TODO: Cleanup the locking */
        spin_unlock(&bo->bdev->lru_lock);

        /*
         * Move to system cached
         */
        if (bo->resource->mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource *evict_mem;
                struct ttm_place hop;

                memset(&hop, 0, sizeof(hop));
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;

                ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
                        goto out;
                }
        }

        /*
         * Make sure BO is idle.
         */
        ret = ttm_bo_wait(bo, false, false);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        if (bo->bdev->funcs->swap_notify)
                bo->bdev->funcs->swap_notify(bo);

        if (ttm_tt_is_populated(bo->ttm))
                ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
        if (bo->ttm == NULL)
                return;

        ttm_tt_unpopulate(bo->bdev, bo->ttm);
        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
}