/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_global_kobj_release(struct kobject *kobj);

/*
 * ttm_global_mutex - protecting the global BO state
 */
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
		   bo, bo->mem.num_pages, bo->base.size >> 10,
		   bo->base.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
			atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};
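/*
 * The kobject type below backs the "buffer_objects" kobject registered in
 * ttm_bo_global_init() and exposes the global BO count as the read-only
 * "bo_count" sysfs attribute defined above.
 */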
static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	list_del_init(&bo->swap);
	list_del_init(&bo->lru);

	if (bdev->driver->del_from_lru_notify)
		bdev->driver->del_from_lru_notify(bo);
}

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
				     struct ttm_buffer_object *bo)
{
	if (!pos->first)
		pos->first = bo;
	pos->last = bo;
}

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
			     struct ttm_resource *mem,
			     struct ttm_lru_bulk_move *bulk)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;

	if (!bo->deleted)
		dma_resv_assert_held(bo->base.resv);

	if (bo->pin_count) {
		ttm_bo_del_from_lru(bo);
		return;
	}

	man = ttm_manager_type(bdev, mem->mem_type);
	list_move_tail(&bo->lru, &man->lru[bo->priority]);
	if (man->use_tt && bo->ttm &&
	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
				     TTM_PAGE_FLAG_SWAPPED))) {
		struct list_head *swap;

		swap = &ttm_bo_glob.swap_lru[bo->priority];
		list_move_tail(&bo->swap, swap);
	}

	if (bdev->driver->del_from_lru_notify)
		bdev->driver->del_from_lru_notify(bo);

	if (bulk && !bo->pin_count) {
		switch (bo->mem.mem_type) {
		case TTM_PL_TT:
			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
			break;

		case TTM_PL_VRAM:
			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
			break;
		}
		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
	}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
		struct ttm_resource_manager *man;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
		struct ttm_resource_manager *man;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
		struct list_head *lru;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		lru = &ttm_bo_glob.swap_lru[i];
		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
	}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
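/*
 * Illustrative sketch of assumed driver-side usage (hypothetical names, not
 * part of the TTM core): a driver that revalidates many BOs per submission
 * can batch the LRU updates instead of splicing each BO individually:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&ttm_bo_glob.lru_lock);
 *	list_for_each_entry(entry, &validated, head)	// hypothetical list
 *		ttm_bo_move_to_lru_tail(entry->bo, &entry->bo->mem, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&ttm_bo_glob.lru_lock);
 *
 * All BOs fed into one ttm_lru_bulk_move must be reserved by the caller, as
 * the dma_resv_assert_held() checks above enforce.
 */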
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
	int ret;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_man->use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_man->use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = bdev->driver->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
	if (!new_man->use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Called with the bo reserved.
 *
 * Releases the GPU memory type usage on destruction. This is the place to
 * put in driver specific hooks to release driver private resources.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->delete_mem_notify)
		bo->bdev->driver->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&ttm_bo_glob.lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&ttm_bo_glob.lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i;

	rcu_read_lock();
	fobj = rcu_dereference(resv->fence);
	fence = rcu_dereference(resv->fence_excl);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference(fobj->shared[i]);

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	rcu_read_unlock();
}
/*
 * ttm_bo_cleanup_refs - clean up a buffer object pending delayed destruction
 *
 * @bo:                    The buffer object to clean-up
 * @interruptible:         Any sleeps should occur interruptibly.
 * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv:           Unlock the reservation lock as well.
 *
 * If the bo is idle, remove it from the LRU lists and unreference it.
 * If it is not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held; this function will drop
 * the lru lock and optionally the reservation lock before returning.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);

		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
						 30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&ttm_bo_glob.lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&ttm_bo_glob.lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);

	return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		if (remove_all || bo->base.resv != &bo->base._resv) {
			spin_unlock(&glob->lru_lock);
			dma_resv_lock(bo->base.resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (dma_resv_trylock(bo->base.resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&glob->lru_lock);
		}

		ttm_bo_put(bo);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
		container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;
	int ret;

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences, block for the BO to become idle
			 */
			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
						  30 * HZ);
		}

		if (bo->bdev->driver->release_notify)
			bo->bdev->driver->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, &bo->mem);
	}

	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
	    !dma_resv_trylock(bo->base.resv)) {
		/* The BO is not idle, resurrect it for delayed destroy */
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&ttm_bo_glob.lru_lock);

		/*
		 * Make pinned bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 *
		 * FIXME: QXL is triggering this. Can be removed when the
		 * driver is fixed.
		 */
		if (WARN_ON_ONCE(bo->pin_count)) {
			bo->pin_count = 0;
			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
		}

		kref_init(&bo->kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&ttm_bo_glob.lru_lock);

		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		return;
	}

	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_del_from_lru(bo);
	list_del(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_bo_glob.bo_count);
	dma_fence_put(bo->moving);
	if (!ttm_bo_uses_embedded_gem_object(bo))
		dma_resv_fini(&bo->base._resv);
	bo->destroy(bo);
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
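/*
 * Illustrative sketch of assumed driver-side usage (hypothetical device name,
 * not part of the TTM core): the two helpers above are typically paired
 * around suspend or GPU reset to keep the delayed-destroy worker quiet while
 * the hardware is unavailable:
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(&mydev->bdev);
 *	... suspend / reset the hardware ...
 *	ttm_bo_unlock_delayed_workqueue(&mydev->bdev, resched);
 *
 * The return value of ttm_bo_lock_delayed_workqueue() records whether the
 * work was pending, so only then is it rescheduled afterwards.
 */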
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ttm_bo_wait(bo, false, false);

		ttm_bo_cleanup_memtype_use(bo);
		return ttm_tt_create(bo, false);
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.offset = 0;
	evict_mem.bus.addr = NULL;

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
	if (unlikely(ret)) {
		WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_resource_free(bo, &evict_mem);
	}
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
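/*
 * Illustrative sketch of assumed driver-side usage (hypothetical names, not
 * part of the TTM core): drivers either plug the default helper above
 * straight into their ttm_bo_driver or wrap it with additional checks:
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_busy_in_hw(bo))	// hypothetical driver check
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.eviction_valuable = my_eviction_valuable,
 *		...
 *	};
 */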
/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * covering two cases:
 *
 * a. If the bo shares its reservation object with ctx->resv, that object is
 *    assumed to be locked already, so don't lock it again; return true
 *    directly when the operation allows reserved eviction (allow_res_evict)
 *    or the target bo is already on the delayed free list.
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
	bool ret = false;

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->allow_res_evict)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
						ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&ttm_bo_glob.lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			bool busy;

			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    &busy)) {
				if (busy && !busy_bo && ticket !=
				    dma_resv_locking_ctx(bo->base.resv))
					busy_bo = bo;
				continue;
			}

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&ttm_bo_glob.lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&ttm_bo_glob.lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);

	ttm_bo_put(bo);
	return ret;
}

/*
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		dma_fence_put(fence);
		return -EBUSY;
	}

	dma_resv_add_shared_fence(bo->base.resv, fence);

	ret = dma_resv_reserve_shared(bo->base.resv, 1);
	if (unlikely(ret)) {
		dma_fence_put(fence);
		return ret;
	}

	dma_fence_put(bo->moving);
	bo->moving = fence;
	return 0;
}
/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
	struct ww_acquire_ctx *ticket;
	int ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}

/**
 * ttm_bo_mem_placement - check if placement is compatible
 * @bo: BO to find memory for
 * @place: where to search
 * @mem: the memory object to fill in
 *
 * Check if the placement is compatible and fill in the mem structure.
 * Returns 0 when the placement can be used, or -EBUSY when it can't.
 */
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource *mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, place->mem_type);
	if (!man || !ttm_resource_manager_used(man))
		return -EBUSY;

	mem->mem_type = place->mem_type;
	mem->placement = place->flags;

	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, mem, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);

	return 0;
}
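/*
 * Illustrative sketch of assumed driver-side usage (hypothetical names, not
 * part of the TTM core): callers describe where a BO may live with ttm_place
 * entries, usually a preferred "placement" set plus a more permissive
 * "busy_placement" fallback that ttm_bo_mem_space() below falls back to when
 * eviction is required:
 *
 *	static const struct ttm_place vram_place = {
 *		.fpfn = 0, .lpfn = 0,		// no page-range restriction
 *		.mem_type = TTM_PL_VRAM,
 *		.flags = 0,
 *	};
 *	static const struct ttm_place sys_place = {
 *		.fpfn = 0, .lpfn = 0,
 *		.mem_type = TTM_PL_SYSTEM,
 *		.flags = 0,
 *	};
 *	static struct ttm_placement my_placement = {
 *		.num_placement = 1,
 *		.placement = &vram_place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &sys_place,
 *	};
 */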
/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource *mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_shared(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		ret = ttm_bo_mem_placement(bo, place, mem);
		if (ret)
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		man = ttm_manager_type(bdev, mem->mem_type);
		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_bo_mem_placement(bo, place, mem);
		if (ret)
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOMEM;
	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
		ttm_bo_move_to_lru_tail_unlocked(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource *mem,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	int ret;
	struct ttm_resource hop_mem = *mem;

	hop_mem.mm_node = NULL;
	hop_mem.mem_type = TTM_PL_SYSTEM;
	hop_mem.placement = 0;

	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
	hop_placement.placement = hop_placement.busy_placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}
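/*
 * Illustrative sketch of assumed driver behaviour (hypothetical names, not
 * part of the TTM core): a driver's move() callback signals that it cannot
 * copy directly between the two placements by filling in the intermediate
 * placement and returning -EMULTIHOP:
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      struct ttm_operation_ctx *ctx,
 *			      struct ttm_resource *new_mem,
 *			      struct ttm_place *hop)
 *	{
 *		if (my_needs_bounce(bo, new_mem)) {	// hypothetical check
 *			hop->mem_type = TTM_PL_TT;
 *			hop->flags = 0;
 *			return -EMULTIHOP;
 *		}
 *		return my_direct_move(bo, ctx, new_mem);
 *	}
 *
 * ttm_bo_move_buffer() below then bounces the BO through the hop domain via
 * ttm_bo_bounce_temp_buffer() and calls move() again for the final hop.
 */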
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_place hop;
	struct ttm_resource mem;

	dma_resv_assert_held(bo->base.resv);

	memset(&hop, 0, sizeof(hop));

	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.offset = 0;
	mem.bus.addr = NULL;
	mem.mm_node = NULL;

	/*
	 * Determine where to move the buffer.
	 *
	 * If the driver determines that the move needs an extra step, it
	 * returns -EMULTIHOP; the buffer is then moved to the temporary stop
	 * and the driver is called again to make the second hop.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;
		/* try and move to final place now. */
		goto bounce;
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_resource *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if ((mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((mem->mem_type == heap->mem_type) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_resource *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement) {
		ret = ttm_bo_pipeline_gutting(bo);
		if (ret)
			return ret;

		return ttm_tt_create(bo, false);
	}

	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
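/*
 * Illustrative sketch of assumed driver-side usage (hypothetical names, not
 * part of the TTM core): drivers typically embed ttm_buffer_object in their
 * own BO structure and hand it to ttm_bo_init() / ttm_bo_init_reserved()
 * below:
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *		// driver private fields ...
 *	};
 *
 *	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	size_t acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*bo));
 *	int ret;
 *
 *	ret = ttm_bo_init(bdev, &bo->tbo, size, ttm_bo_type_device,
 *			  &my_placement, 1, true, acc_size,
 *			  NULL, NULL, my_bo_destroy);
 *
 * "my_bo", "my_placement" and "my_bo_destroy" are hypothetical driver names.
 * Note that on failure ttm_bo_init() has already dropped the reference and
 * invoked the destroy callback, so the caller must not free the object again.
 */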
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 size_t size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	bool locked;
	int ret = 0;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.offset = 0;
	bo->mem.bus.addr = NULL;
	bo->moving = NULL;
	bo->mem.placement = 0;
	bo->acc_size = acc_size;
	bo->pin_count = 0;
	bo->sg = sg;
	if (resv) {
		bo->base.resv = resv;
		dma_resv_assert_held(bo->base.resv);
	} else {
		bo->base.resv = &bo->base._resv;
	}
	if (!ttm_bo_uses_embedded_gem_object(bo)) {
		/*
		 * bo.base is not initialized, so we have to setup the
		 * struct elements we want to use regardless.
		 */
		bo->base.size = size;
		dma_resv_init(&bo->base._resv);
		drm_vma_node_reset(&bo->base.vma_node);
	}
	atomic_inc(&ttm_bo_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = dma_resv_trylock(bo->base.resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_move_to_lru_tail_unlocked(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		size_t size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		size_t acc_size,
		struct sg_table *sg,
		struct dma_resv *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
}
static void ttm_bo_global_release(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_bo_glob_use_count > 0)
		goto out;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
	ttm_mem_global_release(&ttm_mem_glob);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_bo_global_init(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret = 0;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_bo_glob_use_count > 1)
		goto out;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (ret)
		goto out;

	spin_lock_init(&glob->lru_lock);
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
out:
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret = 0;
	unsigned i;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	ttm_pool_fini(&bdev->pool);

	if (!ret)
		ttm_bo_global_release();

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
{
	struct ttm_resource_manager *man = &bdev->sysman;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	man->use_tt = true;

	ttm_resource_manager_init(man, 0);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
	ttm_resource_manager_set_used(man, true);
}

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
		       struct device *dev,
		       struct address_space *mapping,
		       struct drm_vma_offset_manager *vma_manager,
		       bool use_dma_alloc, bool use_dma32)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_bo_global_init();
	if (ret)
		return ret;

	bdev->driver = driver;

	ttm_bo_init_sysman(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_device_init);
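/*
 * Illustrative sketch of assumed driver-side usage (hypothetical names, not
 * part of the TTM core): a driver brings up its BO device once at load time
 * and tears it down with ttm_bo_device_release() on unload:
 *
 *	ret = ttm_bo_device_init(&mydev->bdev, &my_bo_driver,
 *				 mydev->drm.dev,
 *				 mydev->drm.anon_inode->i_mapping,
 *				 mydev->drm.vma_offset_manager,
 *				 mydev->need_coherent, mydev->need_dma32);
 *
 * "mydev" and "my_bo_driver" are hypothetical driver names.
 */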
/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
					    interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	dma_resv_add_excl_fence(bo->base.resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	bool locked;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    NULL))
				continue;

			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}

			ret = 0;
			break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/*
	 * Move to system cached
	 */
	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_resource evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = 0;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
		if (unlikely(ret != 0)) {
			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->bdev, bo->ttm);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}