/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}
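/*
 * Worked example (illustrative sketch, not compiled): the TTM_PL_FLAG_*
 * memory bits are one-hot encodings of the TTM_PL_* indices, so ffs()
 * recovers the index directly. Assuming the standard placement flags:
 */
#if 0
	uint32_t mem_type;
	struct ttm_place place = { .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC };

	/* TTM_PL_FLAG_VRAM == 1 << TTM_PL_VRAM, so ffs() - 1 == TTM_PL_VRAM.
	 * The caching bit (TTM_PL_FLAG_WC) is masked off by TTM_PL_MASK_MEM.
	 */
	if (!ttm_mem_type_from_place(&place, &mem_type))
		WARN_ON(mem_type != TTM_PL_VRAM);
#endif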
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
			atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
		container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call with bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Cannot flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
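/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the move path above dispatches through optional ttm_bo_driver hooks. A
 * driver that accelerates copies wires them up roughly like this; the
 * "mydrv_*" names are assumptions.
 */
#if 0
static struct ttm_bo_driver mydrv_bo_driver = {
	.ttm_tt_create	= mydrv_ttm_tt_create,	/* backs ttm_bo_add_ttm() */
	.move_notify	= mydrv_move_notify,	/* called around every move */
	.move		= mydrv_copy_move,	/* else TTM falls back to memcpy */
	.evict_flags	= mydrv_evict_flags,	/* placement used by ttm_bo_evict() */
};
#endif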
/**
 * Call with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	reservation_object_init(&bo->ttm_resv);
	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r) {
		reservation_object_unlock(&bo->ttm_resv);
		reservation_object_fini(&bo->ttm_resv);
	}

	return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences, block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);
			ttm_bo_cleanup_memtype_use(bo);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
/**
 * ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible:	Any sleeps should occur interruptibly.
 * @no_wait_gpu:	Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
		reservation_object_fini(&bo->ttm_resv);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	return 0;
}
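/*
 * Locking-contract sketch (mirrors the callers later in this file, e.g.
 * ttm_mem_evict_first): the function is entered with both the lru_lock and
 * the reservation held and returns with both dropped, whatever the outcome.
 */
#if 0
	spin_lock(&glob->lru_lock);
	if (!__ttm_bo_reserve(bo, false, true, NULL))
		/* drops the reservation and glob->lru_lock before returning */
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, true);
	else
		spin_unlock(&glob->lru_lock);
#endif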
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
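/*
 * Worked example (illustrative numbers only): eviction is only valuable if
 * the BO overlaps the requested range [fpfn, lpfn), where lpfn == 0 means
 * "no upper bound". For a BO whose extent is [100, 116):
 *
 *	fpfn = 120, lpfn = 0   -> range starts past the BO, keep it
 *	fpfn = 0,   lpfn = 100 -> range ends before the BO, keep it
 *	fpfn = 90,  lpfn = 110 -> overlaps, evicting can free useful space
 */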
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (ret)
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				__ttm_bo_unreserve(bo);
				ret = -EBUSY;
				continue;
			}

			break;
		}

		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
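/*
 * Worked example (illustrative sketch, not compiled): if the BO is
 * currently write-combined and the proposed placement allows WC, the first
 * branch keeps WC and no cache transition is needed; only when the current
 * caching is not allowed does default_caching, then the CACHED > WC >
 * UNCACHED preference order, apply.
 */
#if 0
	/* keeps TTM_PL_FLAG_WC: the current caching is among the proposed bits */
	flags = ttm_bo_select_caching(man, TTM_PL_FLAG_WC,
				      TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
				      TTM_PL_FLAG_UNCACHED);
#endif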
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err("No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
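/*
 * Usage sketch (hypothetical driver code; the "mydrv_*" names are not part
 * of this file): a driver typically asks for VRAM first and lets
 * ttm_bo_mem_space() fall back to the busy list (here GTT) when eviction
 * would be needed.
 */
#if 0
static const struct ttm_place vram_place = {
	.fpfn = 0, .lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
};
static const struct ttm_place gtt_place = {
	.fpfn = 0, .lpfn = 0,
	.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
};
static struct ttm_placement mydrv_placement = {
	.num_placement = 1,
	.placement = &vram_place,
	.num_busy_placement = 1,
	.busy_placement = &gtt_place,
};
#endif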
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags into the
		 * active flags.
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
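/*
 * Usage sketch (hypothetical caller; mydrv_placement is the assumed
 * placement from the sketch above): validation requires the reservation to
 * be held, so the usual pattern brackets it with reserve/unreserve.
 */
#if 0
	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;
	ret = ttm_bo_validate(bo, &mydrv_placement, true, false);
	ttm_bo_unreserve(bo);
#endif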
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 bool interruptible,
			 struct file *persistent_swap_storage,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, interruptible,
				   persistent_swap_storage, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
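/*
 * Usage sketch (hypothetical driver object; "mydrv_bo" and its destroy
 * callback are assumptions, not part of this file). Drivers usually embed
 * the TTM BO in their own structure and size the memory accounting with
 * ttm_bo_acc_size():
 */
#if 0
struct mydrv_bo {
	struct ttm_buffer_object tbo;
	/* driver-private state ... */
};

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct mydrv_bo));
	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
			  &mydrv_placement, 0 /* page_alignment */, true,
			  NULL, acc_size, NULL, NULL, mydrv_bo_destroy);
#endif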
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
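/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * for a 1 MiB BO, npages = 256, so ttm_bo_acc_size() accounts
 * round_pot(struct_size) + round_pot(256 * 8) = round_pot(struct_size) +
 * 2048 bytes, plus round_pot(sizeof(struct ttm_tt)) for the page
 * bookkeeping. The DMA variant additionally charges a dma_addr_t and a
 * second pointer per page.
 */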
int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
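/*
 * Usage sketch (hypothetical driver bring-up; the VRAM size is an example
 * value): after ttm_bo_device_init() a driver registers each memory type
 * it manages, sized in pages.
 */
#if 0
	/* e.g. 256 MiB of VRAM managed by the driver's range manager */
	ret = ttm_bo_init_mm(&mydrv->bdev, TTM_PL_VRAM,
			     (256 * 1024 * 1024) >> PAGE_SHIFT);
#endif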
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(&glob->kobj, &ttm_bo_glob_kobj_type,
				   ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&glob->swap_lru[i]))
			TTM_DEBUG("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
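/*
 * Bring-up sketch (hypothetical driver; "mydrv", "ddev" and the
 * driver-defined DRM_FILE_PAGE_OFFSET constant are assumptions, and the
 * drm_global glue that produces "glob" is omitted):
 */
#if 0
	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
				 ddev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, mydrv->need_dma32);
#endif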
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
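/*
 * Usage sketch (hypothetical caller): CPU writers bracket their access so
 * eviction and the swapout path below see a consistent cpu_writers count.
 */
#if 0
	ret = ttm_bo_synccpu_write_grab(bo, false);
	if (ret)
		return ret;
	/* ... CPU writes to the buffer ... */
	ttm_bo_synccpu_write_release(bo);
#endif

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (!ret)
				break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/*
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	/*
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * Use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}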