/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
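
/*
 * Note on reference counting: each bo carries two krefs. bo::kref counts
 * user references and drops into ttm_bo_release(); bo::list_kref counts
 * list membership (LRU, swap and delayed-destroy lists) and drops into
 * ttm_bo_release_list() above. A bo on both the LRU and swap lists holds
 * two list_kref references, which is why ttm_bo_del_from_lru() below
 * returns a put count for the caller to release.
 */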
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
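
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * ttm_bo_add_ttm() above defers page-array allocation to the driver
 * through the ttm_tt_create hook, which is typically a thin wrapper
 * around ttm_tt_init():
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
 *						unsigned long size,
 *						uint32_t page_flags,
 *						struct page *dummy_read_page)
 *	{
 *		struct foo_ttm *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->ttm.func = &foo_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */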
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
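
/*
 * The move above is resolved in fallback order: a cheap ttm_bo_move_ttm()
 * when neither memory type is FIXED, then the driver's accelerated move
 * hook if one exists, and finally the generic ttm_bo_move_memcpy() path.
 * On failure, bo->mem is temporarily swapped back so that move_notify()
 * can be told the buffer stayed where it was.
 */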
/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
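
/*
 * Summary of the deferred-destruction path above: when the last user
 * reference drops, ttm_bo_cleanup_refs_or_queue() first tries the fast
 * path -- reserve without blocking and check that the bo is idle. If both
 * succeed, the backing store is torn down immediately; otherwise the bo is
 * parked on bdev->ddestroy with an extra list_kref and the delayed
 * workqueue retries roughly every HZ / 100 jiffies until its fences signal.
 */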
/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
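
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * drivers pair these two helpers around operations that must not race
 * with delayed destruction, e.g. a GPU reset:
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(&foo->bdev);
 *	foo_do_gpu_reset(foo);
 *	ttm_bo_unlock_delayed_workqueue(&foo->bdev, resched);
 */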
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
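
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * ttm_bo_evict() above asks the driver where evicted buffers should go
 * via the evict_flags hook. A typical implementation routes VRAM buffers
 * to GTT and everything else to system memory:
 *
 *	static void foo_evict_flags(struct ttm_buffer_object *bo,
 *				    struct ttm_placement *placement)
 *	{
 *		switch (bo->mem.mem_type) {
 *		case TTM_PL_VRAM:
 *			*placement = foo_gtt_placement;
 *			break;
 *		case TTM_PL_TT:
 *		default:
 *			*placement = foo_sysmem_placement;
 *		}
 *	}
 */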
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}
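
/*
 * Worked example for ttm_bo_select_caching() above: if a bo is currently
 * mapped write-combined (cur_placement contains TTM_PL_FLAG_WC) and the
 * proposed placement allows TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED, the
 * current WC mode is kept and a costly cache-attribute transition is
 * avoided. Only when the current mode is not in the proposed mask does
 * the code fall back to the manager's default and then to the
 * cached > wc > uncached preference order.
 */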
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
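
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * a driver typically calls ttm_bo_validate() below on a reserved bo to
 * (re)position it, e.g. pinning a scanout buffer into VRAM:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &foo_vram_placement, false, false);
 *	ttm_bo_unreserve(bo);
 */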
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv)
		ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
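
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * ttm_bo_create() above is the simplest way to obtain a buffer object
 * when the caller does not embed struct ttm_buffer_object in a larger
 * driver structure:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_bo_create(&foo->bdev, size, ttm_bo_type_kernel,
 *				&foo_sysmem_placement, 0, false, NULL, &bo);
 */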
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
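
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * device bring-up chains the init calls in this order -- global state
 * first, then the device, then any driver-managed memory types:
 *
 *	ret = ttm_bo_device_init(&foo->bdev, foo_bo_global(foo),
 *				 &foo_bo_driver, foo_mapping(foo),
 *				 FOO_FILE_PAGE_OFFSET, foo->need_dma32);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(&foo->bdev, TTM_PL_VRAM,
 *			     foo->vram_size >> PAGE_SHIFT);
 */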
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, false, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
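
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * the synccpu pair above brackets CPU writes so eviction logic can tell
 * that the buffer is busy from the processor side:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *	memcpy(cpu_map, data, len);
 *	ttm_bo_synccpu_write_release(bo);
 */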
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	ret = ttm_bo_wait(bo, false, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}