/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	put_count = ttm_bo_del_from_lru(bo);
	ttm_bo_list_ref_sub(bo, put_count, true);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock (&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				const struct ttm_place *place,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv) {
		ttm_bo_unreserve(bo);

	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, false, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	ret = ttm_bo_wait(bo, false, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}