1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 /************************************************************************** 3 * 4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 25 * USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 **************************************************************************/ 28 /* 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 */ 31 32 #include <drm/ttm/ttm_bo_driver.h> 33 #include <drm/ttm/ttm_placement.h> 34 #include <drm/drm_vma_manager.h> 35 #include <linux/io.h> 36 #include <linux/highmem.h> 37 #include <linux/wait.h> 38 #include <linux/slab.h> 39 #include <linux/vmalloc.h> 40 #include <linux/module.h> 41 #include <linux/dma-resv.h> 42 43 struct ttm_transfer_obj { 44 struct ttm_buffer_object base; 45 struct ttm_buffer_object *bo; 46 }; 47 48 void ttm_bo_free_old_node(struct ttm_buffer_object *bo) 49 { 50 ttm_bo_mem_put(bo, &bo->mem); 51 } 52 53 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 54 struct ttm_operation_ctx *ctx, 55 struct ttm_mem_reg *new_mem) 56 { 57 struct ttm_tt *ttm = bo->ttm; 58 struct ttm_mem_reg *old_mem = &bo->mem; 59 int ret; 60 61 if (old_mem->mem_type != TTM_PL_SYSTEM) { 62 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); 63 64 if (unlikely(ret != 0)) { 65 if (ret != -ERESTARTSYS) 66 pr_err("Failed to expire sync object before unbinding TTM\n"); 67 return ret; 68 } 69 70 ttm_tt_unbind(ttm); 71 ttm_bo_free_old_node(bo); 72 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, 73 TTM_PL_MASK_MEM); 74 old_mem->mem_type = TTM_PL_SYSTEM; 75 } 76 77 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); 78 if (unlikely(ret != 0)) 79 return ret; 80 81 if (new_mem->mem_type != TTM_PL_SYSTEM) { 82 ret = ttm_tt_bind(ttm, new_mem, ctx); 83 if (unlikely(ret != 0)) 84 return ret; 85 } 86 87 *old_mem = *new_mem; 88 new_mem->mm_node = NULL; 89 90 return 0; 91 } 92 EXPORT_SYMBOL(ttm_bo_move_ttm); 93 94 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) 95 { 96 if (likely(!man->use_io_reserve_lru)) 97 return 0; 98 99 if (interruptible) 100 return mutex_lock_interruptible(&man->io_reserve_mutex); 101 102 mutex_lock(&man->io_reserve_mutex); 103 return 0; 104 } 105 106 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) 107 { 108 if (likely(!man->use_io_reserve_lru)) 109 return; 110 111 mutex_unlock(&man->io_reserve_mutex); 112 } 113 114 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) 115 { 116 struct ttm_buffer_object *bo; 117 118 bo = list_first_entry_or_null(&man->io_reserve_lru, 119 struct ttm_buffer_object, 120 io_reserve_lru); 121 if (!bo) 122 return -ENOSPC; 123 124 list_del_init(&bo->io_reserve_lru); 125 ttm_bo_unmap_virtual_locked(bo); 126 return 0; 127 } 128 129 int ttm_mem_io_reserve(struct ttm_bo_device *bdev, 130 struct ttm_mem_reg *mem) 131 { 132 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 133 int ret; 134 135 if (mem->bus.io_reserved_count++) 136 return 0; 137 138 if (!bdev->driver->io_mem_reserve) 139 return 0; 140 141 retry: 142 ret = bdev->driver->io_mem_reserve(bdev, mem); 143 if (ret == -ENOSPC) { 144 ret = ttm_mem_io_evict(man); 145 if (ret == 0) 146 goto retry; 147 } 148 return ret; 149 } 150 151 void ttm_mem_io_free(struct ttm_bo_device *bdev, 152 struct ttm_mem_reg *mem) 153 { 154 if (--mem->bus.io_reserved_count) 155 return; 156 157 if (!bdev->driver->io_mem_free) 158 return; 159 160 bdev->driver->io_mem_free(bdev, mem); 161 } 162 163 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) 164 { 165 struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type]; 166 struct ttm_mem_reg *mem = &bo->mem; 167 int ret; 168 169 if (mem->bus.io_reserved_vm) 170 return 0; 171 172 ret = ttm_mem_io_reserve(bo->bdev, mem); 173 if (unlikely(ret != 0)) 174 return ret; 175 mem->bus.io_reserved_vm = true; 176 if (man->use_io_reserve_lru) 177 list_add_tail(&bo->io_reserve_lru, 178 &man->io_reserve_lru); 179 return 0; 180 } 181 182 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) 183 { 184 struct ttm_mem_reg *mem = &bo->mem; 185 186 if (!mem->bus.io_reserved_vm) 187 return; 188 189 mem->bus.io_reserved_vm = false; 190 list_del_init(&bo->io_reserve_lru); 191 ttm_mem_io_free(bo->bdev, mem); 192 } 193 194 static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, 195 struct ttm_mem_reg *mem, 196 void **virtual) 197 { 198 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 199 int ret; 200 void *addr; 201 202 *virtual = NULL; 203 (void) ttm_mem_io_lock(man, false); 204 ret = ttm_mem_io_reserve(bdev, mem); 205 ttm_mem_io_unlock(man); 206 if (ret || !mem->bus.is_iomem) 207 return ret; 208 209 if (mem->bus.addr) { 210 addr = mem->bus.addr; 211 } else { 212 if (mem->placement & TTM_PL_FLAG_WC) 213 addr = ioremap_wc(mem->bus.base + mem->bus.offset, 214 mem->bus.size); 215 else 216 addr = ioremap(mem->bus.base + mem->bus.offset, 217 mem->bus.size); 218 if (!addr) { 219 (void) ttm_mem_io_lock(man, false); 220 ttm_mem_io_free(bdev, mem); 221 ttm_mem_io_unlock(man); 222 return -ENOMEM; 223 } 224 } 225 *virtual = addr; 226 return 0; 227 } 228 229 static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, 230 struct ttm_mem_reg *mem, 231 void *virtual) 232 { 233 struct ttm_mem_type_manager *man; 234 235 man = &bdev->man[mem->mem_type]; 236 237 if (virtual && mem->bus.addr == NULL) 238 iounmap(virtual); 239 (void) ttm_mem_io_lock(man, false); 240 ttm_mem_io_free(bdev, mem); 241 ttm_mem_io_unlock(man); 242 } 243 244 static int ttm_copy_io_page(void *dst, void *src, unsigned long page) 245 { 246 uint32_t *dstP = 247 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); 248 uint32_t *srcP = 249 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); 250 251 int i; 252 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) 253 iowrite32(ioread32(srcP++), dstP++); 254 return 0; 255 } 256 257 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, 258 unsigned long page, 259 pgprot_t prot) 260 { 261 struct page *d = ttm->pages[page]; 262 void *dst; 263 264 if (!d) 265 return -ENOMEM; 266 267 src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); 268 dst = kmap_atomic_prot(d, prot); 269 if (!dst) 270 return -ENOMEM; 271 272 memcpy_fromio(dst, src, PAGE_SIZE); 273 274 kunmap_atomic(dst); 275 276 return 0; 277 } 278 279 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, 280 unsigned long page, 281 pgprot_t prot) 282 { 283 struct page *s = ttm->pages[page]; 284 void *src; 285 286 if (!s) 287 return -ENOMEM; 288 289 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); 290 src = kmap_atomic_prot(s, prot); 291 if (!src) 292 return -ENOMEM; 293 294 memcpy_toio(dst, src, PAGE_SIZE); 295 296 kunmap_atomic(src); 297 298 return 0; 299 } 300 301 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 302 struct ttm_operation_ctx *ctx, 303 struct ttm_mem_reg *new_mem) 304 { 305 struct ttm_bo_device *bdev = bo->bdev; 306 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 307 struct ttm_tt *ttm = bo->ttm; 308 struct ttm_mem_reg *old_mem = &bo->mem; 309 struct ttm_mem_reg old_copy = *old_mem; 310 void *old_iomap; 311 void *new_iomap; 312 int ret; 313 unsigned long i; 314 unsigned long page; 315 unsigned long add = 0; 316 int dir; 317 318 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); 319 if (ret) 320 return ret; 321 322 ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); 323 if (ret) 324 return ret; 325 ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); 326 if (ret) 327 goto out; 328 329 /* 330 * Single TTM move. NOP. 331 */ 332 if (old_iomap == NULL && new_iomap == NULL) 333 goto out2; 334 335 /* 336 * Don't move nonexistent data. Clear destination instead. 337 */ 338 if (old_iomap == NULL && 339 (ttm == NULL || (ttm->state == tt_unpopulated && 340 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { 341 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); 342 goto out2; 343 } 344 345 /* 346 * TTM might be null for moves within the same region. 347 */ 348 if (ttm) { 349 ret = ttm_tt_populate(ttm, ctx); 350 if (ret) 351 goto out1; 352 } 353 354 add = 0; 355 dir = 1; 356 357 if ((old_mem->mem_type == new_mem->mem_type) && 358 (new_mem->start < old_mem->start + old_mem->size)) { 359 dir = -1; 360 add = new_mem->num_pages - 1; 361 } 362 363 for (i = 0; i < new_mem->num_pages; ++i) { 364 page = i * dir + add; 365 if (old_iomap == NULL) { 366 pgprot_t prot = ttm_io_prot(old_mem->placement, 367 PAGE_KERNEL); 368 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, 369 prot); 370 } else if (new_iomap == NULL) { 371 pgprot_t prot = ttm_io_prot(new_mem->placement, 372 PAGE_KERNEL); 373 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, 374 prot); 375 } else { 376 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 377 } 378 if (ret) 379 goto out1; 380 } 381 mb(); 382 out2: 383 old_copy = *old_mem; 384 *old_mem = *new_mem; 385 new_mem->mm_node = NULL; 386 387 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { 388 ttm_tt_destroy(ttm); 389 bo->ttm = NULL; 390 } 391 392 out1: 393 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); 394 out: 395 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); 396 397 /* 398 * On error, keep the mm node! 399 */ 400 if (!ret) 401 ttm_bo_mem_put(bo, &old_copy); 402 return ret; 403 } 404 EXPORT_SYMBOL(ttm_bo_move_memcpy); 405 406 static void ttm_transfered_destroy(struct ttm_buffer_object *bo) 407 { 408 struct ttm_transfer_obj *fbo; 409 410 fbo = container_of(bo, struct ttm_transfer_obj, base); 411 ttm_bo_put(fbo->bo); 412 kfree(fbo); 413 } 414 415 /** 416 * ttm_buffer_object_transfer 417 * 418 * @bo: A pointer to a struct ttm_buffer_object. 419 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object, 420 * holding the data of @bo with the old placement. 421 * 422 * This is a utility function that may be called after an accelerated move 423 * has been scheduled. A new buffer object is created as a placeholder for 424 * the old data while it's being copied. When that buffer object is idle, 425 * it can be destroyed, releasing the space of the old placement. 426 * Returns: 427 * !0: Failure. 428 */ 429 430 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, 431 struct ttm_buffer_object **new_obj) 432 { 433 struct ttm_transfer_obj *fbo; 434 int ret; 435 436 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 437 if (!fbo) 438 return -ENOMEM; 439 440 fbo->base = *bo; 441 fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; 442 443 ttm_bo_get(bo); 444 fbo->bo = bo; 445 446 /** 447 * Fix up members that we shouldn't copy directly: 448 * TODO: Explicit member copy would probably be better here. 449 */ 450 451 atomic_inc(&ttm_bo_glob.bo_count); 452 INIT_LIST_HEAD(&fbo->base.ddestroy); 453 INIT_LIST_HEAD(&fbo->base.lru); 454 INIT_LIST_HEAD(&fbo->base.swap); 455 INIT_LIST_HEAD(&fbo->base.io_reserve_lru); 456 fbo->base.moving = NULL; 457 drm_vma_node_reset(&fbo->base.base.vma_node); 458 459 kref_init(&fbo->base.kref); 460 fbo->base.destroy = &ttm_transfered_destroy; 461 fbo->base.acc_size = 0; 462 if (bo->type != ttm_bo_type_sg) 463 fbo->base.base.resv = &fbo->base.base._resv; 464 465 dma_resv_init(&fbo->base.base._resv); 466 fbo->base.base.dev = NULL; 467 ret = dma_resv_trylock(&fbo->base.base._resv); 468 WARN_ON(!ret); 469 470 *new_obj = &fbo->base; 471 return 0; 472 } 473 474 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) 475 { 476 /* Cached mappings need no adjustment */ 477 if (caching_flags & TTM_PL_FLAG_CACHED) 478 return tmp; 479 480 #if defined(__i386__) || defined(__x86_64__) 481 if (caching_flags & TTM_PL_FLAG_WC) 482 tmp = pgprot_writecombine(tmp); 483 else if (boot_cpu_data.x86 > 3) 484 tmp = pgprot_noncached(tmp); 485 #endif 486 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ 487 defined(__powerpc__) || defined(__mips__) 488 if (caching_flags & TTM_PL_FLAG_WC) 489 tmp = pgprot_writecombine(tmp); 490 else 491 tmp = pgprot_noncached(tmp); 492 #endif 493 #if defined(__sparc__) 494 tmp = pgprot_noncached(tmp); 495 #endif 496 return tmp; 497 } 498 EXPORT_SYMBOL(ttm_io_prot); 499 500 static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 501 unsigned long offset, 502 unsigned long size, 503 struct ttm_bo_kmap_obj *map) 504 { 505 struct ttm_mem_reg *mem = &bo->mem; 506 507 if (bo->mem.bus.addr) { 508 map->bo_kmap_type = ttm_bo_map_premapped; 509 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); 510 } else { 511 map->bo_kmap_type = ttm_bo_map_iomap; 512 if (mem->placement & TTM_PL_FLAG_WC) 513 map->virtual = ioremap_wc(bo->mem.bus.base + 514 bo->mem.bus.offset + offset, 515 size); 516 else 517 map->virtual = ioremap(bo->mem.bus.base + 518 bo->mem.bus.offset + offset, 519 size); 520 } 521 return (!map->virtual) ? -ENOMEM : 0; 522 } 523 524 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, 525 unsigned long start_page, 526 unsigned long num_pages, 527 struct ttm_bo_kmap_obj *map) 528 { 529 struct ttm_mem_reg *mem = &bo->mem; 530 struct ttm_operation_ctx ctx = { 531 .interruptible = false, 532 .no_wait_gpu = false 533 }; 534 struct ttm_tt *ttm; 535 pgprot_t prot; 536 int ret; 537 538 ret = ttm_tt_create(bo, true); 539 if (ret) 540 return ret; 541 542 ttm = bo->ttm; 543 ret = ttm_tt_populate(ttm, &ctx); 544 if (ret) 545 return ret; 546 547 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { 548 /* 549 * We're mapping a single page, and the desired 550 * page protection is consistent with the bo. 551 */ 552 553 map->bo_kmap_type = ttm_bo_map_kmap; 554 map->page = ttm->pages[start_page]; 555 map->virtual = kmap(map->page); 556 } else { 557 /* 558 * We need to use vmap to get the desired page protection 559 * or to make the buffer object look contiguous. 560 */ 561 prot = ttm_io_prot(mem->placement, PAGE_KERNEL); 562 map->bo_kmap_type = ttm_bo_map_vmap; 563 map->virtual = vmap(ttm->pages + start_page, num_pages, 564 0, prot); 565 } 566 return (!map->virtual) ? -ENOMEM : 0; 567 } 568 569 int ttm_bo_kmap(struct ttm_buffer_object *bo, 570 unsigned long start_page, unsigned long num_pages, 571 struct ttm_bo_kmap_obj *map) 572 { 573 struct ttm_mem_type_manager *man = 574 &bo->bdev->man[bo->mem.mem_type]; 575 unsigned long offset, size; 576 int ret; 577 578 map->virtual = NULL; 579 map->bo = bo; 580 if (num_pages > bo->num_pages) 581 return -EINVAL; 582 if (start_page > bo->num_pages) 583 return -EINVAL; 584 585 (void) ttm_mem_io_lock(man, false); 586 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); 587 ttm_mem_io_unlock(man); 588 if (ret) 589 return ret; 590 if (!bo->mem.bus.is_iomem) { 591 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); 592 } else { 593 offset = start_page << PAGE_SHIFT; 594 size = num_pages << PAGE_SHIFT; 595 return ttm_bo_ioremap(bo, offset, size, map); 596 } 597 } 598 EXPORT_SYMBOL(ttm_bo_kmap); 599 600 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) 601 { 602 struct ttm_buffer_object *bo = map->bo; 603 struct ttm_mem_type_manager *man = 604 &bo->bdev->man[bo->mem.mem_type]; 605 606 if (!map->virtual) 607 return; 608 switch (map->bo_kmap_type) { 609 case ttm_bo_map_iomap: 610 iounmap(map->virtual); 611 break; 612 case ttm_bo_map_vmap: 613 vunmap(map->virtual); 614 break; 615 case ttm_bo_map_kmap: 616 kunmap(map->page); 617 break; 618 case ttm_bo_map_premapped: 619 break; 620 default: 621 BUG(); 622 } 623 (void) ttm_mem_io_lock(man, false); 624 ttm_mem_io_free(map->bo->bdev, &map->bo->mem); 625 ttm_mem_io_unlock(man); 626 map->virtual = NULL; 627 map->page = NULL; 628 } 629 EXPORT_SYMBOL(ttm_bo_kunmap); 630 631 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 632 struct dma_fence *fence, 633 bool evict, 634 struct ttm_mem_reg *new_mem) 635 { 636 struct ttm_bo_device *bdev = bo->bdev; 637 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 638 struct ttm_mem_reg *old_mem = &bo->mem; 639 int ret; 640 struct ttm_buffer_object *ghost_obj; 641 642 dma_resv_add_excl_fence(bo->base.resv, fence); 643 if (evict) { 644 ret = ttm_bo_wait(bo, false, false); 645 if (ret) 646 return ret; 647 648 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { 649 ttm_tt_destroy(bo->ttm); 650 bo->ttm = NULL; 651 } 652 ttm_bo_free_old_node(bo); 653 } else { 654 /** 655 * This should help pipeline ordinary buffer moves. 656 * 657 * Hang old buffer memory on a new buffer object, 658 * and leave it to be released when the GPU 659 * operation has completed. 660 */ 661 662 dma_fence_put(bo->moving); 663 bo->moving = dma_fence_get(fence); 664 665 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 666 if (ret) 667 return ret; 668 669 dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); 670 671 /** 672 * If we're not moving to fixed memory, the TTM object 673 * needs to stay alive. Otherwhise hang it on the ghost 674 * bo to be unbound and destroyed. 675 */ 676 677 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) 678 ghost_obj->ttm = NULL; 679 else 680 bo->ttm = NULL; 681 682 dma_resv_unlock(&ghost_obj->base._resv); 683 ttm_bo_put(ghost_obj); 684 } 685 686 *old_mem = *new_mem; 687 new_mem->mm_node = NULL; 688 689 return 0; 690 } 691 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 692 693 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, 694 struct dma_fence *fence, bool evict, 695 struct ttm_mem_reg *new_mem) 696 { 697 struct ttm_bo_device *bdev = bo->bdev; 698 struct ttm_mem_reg *old_mem = &bo->mem; 699 700 struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; 701 struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; 702 703 int ret; 704 705 dma_resv_add_excl_fence(bo->base.resv, fence); 706 707 if (!evict) { 708 struct ttm_buffer_object *ghost_obj; 709 710 /** 711 * This should help pipeline ordinary buffer moves. 712 * 713 * Hang old buffer memory on a new buffer object, 714 * and leave it to be released when the GPU 715 * operation has completed. 716 */ 717 718 dma_fence_put(bo->moving); 719 bo->moving = dma_fence_get(fence); 720 721 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 722 if (ret) 723 return ret; 724 725 dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); 726 727 /** 728 * If we're not moving to fixed memory, the TTM object 729 * needs to stay alive. Otherwhise hang it on the ghost 730 * bo to be unbound and destroyed. 731 */ 732 733 if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED)) 734 ghost_obj->ttm = NULL; 735 else 736 bo->ttm = NULL; 737 738 dma_resv_unlock(&ghost_obj->base._resv); 739 ttm_bo_put(ghost_obj); 740 741 } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { 742 743 /** 744 * BO doesn't have a TTM we need to bind/unbind. Just remember 745 * this eviction and free up the allocation 746 */ 747 748 spin_lock(&from->move_lock); 749 if (!from->move || dma_fence_is_later(fence, from->move)) { 750 dma_fence_put(from->move); 751 from->move = dma_fence_get(fence); 752 } 753 spin_unlock(&from->move_lock); 754 755 ttm_bo_free_old_node(bo); 756 757 dma_fence_put(bo->moving); 758 bo->moving = dma_fence_get(fence); 759 760 } else { 761 /** 762 * Last resort, wait for the move to be completed. 763 * 764 * Should never happen in pratice. 765 */ 766 767 ret = ttm_bo_wait(bo, false, false); 768 if (ret) 769 return ret; 770 771 if (to->flags & TTM_MEMTYPE_FLAG_FIXED) { 772 ttm_tt_destroy(bo->ttm); 773 bo->ttm = NULL; 774 } 775 ttm_bo_free_old_node(bo); 776 } 777 778 *old_mem = *new_mem; 779 new_mem->mm_node = NULL; 780 781 return 0; 782 } 783 EXPORT_SYMBOL(ttm_bo_pipeline_move); 784 785 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) 786 { 787 struct ttm_buffer_object *ghost; 788 int ret; 789 790 ret = ttm_buffer_object_transfer(bo, &ghost); 791 if (ret) 792 return ret; 793 794 ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); 795 /* Last resort, wait for the BO to be idle when we are OOM */ 796 if (ret) 797 ttm_bo_wait(bo, false, false); 798 799 memset(&bo->mem, 0, sizeof(bo->mem)); 800 bo->mem.mem_type = TTM_PL_SYSTEM; 801 bo->ttm = NULL; 802 803 dma_resv_unlock(&ghost->base._resv); 804 ttm_bo_put(ghost); 805 806 return 0; 807 } 808