/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
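/*
 * IO address space reservation helpers.
 *
 * When a memory type's io_reserve_fastpath flag is set, the helpers
 * below skip the io_reserve_mutex and the io_reserved_count
 * bookkeeping and call straight into the driver's io_mem_reserve()
 * hook. Without the fastpath, reservations are refcounted under the
 * mutex and, if use_io_reserve_lru is set, a failed (-EAGAIN)
 * reservation may be retried after evicting another buffer object's
 * IO mapping via ttm_mem_io_evict().
 */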
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
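/*
 * The two helpers below copy a single page between an ioremapped
 * region and a TTM page, mapping the system page with the caching
 * attributes requested in prot (kmap_atomic_prot() on x86,
 * vmap()/kmap() on other architectures).
 */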
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
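/*
 * Example (illustrative only; foo_bo_move() and foo_move_blit() are
 * hypothetical driver code): a driver's move() callback typically
 * tries an accelerated copy first and falls back to the helpers
 * above.
 *
 *	static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			       bool interruptible, bool no_wait_gpu,
 *			       struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *		int ret;
 *
 *		if (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		    new_mem->mem_type == TTM_PL_TT)
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *
 *		ret = foo_move_blit(bo, evict, no_wait_gpu, new_mem);
 *		if (ret)
 *			ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *		return ret;
 *	}
 */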
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
	defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
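/*
 * Example (illustrative only): callers pass the placement caching
 * flags together with a base protection, e.g. when building the page
 * protection for a userspace mapping in a fault handler:
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vma->vm_page_prot);
 */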
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
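/*
 * Example (illustrative only): kernel CPU access to a reserved buffer
 * object brackets the access with a kmap/kunmap pair and uses
 * ttm_kmap_obj_virtual() to obtain the mapping:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (!ret) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... access the buffer through virtual, honouring is_iomem ...
 *
 *		ttm_bo_kunmap(&map);
 *	}
 */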
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
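/*
 * Example (illustrative only; foo_copy_async() is a hypothetical
 * driver routine that schedules an asynchronous GPU copy and returns
 * its fence): after scheduling the copy, the driver hands the fence
 * to ttm_bo_move_accel_cleanup() so the old placement is released
 * only once the copy has completed.
 *
 *	struct fence *fence = foo_copy_async(bo, new_mem);
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
 *					no_wait_gpu, new_mem);
 *	fence_put(fence);
 *	return ret;
 */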