1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 /************************************************************************** 3 * 4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 25 * USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 **************************************************************************/ 28 /* 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 */ 31 32 #include <drm/ttm/ttm_bo_driver.h> 33 #include <drm/ttm/ttm_placement.h> 34 #include <drm/drm_cache.h> 35 #include <drm/drm_vma_manager.h> 36 #include <linux/iosys-map.h> 37 #include <linux/io.h> 38 #include <linux/highmem.h> 39 #include <linux/wait.h> 40 #include <linux/slab.h> 41 #include <linux/vmalloc.h> 42 #include <linux/module.h> 43 #include <linux/dma-resv.h> 44 45 struct ttm_transfer_obj { 46 struct ttm_buffer_object base; 47 struct ttm_buffer_object *bo; 48 }; 49 50 int ttm_mem_io_reserve(struct ttm_device *bdev, 51 struct ttm_resource *mem) 52 { 53 if (mem->bus.offset || mem->bus.addr) 54 return 0; 55 56 mem->bus.is_iomem = false; 57 if (!bdev->funcs->io_mem_reserve) 58 return 0; 59 60 return bdev->funcs->io_mem_reserve(bdev, mem); 61 } 62 63 void ttm_mem_io_free(struct ttm_device *bdev, 64 struct ttm_resource *mem) 65 { 66 if (!mem) 67 return; 68 69 if (!mem->bus.offset && !mem->bus.addr) 70 return; 71 72 if (bdev->funcs->io_mem_free) 73 bdev->funcs->io_mem_free(bdev, mem); 74 75 mem->bus.offset = 0; 76 mem->bus.addr = NULL; 77 } 78 79 /** 80 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation. 81 * @clear: Whether to clear rather than copy. 82 * @num_pages: Number of pages of the operation. 83 * @dst_iter: A struct ttm_kmap_iter representing the destination resource. 84 * @src_iter: A struct ttm_kmap_iter representing the source resource. 85 * 86 * This function is intended to be able to move out async under a 87 * dma-fence if desired. 88 */ 89 void ttm_move_memcpy(bool clear, 90 u32 num_pages, 91 struct ttm_kmap_iter *dst_iter, 92 struct ttm_kmap_iter *src_iter) 93 { 94 const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; 95 const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; 96 struct iosys_map src_map, dst_map; 97 pgoff_t i; 98 99 /* Single TTM move. NOP */ 100 if (dst_ops->maps_tt && src_ops->maps_tt) 101 return; 102 103 /* Don't move nonexistent data. Clear destination instead. */ 104 if (clear) { 105 for (i = 0; i < num_pages; ++i) { 106 dst_ops->map_local(dst_iter, &dst_map, i); 107 if (dst_map.is_iomem) 108 memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE); 109 else 110 memset(dst_map.vaddr, 0, PAGE_SIZE); 111 if (dst_ops->unmap_local) 112 dst_ops->unmap_local(dst_iter, &dst_map); 113 } 114 return; 115 } 116 117 for (i = 0; i < num_pages; ++i) { 118 dst_ops->map_local(dst_iter, &dst_map, i); 119 src_ops->map_local(src_iter, &src_map, i); 120 121 drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE); 122 123 if (src_ops->unmap_local) 124 src_ops->unmap_local(src_iter, &src_map); 125 if (dst_ops->unmap_local) 126 dst_ops->unmap_local(dst_iter, &dst_map); 127 } 128 } 129 EXPORT_SYMBOL(ttm_move_memcpy); 130 131 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 132 struct ttm_operation_ctx *ctx, 133 struct ttm_resource *dst_mem) 134 { 135 struct ttm_device *bdev = bo->bdev; 136 struct ttm_resource_manager *dst_man = 137 ttm_manager_type(bo->bdev, dst_mem->mem_type); 138 struct ttm_tt *ttm = bo->ttm; 139 struct ttm_resource *src_mem = bo->resource; 140 struct ttm_resource_manager *src_man = 141 ttm_manager_type(bdev, src_mem->mem_type); 142 union { 143 struct ttm_kmap_iter_tt tt; 144 struct ttm_kmap_iter_linear_io io; 145 } _dst_iter, _src_iter; 146 struct ttm_kmap_iter *dst_iter, *src_iter; 147 bool clear; 148 int ret = 0; 149 150 if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || 151 dst_man->use_tt)) { 152 ret = ttm_tt_populate(bdev, ttm, ctx); 153 if (ret) 154 return ret; 155 } 156 157 dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem); 158 if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt) 159 dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm); 160 if (IS_ERR(dst_iter)) 161 return PTR_ERR(dst_iter); 162 163 src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem); 164 if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt) 165 src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm); 166 if (IS_ERR(src_iter)) { 167 ret = PTR_ERR(src_iter); 168 goto out_src_iter; 169 } 170 171 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); 172 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) 173 ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); 174 175 if (!src_iter->ops->maps_tt) 176 ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem); 177 ttm_bo_move_sync_cleanup(bo, dst_mem); 178 179 out_src_iter: 180 if (!dst_iter->ops->maps_tt) 181 ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem); 182 183 return ret; 184 } 185 EXPORT_SYMBOL(ttm_bo_move_memcpy); 186 187 static void ttm_transfered_destroy(struct ttm_buffer_object *bo) 188 { 189 struct ttm_transfer_obj *fbo; 190 191 fbo = container_of(bo, struct ttm_transfer_obj, base); 192 dma_resv_fini(&fbo->base.base._resv); 193 ttm_bo_put(fbo->bo); 194 kfree(fbo); 195 } 196 197 /** 198 * ttm_buffer_object_transfer 199 * 200 * @bo: A pointer to a struct ttm_buffer_object. 201 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object, 202 * holding the data of @bo with the old placement. 203 * 204 * This is a utility function that may be called after an accelerated move 205 * has been scheduled. A new buffer object is created as a placeholder for 206 * the old data while it's being copied. When that buffer object is idle, 207 * it can be destroyed, releasing the space of the old placement. 208 * Returns: 209 * !0: Failure. 210 */ 211 212 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, 213 struct ttm_buffer_object **new_obj) 214 { 215 struct ttm_transfer_obj *fbo; 216 int ret; 217 218 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 219 if (!fbo) 220 return -ENOMEM; 221 222 fbo->base = *bo; 223 224 /** 225 * Fix up members that we shouldn't copy directly: 226 * TODO: Explicit member copy would probably be better here. 227 */ 228 229 atomic_inc(&ttm_glob.bo_count); 230 INIT_LIST_HEAD(&fbo->base.ddestroy); 231 drm_vma_node_reset(&fbo->base.base.vma_node); 232 233 kref_init(&fbo->base.kref); 234 fbo->base.destroy = &ttm_transfered_destroy; 235 fbo->base.pin_count = 0; 236 if (bo->type != ttm_bo_type_sg) 237 fbo->base.base.resv = &fbo->base.base._resv; 238 239 dma_resv_init(&fbo->base.base._resv); 240 fbo->base.base.dev = NULL; 241 ret = dma_resv_trylock(&fbo->base.base._resv); 242 WARN_ON(!ret); 243 244 if (fbo->base.resource) { 245 ttm_resource_set_bo(fbo->base.resource, &fbo->base); 246 bo->resource = NULL; 247 ttm_bo_set_bulk_move(&fbo->base, NULL); 248 } else { 249 fbo->base.bulk_move = NULL; 250 } 251 252 ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); 253 if (ret) { 254 kfree(fbo); 255 return ret; 256 } 257 258 ttm_bo_get(bo); 259 fbo->bo = bo; 260 261 ttm_bo_move_to_lru_tail_unlocked(&fbo->base); 262 263 *new_obj = &fbo->base; 264 return 0; 265 } 266 267 pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, 268 pgprot_t tmp) 269 { 270 struct ttm_resource_manager *man; 271 enum ttm_caching caching; 272 273 man = ttm_manager_type(bo->bdev, res->mem_type); 274 caching = man->use_tt ? bo->ttm->caching : res->bus.caching; 275 276 return ttm_prot_from_caching(caching, tmp); 277 } 278 EXPORT_SYMBOL(ttm_io_prot); 279 280 static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 281 unsigned long offset, 282 unsigned long size, 283 struct ttm_bo_kmap_obj *map) 284 { 285 struct ttm_resource *mem = bo->resource; 286 287 if (bo->resource->bus.addr) { 288 map->bo_kmap_type = ttm_bo_map_premapped; 289 map->virtual = ((u8 *)bo->resource->bus.addr) + offset; 290 } else { 291 resource_size_t res = bo->resource->bus.offset + offset; 292 293 map->bo_kmap_type = ttm_bo_map_iomap; 294 if (mem->bus.caching == ttm_write_combined) 295 map->virtual = ioremap_wc(res, size); 296 #ifdef CONFIG_X86 297 else if (mem->bus.caching == ttm_cached) 298 map->virtual = ioremap_cache(res, size); 299 #endif 300 else 301 map->virtual = ioremap(res, size); 302 } 303 return (!map->virtual) ? -ENOMEM : 0; 304 } 305 306 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, 307 unsigned long start_page, 308 unsigned long num_pages, 309 struct ttm_bo_kmap_obj *map) 310 { 311 struct ttm_resource *mem = bo->resource; 312 struct ttm_operation_ctx ctx = { 313 .interruptible = false, 314 .no_wait_gpu = false 315 }; 316 struct ttm_tt *ttm = bo->ttm; 317 pgprot_t prot; 318 int ret; 319 320 BUG_ON(!ttm); 321 322 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); 323 if (ret) 324 return ret; 325 326 if (num_pages == 1 && ttm->caching == ttm_cached) { 327 /* 328 * We're mapping a single page, and the desired 329 * page protection is consistent with the bo. 330 */ 331 332 map->bo_kmap_type = ttm_bo_map_kmap; 333 map->page = ttm->pages[start_page]; 334 map->virtual = kmap(map->page); 335 } else { 336 /* 337 * We need to use vmap to get the desired page protection 338 * or to make the buffer object look contiguous. 339 */ 340 prot = ttm_io_prot(bo, mem, PAGE_KERNEL); 341 map->bo_kmap_type = ttm_bo_map_vmap; 342 map->virtual = vmap(ttm->pages + start_page, num_pages, 343 0, prot); 344 } 345 return (!map->virtual) ? -ENOMEM : 0; 346 } 347 348 int ttm_bo_kmap(struct ttm_buffer_object *bo, 349 unsigned long start_page, unsigned long num_pages, 350 struct ttm_bo_kmap_obj *map) 351 { 352 unsigned long offset, size; 353 int ret; 354 355 map->virtual = NULL; 356 map->bo = bo; 357 if (num_pages > bo->resource->num_pages) 358 return -EINVAL; 359 if ((start_page + num_pages) > bo->resource->num_pages) 360 return -EINVAL; 361 362 ret = ttm_mem_io_reserve(bo->bdev, bo->resource); 363 if (ret) 364 return ret; 365 if (!bo->resource->bus.is_iomem) { 366 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); 367 } else { 368 offset = start_page << PAGE_SHIFT; 369 size = num_pages << PAGE_SHIFT; 370 return ttm_bo_ioremap(bo, offset, size, map); 371 } 372 } 373 EXPORT_SYMBOL(ttm_bo_kmap); 374 375 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) 376 { 377 if (!map->virtual) 378 return; 379 switch (map->bo_kmap_type) { 380 case ttm_bo_map_iomap: 381 iounmap(map->virtual); 382 break; 383 case ttm_bo_map_vmap: 384 vunmap(map->virtual); 385 break; 386 case ttm_bo_map_kmap: 387 kunmap(map->page); 388 break; 389 case ttm_bo_map_premapped: 390 break; 391 default: 392 BUG(); 393 } 394 ttm_mem_io_free(map->bo->bdev, map->bo->resource); 395 map->virtual = NULL; 396 map->page = NULL; 397 } 398 EXPORT_SYMBOL(ttm_bo_kunmap); 399 400 int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) 401 { 402 struct ttm_resource *mem = bo->resource; 403 int ret; 404 405 ret = ttm_mem_io_reserve(bo->bdev, mem); 406 if (ret) 407 return ret; 408 409 if (mem->bus.is_iomem) { 410 void __iomem *vaddr_iomem; 411 412 if (mem->bus.addr) 413 vaddr_iomem = (void __iomem *)mem->bus.addr; 414 else if (mem->bus.caching == ttm_write_combined) 415 vaddr_iomem = ioremap_wc(mem->bus.offset, 416 bo->base.size); 417 #ifdef CONFIG_X86 418 else if (mem->bus.caching == ttm_cached) 419 vaddr_iomem = ioremap_cache(mem->bus.offset, 420 bo->base.size); 421 #endif 422 else 423 vaddr_iomem = ioremap(mem->bus.offset, bo->base.size); 424 425 if (!vaddr_iomem) 426 return -ENOMEM; 427 428 iosys_map_set_vaddr_iomem(map, vaddr_iomem); 429 430 } else { 431 struct ttm_operation_ctx ctx = { 432 .interruptible = false, 433 .no_wait_gpu = false 434 }; 435 struct ttm_tt *ttm = bo->ttm; 436 pgprot_t prot; 437 void *vaddr; 438 439 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); 440 if (ret) 441 return ret; 442 443 /* 444 * We need to use vmap to get the desired page protection 445 * or to make the buffer object look contiguous. 446 */ 447 prot = ttm_io_prot(bo, mem, PAGE_KERNEL); 448 vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot); 449 if (!vaddr) 450 return -ENOMEM; 451 452 iosys_map_set_vaddr(map, vaddr); 453 } 454 455 return 0; 456 } 457 EXPORT_SYMBOL(ttm_bo_vmap); 458 459 void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map) 460 { 461 struct ttm_resource *mem = bo->resource; 462 463 if (iosys_map_is_null(map)) 464 return; 465 466 if (!map->is_iomem) 467 vunmap(map->vaddr); 468 else if (!mem->bus.addr) 469 iounmap(map->vaddr_iomem); 470 iosys_map_clear(map); 471 472 ttm_mem_io_free(bo->bdev, bo->resource); 473 } 474 EXPORT_SYMBOL(ttm_bo_vunmap); 475 476 static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, 477 bool dst_use_tt) 478 { 479 int ret; 480 ret = ttm_bo_wait(bo, false, false); 481 if (ret) 482 return ret; 483 484 if (!dst_use_tt) 485 ttm_bo_tt_destroy(bo); 486 ttm_resource_free(bo, &bo->resource); 487 return 0; 488 } 489 490 static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, 491 struct dma_fence *fence, 492 bool dst_use_tt) 493 { 494 struct ttm_buffer_object *ghost_obj; 495 int ret; 496 497 /** 498 * This should help pipeline ordinary buffer moves. 499 * 500 * Hang old buffer memory on a new buffer object, 501 * and leave it to be released when the GPU 502 * operation has completed. 503 */ 504 505 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 506 if (ret) 507 return ret; 508 509 dma_resv_add_fence(&ghost_obj->base._resv, fence, 510 DMA_RESV_USAGE_KERNEL); 511 512 /** 513 * If we're not moving to fixed memory, the TTM object 514 * needs to stay alive. Otherwhise hang it on the ghost 515 * bo to be unbound and destroyed. 516 */ 517 518 if (dst_use_tt) 519 ghost_obj->ttm = NULL; 520 else 521 bo->ttm = NULL; 522 523 dma_resv_unlock(&ghost_obj->base._resv); 524 ttm_bo_put(ghost_obj); 525 return 0; 526 } 527 528 static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, 529 struct dma_fence *fence) 530 { 531 struct ttm_device *bdev = bo->bdev; 532 struct ttm_resource_manager *from; 533 534 from = ttm_manager_type(bdev, bo->resource->mem_type); 535 536 /** 537 * BO doesn't have a TTM we need to bind/unbind. Just remember 538 * this eviction and free up the allocation 539 */ 540 spin_lock(&from->move_lock); 541 if (!from->move || dma_fence_is_later(fence, from->move)) { 542 dma_fence_put(from->move); 543 from->move = dma_fence_get(fence); 544 } 545 spin_unlock(&from->move_lock); 546 547 ttm_resource_free(bo, &bo->resource); 548 } 549 550 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 551 struct dma_fence *fence, 552 bool evict, 553 bool pipeline, 554 struct ttm_resource *new_mem) 555 { 556 struct ttm_device *bdev = bo->bdev; 557 struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type); 558 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 559 int ret = 0; 560 561 dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); 562 if (!evict) 563 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); 564 else if (!from->use_tt && pipeline) 565 ttm_bo_move_pipeline_evict(bo, fence); 566 else 567 ret = ttm_bo_wait_free_node(bo, man->use_tt); 568 569 if (ret) 570 return ret; 571 572 ttm_bo_assign_mem(bo, new_mem); 573 574 return 0; 575 } 576 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 577 578 void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, 579 struct ttm_resource *new_mem) 580 { 581 struct ttm_device *bdev = bo->bdev; 582 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 583 int ret; 584 585 ret = ttm_bo_wait_free_node(bo, man->use_tt); 586 if (WARN_ON(ret)) 587 return; 588 589 ttm_bo_assign_mem(bo, new_mem); 590 } 591 EXPORT_SYMBOL(ttm_bo_move_sync_cleanup); 592 593 /** 594 * ttm_bo_pipeline_gutting - purge the contents of a bo 595 * @bo: The buffer object 596 * 597 * Purge the contents of a bo, async if the bo is not idle. 598 * After a successful call, the bo is left unpopulated in 599 * system placement. The function may wait uninterruptible 600 * for idle on OOM. 601 * 602 * Return: 0 if successful, negative error code on failure. 603 */ 604 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) 605 { 606 static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM }; 607 struct ttm_buffer_object *ghost; 608 struct ttm_resource *sys_res; 609 struct ttm_tt *ttm; 610 int ret; 611 612 ret = ttm_resource_alloc(bo, &sys_mem, &sys_res); 613 if (ret) 614 return ret; 615 616 /* If already idle, no need for ghost object dance. */ 617 ret = ttm_bo_wait(bo, false, true); 618 if (ret != -EBUSY) { 619 if (!bo->ttm) { 620 /* See comment below about clearing. */ 621 ret = ttm_tt_create(bo, true); 622 if (ret) 623 goto error_free_sys_mem; 624 } else { 625 ttm_tt_unpopulate(bo->bdev, bo->ttm); 626 if (bo->type == ttm_bo_type_device) 627 ttm_tt_mark_for_clear(bo->ttm); 628 } 629 ttm_resource_free(bo, &bo->resource); 630 ttm_bo_assign_mem(bo, sys_res); 631 return 0; 632 } 633 634 /* 635 * We need an unpopulated ttm_tt after giving our current one, 636 * if any, to the ghost object. And we can't afford to fail 637 * creating one *after* the operation. If the bo subsequently gets 638 * resurrected, make sure it's cleared (if ttm_bo_type_device) 639 * to avoid leaking sensitive information to user-space. 640 */ 641 642 ttm = bo->ttm; 643 bo->ttm = NULL; 644 ret = ttm_tt_create(bo, true); 645 swap(bo->ttm, ttm); 646 if (ret) 647 goto error_free_sys_mem; 648 649 ret = ttm_buffer_object_transfer(bo, &ghost); 650 if (ret) 651 goto error_destroy_tt; 652 653 ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); 654 /* Last resort, wait for the BO to be idle when we are OOM */ 655 if (ret) 656 ttm_bo_wait(bo, false, false); 657 658 dma_resv_unlock(&ghost->base._resv); 659 ttm_bo_put(ghost); 660 bo->ttm = ttm; 661 ttm_bo_assign_mem(bo, sys_res); 662 return 0; 663 664 error_destroy_tt: 665 ttm_tt_destroy(bo->bdev, ttm); 666 667 error_free_sys_mem: 668 ttm_resource_free(bo, &sys_res); 669 return ret; 670 } 671