/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

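/**
 * ttm_mem_io_reserve - Reserve the iomem resources of a resource.
 * @bdev: The TTM device.
 * @mem: The resource that may need an ioremappable aperture.
 *
 * Calls the driver's io_mem_reserve() callback unless the aperture has
 * already been reserved (bus.offset or bus.addr already set) or the
 * driver doesn't implement the callback.
 *
 * Return: 0 on success, negative error code otherwise.
 */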
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

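/**
 * ttm_mem_io_free - Free previously reserved iomem resources.
 * @bdev: The TTM device.
 * @mem: The resource whose aperture reservation is released.
 *
 * Counterpart of ttm_mem_io_reserve(). Calls the driver's io_mem_free()
 * callback, if any, and clears the bus fields of @mem.
 */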
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @bo: The struct ttm_buffer_object.
 * @num_pages: The number of pages to copy.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(struct ttm_buffer_object *bo,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct ttm_tt *ttm = bo->ttm;
	struct dma_buf_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return;

		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

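/**
 * ttm_bo_move_memcpy - Fallback move by mapping and copying page by page.
 * @bo: The buffer object to move.
 * @ctx: Operation context, used when the TTM needs to be populated.
 * @dst_mem: The destination resource.
 *
 * Sets up kmap iterators for the source and destination resources, hands
 * them to ttm_move_memcpy(), and finishes the move with
 * ttm_bo_move_sync_cleanup().
 *
 * Return: 0 on success, negative error code otherwise.
 */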
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	struct ttm_resource src_copy = *src_mem;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	src_copy = *src_mem;
	ttm_bo_move_sync_cleanup(bo, dst_mem);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

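/*
 * Destroy callback for the ghost objects created by
 * ttm_buffer_object_transfer().
 */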
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

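/**
 * ttm_io_prot - Compute the page protection for mapping a resource.
 * @bo: The buffer object.
 * @res: The resource to be mapped.
 * @tmp: The base page protection to derive from.
 *
 * Return: @tmp adjusted for the caching mode of @res, or of the bo's TTM
 * when the resource manager uses a TTM.
 */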
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

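/*
 * Map an io-memory backed range of a bo, either through a premapped bus
 * address or by ioremapping it with the resource's caching mode.
 */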
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

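/*
 * Map a system-memory backed range of a bo, using kmap() for a single
 * cached page and vmap() otherwise.
 */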
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

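/**
 * ttm_bo_kmap - Map part of a buffer object into kernel address space.
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: The number of pages to map.
 * @map: The kmap object describing the resulting mapping.
 *
 * Depending on where the buffer currently resides, the range is either
 * ioremapped or kmapped/vmapped. Undo the mapping with ttm_bo_kunmap().
 *
 * A minimal usage sketch (error handling and locking are the caller's
 * responsibility; the bo is typically reserved across the access):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *vaddr;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &map);
 *	if (!ret) {
 *		vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		// ... access the buffer through vaddr, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Return: 0 on success, negative error code otherwise.
 */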
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

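/**
 * ttm_bo_kunmap - Unmap a mapping set up by ttm_bo_kmap().
 * @map: The kmap object returned by ttm_bo_kmap().
 *
 * Undoes the ioremap, vmap or kmap of @map and releases the iomem
 * reservation taken for it. A map without a virtual address is silently
 * ignored.
 */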
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

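/**
 * ttm_bo_vmap - Map the whole buffer object into kernel address space.
 * @bo: The buffer object.
 * @map: The dma-buf map to set up.
 *
 * Maps io memory with ioremap*() or system memory with vmap(), populating
 * the TTM first if necessary and selecting the mapping attributes from the
 * resource's caching mode. Undo the mapping with ttm_bo_vunmap().
 *
 * Return: 0 on success, negative error code otherwise.
 */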
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

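/**
 * ttm_bo_vunmap - Unmap a mapping set up by ttm_bo_vmap().
 * @bo: The buffer object.
 * @map: The dma-buf map to tear down.
 *
 * Undoes the mapping (vunmap() or iounmap(), unless the aperture was
 * premapped) and releases any iomem reservation. A NULL map is silently
 * ignored.
 */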
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

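/*
 * Wait for the bo to become idle, destroy the TTM if the destination
 * doesn't use one, and free the old resource.
 */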
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;
	bo->resource = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * The BO doesn't have a TTM that we need to bind/unbind. Just
	 * remember this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

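/**
 * ttm_bo_move_accel_cleanup - Finish a hardware-accelerated move.
 * @bo: The buffer object that was moved.
 * @fence: The fence signaling completion of the copy operation.
 * @evict: Whether this is an eviction.
 * @pipeline: Whether the eviction may be pipelined.
 * @new_mem: The destination resource.
 *
 * Adds @fence to the bo's reservation object and then either hands the
 * old resource to a ghost object (ordinary moves), remembers the fence on
 * the source manager (pipelined evictions), or waits for idle and frees
 * the old resource, before assigning @new_mem to the bo.
 *
 * Return: 0 on success, negative error code otherwise.
 */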
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for the bo to become idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	bo->resource = NULL;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}
649