xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_bo_util.c (revision 79e790ff)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

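/*
 * ttm_mem_io_reserve()/ttm_mem_io_free() - reserve and release the I/O
 * aperture backing a resource via the driver's io_mem_reserve/io_mem_free
 * hooks. Reserving is a no-op when the aperture is already reserved
 * (bus.offset or bus.addr set) or when the driver provides no hook.
 */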
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

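/*
 * Map the I/O aperture of @mem into kernel address space, honouring the
 * resource's caching mode. Returns 0 with *virtual == NULL for resources
 * that are not iomem (i.e. system memory).
 */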
static int ttm_resource_ioremap(struct ttm_device *bdev,
			       struct ttm_resource *mem,
			       void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		if (mem->bus.caching == ttm_write_combined)
			addr = ioremap_wc(mem->bus.offset, bus_size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			addr = ioremap_cache(mem->bus.offset, bus_size);
#endif
		else
			addr = ioremap(mem->bus.offset, bus_size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_resource_iounmap(struct ttm_device *bdev,
				struct ttm_resource *mem,
				void *virtual)
{
	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}

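/*
 * Copy a single page between two io-mapped apertures, 32 bits at a time.
 */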
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

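/*
 * Copy one page from an io-mapped source into the TTM page array,
 * mapping the destination page with the requested protection.
 */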
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}

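/*
 * Copy one page from the TTM page array into an io-mapped destination,
 * mapping the source page with the requested protection.
 */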
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}

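/**
 * ttm_bo_move_memcpy - fallback move helper using CPU copies.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Waits for the buffer to be idle (according to @ctx), then copies its
 * contents to @new_mem page by page with the CPU. Used when the driver
 * cannot accelerate the move.
 *
 * Returns:
 * !0: Failure.
 */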
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *old_mem = &bo->mem;
	struct ttm_resource old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			goto out1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, i);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;

	ttm_bo_assign_mem(bo, new_mem);

	if (!man->use_tt)
		ttm_bo_tt_destroy(bo);

out1:
	ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_resource_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_resource_free(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

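/**
 * ttm_io_prot - Map a caching mode to a page protection value.
 *
 * @bo: The buffer object.
 * @res: The resource the mapping belongs to.
 * @tmp: The base page protection to adjust.
 *
 * Returns @tmp adjusted for the caching mode of @res (or of the TTM for
 * resources backed by a TT), using write-combined or uncached protection
 * where the architecture requires it.
 */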
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

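/*
 * Kmap helper for io memory: use the pre-mapped bus address if there is
 * one, otherwise ioremap the requested range with the resource's caching.
 */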
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
						  size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
						  size);
#endif
		else
			map->virtual = ioremap(bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

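/*
 * Kmap helper for TT memory: populate the TTM and either kmap a single
 * cached page or vmap the range with the proper page protection.
 */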
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

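/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj describing the map.
 *
 * Sets up a kernel virtual mapping using ioremap, vmap or kmap depending
 * on where the buffer currently resides. Undo it with ttm_bo_kunmap().
 *
 * Minimal usage sketch (driver side; the bo is typically reserved by the
 * caller):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (!ttm_bo_kmap(bo, 0, 1, &map)) {
 *		void *vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access vaddr ...
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Returns: 0 on success, negative error code on failure (e.g. -EINVAL for
 * an out-of-range request, -ENOMEM if the mapping fails).
 */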
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->mem.num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->mem.num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

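/**
 * ttm_bo_kunmap - unmap a kernel mapping set up by ttm_bo_kmap().
 *
 * @map: the ttm_bo_kmap_obj returned by ttm_bo_kmap().
 *
 * Tears down the mapping according to its type and releases the I/O
 * reservation taken for it.
 */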
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

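/**
 * ttm_bo_vmap - map the whole buffer object into kernel address space.
 *
 * @bo: The buffer object.
 * @map: dma-buf mapping structure to store the address in.
 *
 * Maps io memory via ioremap and system memory via vmap, recording the
 * result (and whether it is iomem) in @map. Undone by ttm_bo_vunmap().
 *
 * Returns: 0 on success, negative error code on failure.
 */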
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = &bo->mem;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						  bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

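/**
 * ttm_bo_vunmap - unmap a mapping set up by ttm_bo_vmap().
 *
 * @bo: The buffer object.
 * @map: dma-buf mapping to tear down; cleared on return.
 */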
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

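/*
 * Wait for the buffer to become idle, then free its old resource and,
 * for moves to fixed (non-TT) memory, its TTM.
 */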
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->mem);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

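/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hardware-accelerated moves.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when the move is complete.
 * @evict: This is an evict move.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Should be called by the driver after an accelerated move has been
 * scheduled: adds @fence to the buffer's reservation object and either
 * hangs the old backing store on a ghost object, pipelines the eviction,
 * or waits for the move to finish before freeing the old node, and then
 * assigns @new_mem to the buffer.
 *
 * Returns: 0 on success, negative error code on failure.
 */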
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

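/**
 * ttm_bo_pipeline_gutting - purge the contents of a buffer object.
 *
 * @bo: The buffer object.
 *
 * Transfers the current backing store to a ghost object so it can be
 * released once the bo is idle, and leaves the bo empty in system memory.
 *
 * Returns: 0 on success, negative error code if the ghost object cannot
 * be created.
 */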
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}