/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
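
/*
 * Illustrative sketch (not part of the original file): a driver's
 * ttm_bo_driver.move() callback typically dispatches to ttm_bo_move_ttm()
 * when the destination is a TTM-backed system placement, and falls back to
 * ttm_bo_move_memcpy() when no hardware copy engine is available. The
 * function name below is hypothetical.
 */
static int __maybe_unused example_driver_move(struct ttm_buffer_object *bo,
					      bool evict,
					      struct ttm_operation_ctx *ctx,
					      struct ttm_mem_reg *new_mem)
{
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		return ttm_bo_move_ttm(bo, ctx, new_mem);

	/* CPU copy as a last resort; real drivers try their blit engine first. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}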

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(!man->use_io_reserve_lru))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(!man->use_io_reserve_lru))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	bo = list_first_entry_or_null(&man->io_reserve_lru,
				      struct ttm_buffer_object,
				      io_reserve_lru);
	if (!bo)
		return -ENOSPC;

	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);
	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;

	if (mem->bus.io_reserved_count++)
		return 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;

retry:
	ret = bdev->driver->io_mem_reserve(bdev, mem);
	if (ret == -ENOSPC) {
		ret = ttm_mem_io_evict(man);
		if (ret == 0)
			goto retry;
	}
	return ret;
}
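
/*
 * Illustrative sketch (not part of the original file): the rough shape of a
 * driver's io_mem_reserve() hook as invoked by ttm_mem_io_reserve() above.
 * The aperture base is a hypothetical placeholder; real drivers read it from
 * their device structure (usually a PCI BAR).
 */
static int __maybe_unused example_io_mem_reserve(struct ttm_bo_device *bdev,
						 struct ttm_mem_reg *mem)
{
	const phys_addr_t example_aper_base = 0; /* hypothetical VRAM BAR base */

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory is reached through the TTM pages, not iomapped. */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = example_aper_base;
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}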

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	if (--mem->bus.io_reserved_count)
		return;

	if (!bdev->driver->io_mem_free)
		return;

	bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (mem->bus.io_reserved_vm)
		return 0;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (unlikely(ret != 0))
		return ret;
	mem->bus.io_reserved_vm = true;
	if (man->use_io_reserve_lru)
		list_add_tail(&bo->io_reserve_lru,
			      &man->io_reserve_lru);
	return 0;
}
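
/*
 * Illustrative sketch (not part of the original file): the CPU fault path is
 * the typical caller of ttm_mem_io_reserve_vm(). It takes the io_reserve
 * mutex for the memory type, reserves the aperture range, and drops the
 * mutex again before inserting PTEs. The function name is hypothetical.
 */
static int __maybe_unused example_reserve_for_fault(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true);
	if (ret)
		return ret;

	ret = ttm_mem_io_reserve_vm(bo);
	ttm_mem_io_unlock(man);
	return ret;
}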

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (!mem->bus.io_reserved_vm)
		return;

	mem->bus.io_reserved_vm = false;
	list_del_init(&bo->io_reserve_lru);
	ttm_mem_io_free(bo->bdev, mem);
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset,
				       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base +
					       bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
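
/*
 * Illustrative sketch (not part of the original file): the usual
 * ttm_bo_kmap()/ttm_bo_kunmap() pairing. ttm_kmap_obj_virtual() (from
 * ttm_bo_api.h) tells the caller whether the returned pointer is an iomem
 * cookie. The function name is hypothetical.
 */
static int __maybe_unused example_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
	else
		memset(virtual, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}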

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
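
/*
 * Illustrative sketch (not part of the original file): after scheduling a
 * hardware copy, a driver hands the resulting fence to
 * ttm_bo_move_accel_cleanup() so the old node is only released once the copy
 * has completed. The helper takes its own references, so the caller still
 * drops its fence reference afterwards. The function name is hypothetical and
 * the fence is assumed to come from the driver's copy engine.
 */
static int __maybe_unused example_accel_move_done(struct ttm_buffer_object *bo,
						  struct dma_fence *copy_fence,
						  bool evict,
						  struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_move_accel_cleanup(bo, copy_fence, evict, new_mem);
	dma_fence_put(copy_fence);
	return ret;
}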

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/*
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/*
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}
805