/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

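/**
 * ttm_bo_move_ttm - move a buffer object by (re)binding its TTM
 * @bo: the buffer object to move
 * @ctx: operation context controlling interruptibility and GPU waits
 * @new_mem: the memory region to move into
 *
 * Helper for moves that can be done entirely by unbinding the TTM from the
 * old placement and binding it to the new one: wait for the BO to become
 * idle if it leaves a non-system placement, drop the old node, adjust the
 * caching attributes and bind the TTM to @new_mem.
 */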
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

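/*
 * ttm_mem_io_lock() / ttm_mem_io_unlock() serialize the I/O reservation
 * bookkeeping for memory types that need it; when the manager advertises
 * io_reserve_fastpath the mutex is skipped entirely.
 */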
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


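/*
 * Reserve the I/O aperture space for @mem through the driver's
 * io_mem_reserve() callback. If the driver runs out of aperture space
 * (-EAGAIN), evict one entry from the io_reserve_lru and retry.
 */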
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

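/*
 * Drop one reservation reference on @mem and release the driver's I/O
 * mapping once the last reference is gone.
 */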
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

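/*
 * Reserve the I/O space needed to CPU-map @bo (e.g. from a VM fault) and,
 * when the memory type uses an io_reserve LRU, make the reservation
 * evictable by putting the BO on that list.
 */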
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

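/*
 * Map the I/O region backing @mem into kernel address space. On success
 * *virtual points at the mapping (or stays NULL for non-iomem regions);
 * the reservation taken here is dropped again by ttm_mem_reg_iounmap().
 */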
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

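/*
 * Low-level page copy helpers used by ttm_bo_move_memcpy(): io-to-io,
 * io-to-TTM and TTM-to-io, one PAGE_SIZE chunk at a time.
 */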
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}

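/**
 * ttm_bo_move_memcpy - fallback move by CPU copy
 * @bo: the buffer object to move
 * @ctx: operation context controlling interruptibility and GPU waits
 * @new_mem: the memory region to move into
 *
 * Generic fallback used when no hardware copy engine is available: map the
 * old and new regions, copy page by page (choosing the copy direction so
 * overlapping ranges within the same memory type are handled correctly),
 * then transfer the memory region to @bo and release the old node.
 */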
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, a negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

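/*
 * ttm_io_prot - compute the page protection for a mapping with the given
 * TTM caching flags, starting from @tmp (e.g. PAGE_KERNEL or a vma's
 * vm_page_prot). Cached mappings are returned unchanged; otherwise the
 * protection is made write-combined or uncached as the architecture allows.
 */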
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

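/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 * @bo: the buffer object to map
 * @start_page: first page of the range to map
 * @num_pages: number of pages to map
 * @map: kmap object that receives the mapping and its type
 *
 * Uses ioremap for iomem placements and kmap/vmap for system pages; undo
 * the mapping with ttm_bo_kunmap(). A minimal usage sketch (illustrative
 * only, error handling trimmed):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (!ttm_bo_kmap(bo, 0, bo->num_pages, &map)) {
 *		void *vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... access vaddr, honouring is_iomem ...
 *
 *		ttm_bo_kunmap(&map);
 *	}
 */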
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

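/*
 * ttm_bo_kunmap - unmap a mapping obtained with ttm_bo_kmap() and drop the
 * I/O reservation that was taken for it.
 */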
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

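/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (fence-based) move
 * @bo: the buffer object that was moved
 * @fence: the fence signalling completion of the copy
 * @evict: whether this move is an eviction
 * @new_mem: the memory region the BO now lives in
 *
 * For evictions, wait for @fence and free the old node immediately.
 * Otherwise hand the old storage to a ghost object so it is released only
 * once @fence signals, which keeps ordinary moves pipelined.
 */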
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

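/**
 * ttm_bo_pipeline_move - pipeline a move behind a fence without stalling
 * @bo: the buffer object being moved
 * @fence: the fence signalling completion of the move
 * @evict: whether this move is an eviction
 * @new_mem: the memory region the BO is moving into
 *
 * Like ttm_bo_move_accel_cleanup(), but evictions from fixed (non-TTM)
 * memory types are also made asynchronous by recording @fence as the source
 * manager's move fence instead of waiting for it here.
 */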
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

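/**
 * ttm_bo_pipeline_gutting - throw away a buffer object's backing storage
 * @bo: the buffer object to gut
 *
 * Transfer the BO's current storage and TTM to a ghost object that carries
 * the BO's fences, then reset @bo to an empty TTM_PL_SYSTEM placement. If
 * copying the fences fails under memory pressure, fall back to waiting for
 * the BO to become idle.
 */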
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}