1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31 
32 #define pr_fmt(fmt) "[TTM] " fmt
33 
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/dma-resv.h>
44 
45 #include "ttm_module.h"
46 
47 /* default destructor */
48 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
49 {
50 	kfree(bo);
51 }
52 
53 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
54 					struct ttm_placement *placement)
55 {
56 	struct drm_printer p = drm_debug_printer(TTM_PFX);
57 	struct ttm_resource_manager *man;
58 	int i, mem_type;
59 
60 	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
61 		   bo, bo->mem.num_pages, bo->base.size >> 10,
62 		   bo->base.size >> 20);
63 	for (i = 0; i < placement->num_placement; i++) {
64 		mem_type = placement->placement[i].mem_type;
65 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
66 			   i, placement->placement[i].flags, mem_type);
67 		man = ttm_manager_type(bo->bdev, mem_type);
68 		ttm_resource_manager_debug(man, &p);
69 	}
70 }
71 
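/* Take the BO off whatever LRU list it is on and notify the driver. */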
72 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
73 {
74 	struct ttm_device *bdev = bo->bdev;
75 
76 	list_del_init(&bo->lru);
77 
78 	if (bdev->funcs->del_from_lru_notify)
79 		bdev->funcs->del_from_lru_notify(bo);
80 }
81 
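/* Record @bo in a bulk move range, remembering the first and last BO added. */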
82 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
83 				     struct ttm_buffer_object *bo)
84 {
85 	if (!pos->first)
86 		pos->first = bo;
87 	pos->last = bo;
88 }
89 
90 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
91 			     struct ttm_resource *mem,
92 			     struct ttm_lru_bulk_move *bulk)
93 {
94 	struct ttm_device *bdev = bo->bdev;
95 	struct ttm_resource_manager *man;
96 
97 	if (!bo->deleted)
98 		dma_resv_assert_held(bo->base.resv);
99 
100 	if (bo->pin_count) {
101 		ttm_bo_del_from_lru(bo);
102 		return;
103 	}
104 
105 	man = ttm_manager_type(bdev, mem->mem_type);
106 	list_move_tail(&bo->lru, &man->lru[bo->priority]);
107 
108 	if (bdev->funcs->del_from_lru_notify)
109 		bdev->funcs->del_from_lru_notify(bo);
110 
111 	if (bulk && !bo->pin_count) {
112 		switch (bo->mem.mem_type) {
113 		case TTM_PL_TT:
114 			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
115 			break;
116 
117 		case TTM_PL_VRAM:
118 			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
119 			break;
120 		}
121 	}
122 }
123 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
124 
125 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
126 {
127 	unsigned i;
128 
129 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
130 		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
131 		struct ttm_resource_manager *man;
132 
133 		if (!pos->first)
134 			continue;
135 
136 		dma_resv_assert_held(pos->first->base.resv);
137 		dma_resv_assert_held(pos->last->base.resv);
138 
139 		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
140 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
141 				    &pos->last->lru);
142 	}
143 
144 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
145 		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
146 		struct ttm_resource_manager *man;
147 
148 		if (!pos->first)
149 			continue;
150 
151 		dma_resv_assert_held(pos->first->base.resv);
152 		dma_resv_assert_held(pos->last->base.resv);
153 
154 		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
155 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
156 				    &pos->last->lru);
157 	}
158 }
159 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
160 
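/*
 * Move the buffer into the new resource @mem: create and populate a TTM if
 * the new location needs one, then let the driver perform the actual move.
 * A multihop request from the driver (-EMULTIHOP) is passed straight back to
 * the caller; on other errors the TTM is destroyed again if the (possibly
 * changed) current location doesn't use one.
 */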
161 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
162 				  struct ttm_resource *mem, bool evict,
163 				  struct ttm_operation_ctx *ctx,
164 				  struct ttm_place *hop)
165 {
166 	struct ttm_device *bdev = bo->bdev;
167 	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
168 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
169 	int ret;
170 
171 	ttm_bo_unmap_virtual(bo);
172 
173 	/*
174 	 * Create and bind a ttm if required.
175 	 */
176 
177 	if (new_man->use_tt) {
178 		/* Zero init the new TTM structure if the old location should
179 		 * have used one as well.
180 		 */
181 		ret = ttm_tt_create(bo, old_man->use_tt);
182 		if (ret)
183 			goto out_err;
184 
185 		if (mem->mem_type != TTM_PL_SYSTEM) {
186 			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
187 			if (ret)
188 				goto out_err;
189 		}
190 	}
191 
192 	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
193 	if (ret) {
194 		if (ret == -EMULTIHOP)
195 			return ret;
196 		goto out_err;
197 	}
198 
199 	ctx->bytes_moved += bo->base.size;
200 	return 0;
201 
202 out_err:
203 	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
204 	if (!new_man->use_tt)
205 		ttm_bo_tt_destroy(bo);
206 
207 	return ret;
208 }
209 
210 /*
211  * Must be called with the bo reserved (bo->base.resv held).
212  * Releases the GPU memory type usage of the buffer on destruction:
213  * this is the place to put in driver specific hooks to release
214  * driver private resources. Note that this function does not drop
215  * the reservation lock itself.
216  */
217 
218 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
219 {
220 	if (bo->bdev->funcs->delete_mem_notify)
221 		bo->bdev->funcs->delete_mem_notify(bo);
222 
223 	ttm_bo_tt_destroy(bo);
224 	ttm_resource_free(bo, &bo->mem);
225 }
226 
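/*
 * Give a dying BO its own reservation object: copy all fences over to the
 * embedded one and, except for SG objects, switch bo->base.resv to it so
 * cleanup no longer depends on a possibly shared reservation object.
 */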
227 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
228 {
229 	int r;
230 
231 	if (bo->base.resv == &bo->base._resv)
232 		return 0;
233 
234 	BUG_ON(!dma_resv_trylock(&bo->base._resv));
235 
236 	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
237 	dma_resv_unlock(&bo->base._resv);
238 	if (r)
239 		return r;
240 
241 	if (bo->type != ttm_bo_type_sg) {
242 		/* This works because the BO is about to be destroyed and nobody
243 		 * references it anymore. The only tricky case is the trylock on
244 		 * the resv object while holding the lru_lock.
245 		 */
246 		spin_lock(&bo->bdev->lru_lock);
247 		bo->base.resv = &bo->base._resv;
248 		spin_unlock(&bo->bdev->lru_lock);
249 	}
250 
251 	return r;
252 }
253 
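/*
 * Enable software signaling on all fences attached to the individualized
 * reservation object, so a delayed-destroy BO eventually becomes idle.
 */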
254 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
255 {
256 	struct dma_resv *resv = &bo->base._resv;
257 	struct dma_resv_list *fobj;
258 	struct dma_fence *fence;
259 	int i;
260 
261 	rcu_read_lock();
262 	fobj = rcu_dereference(resv->fence);
263 	fence = rcu_dereference(resv->fence_excl);
264 	if (fence && !fence->ops->signaled)
265 		dma_fence_enable_sw_signaling(fence);
266 
267 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
268 		fence = rcu_dereference(fobj->shared[i]);
269 
270 		if (!fence->ops->signaled)
271 			dma_fence_enable_sw_signaling(fence);
272 	}
273 	rcu_read_unlock();
274 }
275 
276 /**
277  * ttm_bo_cleanup_refs - clean up a delayed-destroy buffer object
278  * @bo:                    The buffer object to clean-up
279  * @interruptible:         Any sleeps should occur interruptibly.
280  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
281  * @unlock_resv:           Unlock the reservation lock as well.
282  *
283  * If the bo is idle, remove it from the lru lists and unreference it.
284  * If it is not idle, block if possible.
285  *
286  * Must be called with lru_lock and reservation held; this function
287  * will drop the lru lock and optionally the reservation lock before returning.
288  */
289 
290 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
291 			       bool interruptible, bool no_wait_gpu,
292 			       bool unlock_resv)
293 {
294 	struct dma_resv *resv = &bo->base._resv;
295 	int ret;
296 
297 	if (dma_resv_test_signaled_rcu(resv, true))
298 		ret = 0;
299 	else
300 		ret = -EBUSY;
301 
302 	if (ret && !no_wait_gpu) {
303 		long lret;
304 
305 		if (unlock_resv)
306 			dma_resv_unlock(bo->base.resv);
307 		spin_unlock(&bo->bdev->lru_lock);
308 
309 		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
310 						 30 * HZ);
311 
312 		if (lret < 0)
313 			return lret;
314 		else if (lret == 0)
315 			return -EBUSY;
316 
317 		spin_lock(&bo->bdev->lru_lock);
318 		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
319 			/*
320 			 * We raced and lost: someone else holds the reservation
321 			 * now and is probably busy in ttm_bo_cleanup_memtype_use.
322 			 *
323 			 * Even if that's not the case, any delayed destruction
324 			 * would now succeed because we finished waiting, so just
325 			 * return success here.
326 			 */
327 			spin_unlock(&bo->bdev->lru_lock);
328 			return 0;
329 		}
330 		ret = 0;
331 	}
332 
333 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
334 		if (unlock_resv)
335 			dma_resv_unlock(bo->base.resv);
336 		spin_unlock(&bo->bdev->lru_lock);
337 		return ret;
338 	}
339 
340 	ttm_bo_del_from_lru(bo);
341 	list_del_init(&bo->ddestroy);
342 	spin_unlock(&bo->bdev->lru_lock);
343 	ttm_bo_cleanup_memtype_use(bo);
344 
345 	if (unlock_resv)
346 		dma_resv_unlock(bo->base.resv);
347 
348 	ttm_bo_put(bo);
349 
350 	return 0;
351 }
352 
353 /*
354  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
355  * encountered buffers.
356  */
357 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
358 {
359 	struct list_head removed;
360 	bool empty;
361 
362 	INIT_LIST_HEAD(&removed);
363 
364 	spin_lock(&bdev->lru_lock);
365 	while (!list_empty(&bdev->ddestroy)) {
366 		struct ttm_buffer_object *bo;
367 
368 		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
369 				      ddestroy);
370 		list_move_tail(&bo->ddestroy, &removed);
371 		if (!ttm_bo_get_unless_zero(bo))
372 			continue;
373 
374 		if (remove_all || bo->base.resv != &bo->base._resv) {
375 			spin_unlock(&bdev->lru_lock);
376 			dma_resv_lock(bo->base.resv, NULL);
377 
378 			spin_lock(&bdev->lru_lock);
379 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
380 
381 		} else if (dma_resv_trylock(bo->base.resv)) {
382 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
383 		} else {
384 			spin_unlock(&bdev->lru_lock);
385 		}
386 
387 		ttm_bo_put(bo);
388 		spin_lock(&bdev->lru_lock);
389 	}
390 	list_splice_tail(&removed, &bdev->ddestroy);
391 	empty = list_empty(&bdev->ddestroy);
392 	spin_unlock(&bdev->lru_lock);
393 
394 	return empty;
395 }
396 
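/*
 * Final kref release: individualize the reservation object and notify the
 * driver. If the BO is still busy it is resurrected onto the delayed-destroy
 * list; otherwise its memory type usage is released and it is freed.
 */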
397 static void ttm_bo_release(struct kref *kref)
398 {
399 	struct ttm_buffer_object *bo =
400 	    container_of(kref, struct ttm_buffer_object, kref);
401 	struct ttm_device *bdev = bo->bdev;
402 	int ret;
403 
404 	if (!bo->deleted) {
405 		ret = ttm_bo_individualize_resv(bo);
406 		if (ret) {
407 			/* Last resort: if we fail to allocate memory for the
408 			 * fences, block for the BO to become idle.
409 			 */
410 			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
411 						  30 * HZ);
412 		}
413 
414 		if (bo->bdev->funcs->release_notify)
415 			bo->bdev->funcs->release_notify(bo);
416 
417 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
418 		ttm_mem_io_free(bdev, &bo->mem);
419 	}
420 
421 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
422 	    !dma_resv_trylock(bo->base.resv)) {
423 		/* The BO is not idle, resurrect it for delayed destroy */
424 		ttm_bo_flush_all_fences(bo);
425 		bo->deleted = true;
426 
427 		spin_lock(&bo->bdev->lru_lock);
428 
429 		/*
430 		 * Make pinned bos immediately available to
431 		 * shrinkers, now that they are queued for
432 		 * destruction.
433 		 *
434 		 * FIXME: QXL is triggering this. Can be removed when the
435 		 * driver is fixed.
436 		 */
437 		if (WARN_ON_ONCE(bo->pin_count)) {
438 			bo->pin_count = 0;
439 			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
440 		}
441 
442 		kref_init(&bo->kref);
443 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
444 		spin_unlock(&bo->bdev->lru_lock);
445 
446 		schedule_delayed_work(&bdev->wq,
447 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
448 		return;
449 	}
450 
451 	spin_lock(&bo->bdev->lru_lock);
452 	ttm_bo_del_from_lru(bo);
453 	list_del(&bo->ddestroy);
454 	spin_unlock(&bo->bdev->lru_lock);
455 
456 	ttm_bo_cleanup_memtype_use(bo);
457 	dma_resv_unlock(bo->base.resv);
458 
459 	atomic_dec(&ttm_glob.bo_count);
460 	dma_fence_put(bo->moving);
461 	if (!ttm_bo_uses_embedded_gem_object(bo))
462 		dma_resv_fini(&bo->base._resv);
463 	bo->destroy(bo);
464 }
465 
466 void ttm_bo_put(struct ttm_buffer_object *bo)
467 {
468 	kref_put(&bo->kref, ttm_bo_release);
469 }
470 EXPORT_SYMBOL(ttm_bo_put);
471 
472 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
473 {
474 	return cancel_delayed_work_sync(&bdev->wq);
475 }
476 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
477 
478 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
479 {
480 	if (resched)
481 		schedule_delayed_work(&bdev->wq,
482 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
483 }
484 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
485 
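/*
 * Evict the BO into one of the placements returned by the driver's
 * evict_flags() callback. If the driver reports no placement at all, the
 * backing store is dropped and replaced by a fresh, unpopulated TTM.
 */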
486 static int ttm_bo_evict(struct ttm_buffer_object *bo,
487 			struct ttm_operation_ctx *ctx)
488 {
489 	struct ttm_device *bdev = bo->bdev;
490 	struct ttm_resource evict_mem;
491 	struct ttm_placement placement;
492 	struct ttm_place hop;
493 	int ret = 0;
494 
495 	memset(&hop, 0, sizeof(hop));
496 
497 	dma_resv_assert_held(bo->base.resv);
498 
499 	placement.num_placement = 0;
500 	placement.num_busy_placement = 0;
501 	bdev->funcs->evict_flags(bo, &placement);
502 
503 	if (!placement.num_placement && !placement.num_busy_placement) {
504 		ttm_bo_wait(bo, false, false);
505 
506 		ttm_bo_cleanup_memtype_use(bo);
507 		return ttm_tt_create(bo, false);
508 	}
509 
510 	evict_mem = bo->mem;
511 	evict_mem.mm_node = NULL;
512 	evict_mem.bus.offset = 0;
513 	evict_mem.bus.addr = NULL;
514 
515 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
516 	if (ret) {
517 		if (ret != -ERESTARTSYS) {
518 			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
519 			       bo);
520 			ttm_bo_mem_space_debug(bo, &placement);
521 		}
522 		goto out;
523 	}
524 
525 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
526 	if (unlikely(ret)) {
527 		WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
528 		if (ret != -ERESTARTSYS)
529 			pr_err("Buffer eviction failed\n");
530 		ttm_resource_free(bo, &evict_mem);
531 	}
532 out:
533 	return ret;
534 }
535 
536 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
537 			      const struct ttm_place *place)
538 {
539 	/* Don't evict this BO if it's outside of the
540 	 * requested placement range
541 	 */
542 	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
543 	    (place->lpfn && place->lpfn <= bo->mem.start))
544 		return false;
545 
546 	return true;
547 }
548 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
549 
550 /*
551  * Check whether the target bo is allowed to be evicted or swapped out,
552  * including the following cases:
553  *
554  * a. If it shares the same reservation object as ctx->resv, assume that
555  * object is already locked, so don't lock it again; return true directly
556  * when the operation allows reserved eviction (ctx->allow_res_evict);
557  *
558  * b. Otherwise, trylock it.
559  */
560 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
561 			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
562 {
563 	bool ret = false;
564 
565 	if (bo->base.resv == ctx->resv) {
566 		dma_resv_assert_held(bo->base.resv);
567 		if (ctx->allow_res_evict)
568 			ret = true;
569 		*locked = false;
570 		if (busy)
571 			*busy = false;
572 	} else {
573 		ret = dma_resv_trylock(bo->base.resv);
574 		*locked = ret;
575 		if (busy)
576 			*busy = !ret;
577 	}
578 
579 	return ret;
580 }
581 
582 /**
583  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
584  *
585  * @busy_bo: BO which couldn't be locked with trylock
586  * @ctx: operation context
587  * @ticket: acquire ticket
588  *
589  * Try to lock a busy buffer object to avoid failing eviction.
590  */
591 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
592 				   struct ttm_operation_ctx *ctx,
593 				   struct ww_acquire_ctx *ticket)
594 {
595 	int r;
596 
597 	if (!busy_bo || !ticket)
598 		return -EBUSY;
599 
600 	if (ctx->interruptible)
601 		r = dma_resv_lock_interruptible(busy_bo->base.resv,
602 							  ticket);
603 	else
604 		r = dma_resv_lock(busy_bo->base.resv, ticket);
605 
606 	/*
607 	 * TODO: It would be better to keep the BO locked until allocation is at
608 	 * least tried one more time, but that would mean a much larger rework
609 	 * of TTM.
610 	 */
611 	if (!r)
612 		dma_resv_unlock(busy_bo->base.resv);
613 
614 	return r == -EDEADLK ? -EBUSY : r;
615 }
616 
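/*
 * Walk the manager's LRU lists in priority order and evict the first BO that
 * can be reserved and that the driver considers valuable to evict for @place.
 * If only busy BOs are found, wait for one of them instead of failing with
 * -EBUSY immediately.
 */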
617 int ttm_mem_evict_first(struct ttm_device *bdev,
618 			struct ttm_resource_manager *man,
619 			const struct ttm_place *place,
620 			struct ttm_operation_ctx *ctx,
621 			struct ww_acquire_ctx *ticket)
622 {
623 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
624 	bool locked = false;
625 	unsigned i;
626 	int ret;
627 
628 	spin_lock(&bdev->lru_lock);
629 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
630 		list_for_each_entry(bo, &man->lru[i], lru) {
631 			bool busy;
632 
633 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
634 							    &busy)) {
635 				if (busy && !busy_bo && ticket !=
636 				    dma_resv_locking_ctx(bo->base.resv))
637 					busy_bo = bo;
638 				continue;
639 			}
640 
641 			if (place && !bdev->funcs->eviction_valuable(bo,
642 								      place)) {
643 				if (locked)
644 					dma_resv_unlock(bo->base.resv);
645 				continue;
646 			}
647 			if (!ttm_bo_get_unless_zero(bo)) {
648 				if (locked)
649 					dma_resv_unlock(bo->base.resv);
650 				continue;
651 			}
652 			break;
653 		}
654 
655 		/* If the inner loop terminated early, we have our candidate */
656 		if (&bo->lru != &man->lru[i])
657 			break;
658 
659 		bo = NULL;
660 	}
661 
662 	if (!bo) {
663 		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
664 			busy_bo = NULL;
665 		spin_unlock(&bdev->lru_lock);
666 		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
667 		if (busy_bo)
668 			ttm_bo_put(busy_bo);
669 		return ret;
670 	}
671 
672 	if (bo->deleted) {
673 		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
674 					  ctx->no_wait_gpu, locked);
675 		ttm_bo_put(bo);
676 		return ret;
677 	}
678 
679 	spin_unlock(&bdev->lru_lock);
680 
681 	ret = ttm_bo_evict(bo, ctx);
682 	if (locked)
683 		ttm_bo_unreserve(bo);
684 
685 	ttm_bo_put(bo);
686 	return ret;
687 }
688 
689 /*
690  * Add the last move fence to the BO and reserve a new shared slot.
691  */
692 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
693 				 struct ttm_resource_manager *man,
694 				 struct ttm_resource *mem,
695 				 bool no_wait_gpu)
696 {
697 	struct dma_fence *fence;
698 	int ret;
699 
700 	spin_lock(&man->move_lock);
701 	fence = dma_fence_get(man->move);
702 	spin_unlock(&man->move_lock);
703 
704 	if (!fence)
705 		return 0;
706 
707 	if (no_wait_gpu) {
708 		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
709 		dma_fence_put(fence);
710 		return ret;
711 	}
712 
713 	dma_resv_add_shared_fence(bo->base.resv, fence);
714 
715 	ret = dma_resv_reserve_shared(bo->base.resv, 1);
716 	if (unlikely(ret)) {
717 		dma_fence_put(fence);
718 		return ret;
719 	}
720 
721 	dma_fence_put(bo->moving);
722 	bo->moving = fence;
723 	return 0;
724 }
725 
726 /*
727  * Repeatedly evict memory from the LRU for @mem's memory type until we create
728  * enough space, or we've evicted everything and there still isn't enough space.
729  */
730 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
731 				  const struct ttm_place *place,
732 				  struct ttm_resource *mem,
733 				  struct ttm_operation_ctx *ctx)
734 {
735 	struct ttm_device *bdev = bo->bdev;
736 	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
737 	struct ww_acquire_ctx *ticket;
738 	int ret;
739 
740 	ticket = dma_resv_locking_ctx(bo->base.resv);
741 	do {
742 		ret = ttm_resource_alloc(bo, place, mem);
743 		if (likely(!ret))
744 			break;
745 		if (unlikely(ret != -ENOSPC))
746 			return ret;
747 		ret = ttm_mem_evict_first(bdev, man, place, ctx,
748 					  ticket);
749 		if (unlikely(ret != 0))
750 			return ret;
751 	} while (1);
752 
753 	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
754 }
755 
756 /**
757  * ttm_bo_mem_placement - check if placement is compatible
758  * @bo: BO to find memory for
759  * @place: where to search
760  * @mem: the memory object to fill in
761  *
762  * Check if placement is compatible and fill in mem structure.
763  * Returns 0 when the placement can be used, or -EBUSY when the
764  * corresponding memory type manager is unavailable.
765  */
766 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
767 				const struct ttm_place *place,
768 				struct ttm_resource *mem)
769 {
770 	struct ttm_device *bdev = bo->bdev;
771 	struct ttm_resource_manager *man;
772 
773 	man = ttm_manager_type(bdev, place->mem_type);
774 	if (!man || !ttm_resource_manager_used(man))
775 		return -EBUSY;
776 
777 	mem->mem_type = place->mem_type;
778 	mem->placement = place->flags;
779 
780 	spin_lock(&bo->bdev->lru_lock);
781 	ttm_bo_move_to_lru_tail(bo, mem, NULL);
782 	spin_unlock(&bo->bdev->lru_lock);
783 	return 0;
784 }
785 
786 /*
787  * Creates space for memory region @mem according to its type.
788  *
789  * This function first searches for free space in compatible memory types in
790  * the priority order defined by the driver.  If free space isn't found, then
791  * ttm_bo_mem_force_space is attempted in priority order to evict and find
792  * space.
793  */
794 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
795 			struct ttm_placement *placement,
796 			struct ttm_resource *mem,
797 			struct ttm_operation_ctx *ctx)
798 {
799 	struct ttm_device *bdev = bo->bdev;
800 	bool type_found = false;
801 	int i, ret;
802 
803 	ret = dma_resv_reserve_shared(bo->base.resv, 1);
804 	if (unlikely(ret))
805 		return ret;
806 
807 	for (i = 0; i < placement->num_placement; ++i) {
808 		const struct ttm_place *place = &placement->placement[i];
809 		struct ttm_resource_manager *man;
810 
811 		ret = ttm_bo_mem_placement(bo, place, mem);
812 		if (ret)
813 			continue;
814 
815 		type_found = true;
816 		ret = ttm_resource_alloc(bo, place, mem);
817 		if (ret == -ENOSPC)
818 			continue;
819 		if (unlikely(ret))
820 			goto error;
821 
822 		man = ttm_manager_type(bdev, mem->mem_type);
823 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
824 		if (unlikely(ret)) {
825 			ttm_resource_free(bo, mem);
826 			if (ret == -EBUSY)
827 				continue;
828 
829 			goto error;
830 		}
831 		return 0;
832 	}
833 
834 	for (i = 0; i < placement->num_busy_placement; ++i) {
835 		const struct ttm_place *place = &placement->busy_placement[i];
836 
837 		ret = ttm_bo_mem_placement(bo, place, mem);
838 		if (ret)
839 			continue;
840 
841 		type_found = true;
842 		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
843 		if (likely(!ret))
844 			return 0;
845 
846 		if (ret && ret != -EBUSY)
847 			goto error;
848 	}
849 
850 	ret = -ENOMEM;
851 	if (!type_found) {
852 		pr_err("No compatible memory type found\n");
853 		ret = -EINVAL;
854 	}
855 
856 error:
857 	if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
858 		ttm_bo_move_to_lru_tail_unlocked(bo);
859 
860 	return ret;
861 }
862 EXPORT_SYMBOL(ttm_bo_mem_space);
863 
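/*
 * Handle a multihop request: find space in the intermediate placement
 * described by @hop and move the buffer there, so the move to the final
 * placement can be retried from that location.
 */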
864 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
865 				     struct ttm_resource *mem,
866 				     struct ttm_operation_ctx *ctx,
867 				     struct ttm_place *hop)
868 {
869 	struct ttm_placement hop_placement;
870 	int ret;
871 	struct ttm_resource hop_mem = *mem;
872 
873 	hop_mem.mm_node = NULL;
874 	hop_mem.mem_type = TTM_PL_SYSTEM;
875 	hop_mem.placement = 0;
876 
877 	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
878 	hop_placement.placement = hop_placement.busy_placement = hop;
879 
880 	/* find space in the bounce domain */
881 	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
882 	if (ret)
883 		return ret;
884 	/* move to the bounce domain */
885 	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
886 	if (ret) {
887 		ttm_resource_free(bo, &hop_mem);
888 		return ret;
889 	}
890 	return 0;
891 }
892 
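/*
 * Find space for the buffer according to @placement and move it there,
 * bouncing through a temporary placement whenever the driver asks for a
 * multihop move.
 */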
893 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
894 			      struct ttm_placement *placement,
895 			      struct ttm_operation_ctx *ctx)
896 {
897 	int ret = 0;
898 	struct ttm_place hop;
899 	struct ttm_resource mem;
900 
901 	dma_resv_assert_held(bo->base.resv);
902 
903 	memset(&hop, 0, sizeof(hop));
904 
905 	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
906 	mem.page_alignment = bo->mem.page_alignment;
907 	mem.bus.offset = 0;
908 	mem.bus.addr = NULL;
909 	mem.mm_node = NULL;
910 
911 	/*
912 	 * Determine where to move the buffer.
913 	 *
914 	 * If the driver determines the move is going to need
915 	 * an extra step, it returns -EMULTIHOP and the buffer
916 	 * is first moved to the temporary stop; the driver is
917 	 * then called again to make the second hop to the
918 	 * final placement.
919 	 */
920 	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
921 	if (ret)
922 		return ret;
923 bounce:
924 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
925 	if (ret == -EMULTIHOP) {
926 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
927 		if (ret)
928 			goto out;
929 		/* try and move to final place now. */
930 		goto bounce;
931 	}
932 out:
933 	if (ret)
934 		ttm_resource_free(bo, &mem);
935 	return ret;
936 }
937 
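/*
 * Check whether the current resource @mem already satisfies one of the given
 * places; if so, return true and hand back that place's flags in @new_flags.
 */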
938 static bool ttm_bo_places_compat(const struct ttm_place *places,
939 				 unsigned num_placement,
940 				 struct ttm_resource *mem,
941 				 uint32_t *new_flags)
942 {
943 	unsigned i;
944 
945 	for (i = 0; i < num_placement; i++) {
946 		const struct ttm_place *heap = &places[i];
947 
948 		if ((mem->start < heap->fpfn ||
949 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
950 			continue;
951 
952 		*new_flags = heap->flags;
953 		if ((mem->mem_type == heap->mem_type) &&
954 		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
955 		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
956 			return true;
957 	}
958 	return false;
959 }
960 
961 bool ttm_bo_mem_compat(struct ttm_placement *placement,
962 		       struct ttm_resource *mem,
963 		       uint32_t *new_flags)
964 {
965 	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
966 				 mem, new_flags))
967 		return true;
968 
969 	if ((placement->busy_placement != placement->placement ||
970 	     placement->num_busy_placement > placement->num_placement) &&
971 	    ttm_bo_places_compat(placement->busy_placement,
972 				 placement->num_busy_placement,
973 				 mem, new_flags))
974 		return true;
975 
976 	return false;
977 }
978 EXPORT_SYMBOL(ttm_bo_mem_compat);
979 
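/*
 * Make the buffer object match @placement: drop the backing store when no
 * placement is given, move the buffer if its current resource is not
 * compatible, and make sure a TTM exists for system placements.
 */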
980 int ttm_bo_validate(struct ttm_buffer_object *bo,
981 		    struct ttm_placement *placement,
982 		    struct ttm_operation_ctx *ctx)
983 {
984 	int ret;
985 	uint32_t new_flags;
986 
987 	dma_resv_assert_held(bo->base.resv);
988 
989 	/*
990 	 * Remove the backing store if no placement is given.
991 	 */
992 	if (!placement->num_placement && !placement->num_busy_placement) {
993 		ret = ttm_bo_pipeline_gutting(bo);
994 		if (ret)
995 			return ret;
996 
997 		return ttm_tt_create(bo, false);
998 	}
999 
1000 	/*
1001 	 * Check whether we need to move buffer.
1002 	 */
1003 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1004 		ret = ttm_bo_move_buffer(bo, placement, ctx);
1005 		if (ret)
1006 			return ret;
1007 	}
1008 	/*
1009 	 * We might need to add a TTM.
1010 	 */
1011 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1012 		ret = ttm_tt_create(bo, true);
1013 		if (ret)
1014 			return ret;
1015 	}
1016 	return 0;
1017 }
1018 EXPORT_SYMBOL(ttm_bo_validate);
1019 
1020 int ttm_bo_init_reserved(struct ttm_device *bdev,
1021 			 struct ttm_buffer_object *bo,
1022 			 size_t size,
1023 			 enum ttm_bo_type type,
1024 			 struct ttm_placement *placement,
1025 			 uint32_t page_alignment,
1026 			 struct ttm_operation_ctx *ctx,
1027 			 struct sg_table *sg,
1028 			 struct dma_resv *resv,
1029 			 void (*destroy) (struct ttm_buffer_object *))
1030 {
1031 	bool locked;
1032 	int ret = 0;
1033 
1034 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1035 
1036 	kref_init(&bo->kref);
1037 	INIT_LIST_HEAD(&bo->lru);
1038 	INIT_LIST_HEAD(&bo->ddestroy);
1039 	bo->bdev = bdev;
1040 	bo->type = type;
1041 	bo->mem.mem_type = TTM_PL_SYSTEM;
1042 	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1043 	bo->mem.mm_node = NULL;
1044 	bo->mem.page_alignment = page_alignment;
1045 	bo->mem.bus.offset = 0;
1046 	bo->mem.bus.addr = NULL;
1047 	bo->moving = NULL;
1048 	bo->mem.placement = 0;
1049 	bo->pin_count = 0;
1050 	bo->sg = sg;
1051 	if (resv) {
1052 		bo->base.resv = resv;
1053 		dma_resv_assert_held(bo->base.resv);
1054 	} else {
1055 		bo->base.resv = &bo->base._resv;
1056 	}
1057 	if (!ttm_bo_uses_embedded_gem_object(bo)) {
1058 		/*
1059 		 * bo.base is not initialized, so we have to set up the
1060 		 * struct elements we want to use regardless.
1061 		 */
1062 		bo->base.size = size;
1063 		dma_resv_init(&bo->base._resv);
1064 		drm_vma_node_reset(&bo->base.vma_node);
1065 	}
1066 	atomic_inc(&ttm_glob.bo_count);
1067 
1068 	/*
1069 	 * For ttm_bo_type_device buffers, allocate
1070 	 * address space from the device.
1071 	 */
1072 	if (bo->type == ttm_bo_type_device ||
1073 	    bo->type == ttm_bo_type_sg)
1074 		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1075 					 bo->mem.num_pages);
1076 
1077 	/* passed reservation objects should already be locked,
1078 	 * since otherwise lockdep will be angered in radeon.
1079 	 */
1080 	if (!resv) {
1081 		locked = dma_resv_trylock(bo->base.resv);
1082 		WARN_ON(!locked);
1083 	}
1084 
1085 	if (likely(!ret))
1086 		ret = ttm_bo_validate(bo, placement, ctx);
1087 
1088 	if (unlikely(ret)) {
1089 		if (!resv)
1090 			ttm_bo_unreserve(bo);
1091 
1092 		ttm_bo_put(bo);
1093 		return ret;
1094 	}
1095 
1096 	ttm_bo_move_to_lru_tail_unlocked(bo);
1097 
1098 	return ret;
1099 }
1100 EXPORT_SYMBOL(ttm_bo_init_reserved);
1101 
1102 int ttm_bo_init(struct ttm_device *bdev,
1103 		struct ttm_buffer_object *bo,
1104 		size_t size,
1105 		enum ttm_bo_type type,
1106 		struct ttm_placement *placement,
1107 		uint32_t page_alignment,
1108 		bool interruptible,
1109 		struct sg_table *sg,
1110 		struct dma_resv *resv,
1111 		void (*destroy) (struct ttm_buffer_object *))
1112 {
1113 	struct ttm_operation_ctx ctx = { interruptible, false };
1114 	int ret;
1115 
1116 	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1117 				   page_alignment, &ctx, sg, resv, destroy);
1118 	if (ret)
1119 		return ret;
1120 
1121 	if (!resv)
1122 		ttm_bo_unreserve(bo);
1123 
1124 	return 0;
1125 }
1126 EXPORT_SYMBOL(ttm_bo_init);
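
/*
 * A minimal, hypothetical usage sketch (the bo allocation, the placement and
 * the NULL destroy callback are illustrative assumptions, not taken from a
 * real driver):
 *
 *	struct ttm_place sys_place = { .mem_type = TTM_PL_SYSTEM };
 *	struct ttm_placement sys_placement = {
 *		.num_placement = 1,
 *		.placement = &sys_place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &sys_place,
 *	};
 *
 *	ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, &sys_placement,
 *			  0, true, NULL, NULL, NULL);
 *
 * A NULL destroy callback makes TTM fall back to ttm_bo_default_destroy(),
 * i.e. a plain kfree() of the object.
 */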
1127 
1128 /*
1129  * buffer object vm functions.
1130  */
1131 
1132 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1133 {
1134 	struct ttm_device *bdev = bo->bdev;
1135 
1136 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1137 	ttm_mem_io_free(bdev, &bo->mem);
1138 }
1139 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1140 
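/*
 * Wait for all fences on the BO's reservation object to signal, or just test
 * them without blocking when @no_wait is set.
 */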
1141 int ttm_bo_wait(struct ttm_buffer_object *bo,
1142 		bool interruptible, bool no_wait)
1143 {
1144 	long timeout = 15 * HZ;
1145 
1146 	if (no_wait) {
1147 		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1148 			return 0;
1149 		else
1150 			return -EBUSY;
1151 	}
1152 
1153 	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1154 						      interruptible, timeout);
1155 	if (timeout < 0)
1156 		return timeout;
1157 
1158 	if (timeout == 0)
1159 		return -EBUSY;
1160 
1161 	dma_resv_add_excl_fence(bo->base.resv, NULL);
1162 	return 0;
1163 }
1164 EXPORT_SYMBOL(ttm_bo_wait);
1165 
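/*
 * Try to swap out a single BO. Called with the LRU lock held; the lock is
 * dropped once a BO has been claimed for swapout. The buffer is moved to
 * system memory if necessary, waited on to become idle and then handed to
 * ttm_tt_swapout().
 */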
1166 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
1167 		   gfp_t gfp_flags)
1168 {
1169 	bool locked;
1170 	int ret;
1171 
1172 	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
1173 		return -EBUSY;
1174 
1175 	if (!ttm_bo_get_unless_zero(bo)) {
1176 		if (locked)
1177 			dma_resv_unlock(bo->base.resv);
1178 		return -EBUSY;
1179 	}
1180 
1181 	if (bo->deleted) {
1182 		ttm_bo_cleanup_refs(bo, false, false, locked);
1183 		ttm_bo_put(bo);
1184 		return 0;
1185 	}
1186 
1187 	ttm_bo_del_from_lru(bo);
1188 	/* TODO: Cleanup the locking */
1189 	spin_unlock(&bo->bdev->lru_lock);
1190 
1191 	/*
1192 	 * Move to system cached
1193 	 */
1194 	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
1195 		struct ttm_operation_ctx ctx = { false, false };
1196 		struct ttm_resource evict_mem;
1197 		struct ttm_place hop;
1198 
1199 		memset(&hop, 0, sizeof(hop));
1200 
1201 		evict_mem = bo->mem;
1202 		evict_mem.mm_node = NULL;
1203 		evict_mem.placement = 0;
1204 		evict_mem.mem_type = TTM_PL_SYSTEM;
1205 
1206 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
1207 		if (unlikely(ret != 0)) {
1208 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1209 			goto out;
1210 		}
1211 	}
1212 
1213 	/*
1214 	 * Make sure BO is idle.
1215 	 */
1216 	ret = ttm_bo_wait(bo, false, false);
1217 	if (unlikely(ret != 0))
1218 		goto out;
1219 
1220 	ttm_bo_unmap_virtual(bo);
1221 
1222 	/*
1223 	 * Swap out. Buffer will be swapped in again as soon as
1224 	 * anyone tries to access a ttm page.
1225 	 */
1226 	if (bo->bdev->funcs->swap_notify)
1227 		bo->bdev->funcs->swap_notify(bo);
1228 
1229 	ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
1230 out:
1231 
1232 	/*
1233 	 * Unreserve without putting on LRU to avoid swapping out an
1234 	 * already swapped-out buffer.
1235 	 */
1236 	if (locked)
1237 		dma_resv_unlock(bo->base.resv);
1238 	ttm_bo_put(bo);
1239 	return ret;
1240 }
1241 
1242 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1243 {
1244 	if (bo->ttm == NULL)
1245 		return;
1246 
1247 	ttm_tt_destroy(bo->bdev, bo->ttm);
1248 	bo->ttm = NULL;
1249 }
1250