xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_bo.c (revision 4ee57308)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31 
32 #define pr_fmt(fmt) "[TTM] " fmt
33 
34 #include <drm/ttm/ttm_module.h>
35 #include <drm/ttm/ttm_bo_driver.h>
36 #include <drm/ttm/ttm_placement.h>
37 #include <linux/jiffies.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
40 #include <linux/mm.h>
41 #include <linux/file.h>
42 #include <linux/module.h>
43 #include <linux/atomic.h>
44 #include <linux/dma-resv.h>
45 
46 static void ttm_bo_global_kobj_release(struct kobject *kobj);
47 
48 /**
49  * ttm_global_mutex - protecting the global BO state
50  */
51 DEFINE_MUTEX(ttm_global_mutex);
52 unsigned ttm_bo_glob_use_count;
53 struct ttm_bo_global ttm_bo_glob;
54 EXPORT_SYMBOL(ttm_bo_glob);
55 
56 static struct attribute ttm_bo_count = {
57 	.name = "bo_count",
58 	.mode = S_IRUGO
59 };
60 
61 /* default destructor */
62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
63 {
64 	kfree(bo);
65 }
66 
67 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
68 					struct ttm_placement *placement)
69 {
70 	struct drm_printer p = drm_debug_printer(TTM_PFX);
71 	struct ttm_resource_manager *man;
72 	int i, mem_type;
73 
74 	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
75 		   bo, bo->mem.num_pages, bo->mem.size >> 10,
76 		   bo->mem.size >> 20);
77 	for (i = 0; i < placement->num_placement; i++) {
78 		mem_type = placement->placement[i].mem_type;
79 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
80 			   i, placement->placement[i].flags, mem_type);
81 		man = ttm_manager_type(bo->bdev, mem_type);
82 		ttm_resource_manager_debug(man, &p);
83 	}
84 }
85 
86 static ssize_t ttm_bo_global_show(struct kobject *kobj,
87 				  struct attribute *attr,
88 				  char *buffer)
89 {
90 	struct ttm_bo_global *glob =
91 		container_of(kobj, struct ttm_bo_global, kobj);
92 
93 	return snprintf(buffer, PAGE_SIZE, "%d\n",
94 				atomic_read(&glob->bo_count));
95 }
96 
97 static struct attribute *ttm_bo_global_attrs[] = {
98 	&ttm_bo_count,
99 	NULL
100 };
101 
102 static const struct sysfs_ops ttm_bo_global_ops = {
103 	.show = &ttm_bo_global_show
104 };
105 
106 static struct kobj_type ttm_bo_glob_kobj_type  = {
107 	.release = &ttm_bo_global_kobj_release,
108 	.sysfs_ops = &ttm_bo_global_ops,
109 	.default_attrs = ttm_bo_global_attrs
110 };
111 
112 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
113 				  struct ttm_resource *mem)
114 {
115 	struct ttm_bo_device *bdev = bo->bdev;
116 	struct ttm_resource_manager *man;
117 
118 	if (!list_empty(&bo->lru) || bo->pin_count)
119 		return;
120 
121 	man = ttm_manager_type(bdev, mem->mem_type);
122 	list_add_tail(&bo->lru, &man->lru[bo->priority]);
123 
124 	if (man->use_tt && bo->ttm &&
125 	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
126 				     TTM_PAGE_FLAG_SWAPPED))) {
127 		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
128 	}
129 }
130 
131 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
132 {
133 	struct ttm_bo_device *bdev = bo->bdev;
134 	bool notify = false;
135 
136 	if (!list_empty(&bo->swap)) {
137 		list_del_init(&bo->swap);
138 		notify = true;
139 	}
140 	if (!list_empty(&bo->lru)) {
141 		list_del_init(&bo->lru);
142 		notify = true;
143 	}
144 
145 	if (notify && bdev->driver->del_from_lru_notify)
146 		bdev->driver->del_from_lru_notify(bo);
147 }
148 
149 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
150 				     struct ttm_buffer_object *bo)
151 {
152 	if (!pos->first)
153 		pos->first = bo;
154 	pos->last = bo;
155 }
156 
157 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
158 			     struct ttm_lru_bulk_move *bulk)
159 {
160 	dma_resv_assert_held(bo->base.resv);
161 
162 	ttm_bo_del_from_lru(bo);
163 	ttm_bo_add_mem_to_lru(bo, &bo->mem);
164 
165 	if (bulk && !bo->pin_count) {
166 		switch (bo->mem.mem_type) {
167 		case TTM_PL_TT:
168 			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
169 			break;
170 
171 		case TTM_PL_VRAM:
172 			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
173 			break;
174 		}
175 		if (bo->ttm && !(bo->ttm->page_flags &
176 				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
177 			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
178 	}
179 }
180 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
181 
182 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
183 {
184 	unsigned i;
185 
186 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
187 		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
188 		struct ttm_resource_manager *man;
189 
190 		if (!pos->first)
191 			continue;
192 
193 		dma_resv_assert_held(pos->first->base.resv);
194 		dma_resv_assert_held(pos->last->base.resv);
195 
196 		man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
197 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
198 				    &pos->last->lru);
199 	}
200 
201 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
202 		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
203 		struct ttm_resource_manager *man;
204 
205 		if (!pos->first)
206 			continue;
207 
208 		dma_resv_assert_held(pos->first->base.resv);
209 		dma_resv_assert_held(pos->last->base.resv);
210 
211 		man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
212 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
213 				    &pos->last->lru);
214 	}
215 
216 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
217 		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
218 		struct list_head *lru;
219 
220 		if (!pos->first)
221 			continue;
222 
223 		dma_resv_assert_held(pos->first->base.resv);
224 		dma_resv_assert_held(pos->last->base.resv);
225 
226 		lru = &ttm_bo_glob.swap_lru[i];
227 		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
228 	}
229 }
230 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
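
/*
 * Editorial example, not part of the original file: a driver that keeps
 * many BOs under one reservation object (e.g. per-VM BOs) can batch their
 * LRU updates with the bulk-move helpers above instead of bumping each BO
 * individually. All "my_*" names below are hypothetical; the caller must
 * already hold the reservation of every BO passed in.
 *
 *	static void my_move_bos_to_lru_tail(struct ttm_buffer_object **bos,
 *					    unsigned int count)
 *	{
 *		struct ttm_lru_bulk_move bulk;
 *		unsigned int i;
 *
 *		memset(&bulk, 0, sizeof(bulk));
 *
 *		spin_lock(&ttm_bo_glob.lru_lock);
 *		for (i = 0; i < count; ++i)
 *			ttm_bo_move_to_lru_tail(bos[i], &bulk);
 *		ttm_bo_bulk_move_lru_tail(&bulk);
 *		spin_unlock(&ttm_bo_glob.lru_lock);
 *	}
 */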
231 
232 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
233 				  struct ttm_resource *mem, bool evict,
234 				  struct ttm_operation_ctx *ctx,
235 				  struct ttm_place *hop)
236 {
237 	struct ttm_bo_device *bdev = bo->bdev;
238 	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
239 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
240 	int ret;
241 
242 	ttm_bo_unmap_virtual(bo);
243 
244 	/*
245 	 * Create and bind a ttm if required.
246 	 */
247 
248 	if (new_man->use_tt) {
249 		/* Zero init the new TTM structure if the old location should
250 		 * have used one as well.
251 		 */
252 		ret = ttm_tt_create(bo, old_man->use_tt);
253 		if (ret)
254 			goto out_err;
255 
256 		if (mem->mem_type != TTM_PL_SYSTEM) {
257 			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
258 			if (ret)
259 				goto out_err;
260 		}
261 	}
262 
263 	ret = bdev->driver->move(bo, evict, ctx, mem, hop);
264 	if (ret) {
265 		if (ret == -EMULTIHOP)
266 			return ret;
267 		goto out_err;
268 	}
269 
270 	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
271 	return 0;
272 
273 out_err:
274 	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
275 	if (!new_man->use_tt)
276 		ttm_bo_tt_destroy(bo);
277 
278 	return ret;
279 }
280 
281 /*
282  * ttm_bo_cleanup_memtype_use - release a BO's memory type resources.
283  * Called with bo::reserved held. Releases the GPU memory type usage of
284  * the buffer object on destruction. This is the place to put in driver
285  * specific hooks to release driver private resources.
286  * The caller is responsible for dropping the bo::reserved lock afterwards.
287  */
288 
289 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
290 {
291 	if (bo->bdev->driver->delete_mem_notify)
292 		bo->bdev->driver->delete_mem_notify(bo);
293 
294 	ttm_bo_tt_destroy(bo);
295 	ttm_resource_free(bo, &bo->mem);
296 }
297 
298 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
299 {
300 	int r;
301 
302 	if (bo->base.resv == &bo->base._resv)
303 		return 0;
304 
305 	BUG_ON(!dma_resv_trylock(&bo->base._resv));
306 
307 	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
308 	dma_resv_unlock(&bo->base._resv);
309 	if (r)
310 		return r;
311 
312 	if (bo->type != ttm_bo_type_sg) {
313 		/* This works because the BO is about to be destroyed and nobody
314 		 * references it any more. The only tricky case is the trylock on
315 		 * the resv object while holding the lru_lock.
316 		 */
317 		spin_lock(&ttm_bo_glob.lru_lock);
318 		bo->base.resv = &bo->base._resv;
319 		spin_unlock(&ttm_bo_glob.lru_lock);
320 	}
321 
322 	return r;
323 }
324 
325 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
326 {
327 	struct dma_resv *resv = &bo->base._resv;
328 	struct dma_resv_list *fobj;
329 	struct dma_fence *fence;
330 	int i;
331 
332 	rcu_read_lock();
333 	fobj = rcu_dereference(resv->fence);
334 	fence = rcu_dereference(resv->fence_excl);
335 	if (fence && !fence->ops->signaled)
336 		dma_fence_enable_sw_signaling(fence);
337 
338 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
339 		fence = rcu_dereference(fobj->shared[i]);
340 
341 		if (!fence->ops->signaled)
342 			dma_fence_enable_sw_signaling(fence);
343 	}
344 	rcu_read_unlock();
345 }
346 
347 /**
348  * ttm_bo_cleanup_refs - clean up a BO queued for delayed destruction
349  * @bo: the buffer object to clean up
350  * @interruptible: any sleeps should occur interruptibly
351  * @no_wait_gpu: never wait for the GPU, return -EBUSY instead
352  * @unlock_resv: unlock the reservation lock as well
353  *
354  * If the bo is idle, remove it from the LRU lists and unreference it;
355  * if it is not idle, block if possible. Must be called with the lru_lock
356  * and the reservation held. This function will drop the lru_lock, and
357  * optionally the reservation lock, before returning.
358  */
359 
360 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
361 			       bool interruptible, bool no_wait_gpu,
362 			       bool unlock_resv)
363 {
364 	struct dma_resv *resv = &bo->base._resv;
365 	int ret;
366 
367 	if (dma_resv_test_signaled_rcu(resv, true))
368 		ret = 0;
369 	else
370 		ret = -EBUSY;
371 
372 	if (ret && !no_wait_gpu) {
373 		long lret;
374 
375 		if (unlock_resv)
376 			dma_resv_unlock(bo->base.resv);
377 		spin_unlock(&ttm_bo_glob.lru_lock);
378 
379 		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
380 						 30 * HZ);
381 
382 		if (lret < 0)
383 			return lret;
384 		else if (lret == 0)
385 			return -EBUSY;
386 
387 		spin_lock(&ttm_bo_glob.lru_lock);
388 		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
389 			/*
390 			 * We raced, and lost, someone else holds the reservation now,
391 			 * and is probably busy in ttm_bo_cleanup_memtype_use.
392 			 *
393 			 * Even if that is not the case, delayed destruction would
394 			 * succeed anyway because we finished waiting, so just return
395 			 * success here.
396 			 */
397 			spin_unlock(&ttm_bo_glob.lru_lock);
398 			return 0;
399 		}
400 		ret = 0;
401 	}
402 
403 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
404 		if (unlock_resv)
405 			dma_resv_unlock(bo->base.resv);
406 		spin_unlock(&ttm_bo_glob.lru_lock);
407 		return ret;
408 	}
409 
410 	ttm_bo_del_from_lru(bo);
411 	list_del_init(&bo->ddestroy);
412 	spin_unlock(&ttm_bo_glob.lru_lock);
413 	ttm_bo_cleanup_memtype_use(bo);
414 
415 	if (unlock_resv)
416 		dma_resv_unlock(bo->base.resv);
417 
418 	ttm_bo_put(bo);
419 
420 	return 0;
421 }
422 
423 /**
424  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
425  * encountered buffers.
426  */
427 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
428 {
429 	struct ttm_bo_global *glob = &ttm_bo_glob;
430 	struct list_head removed;
431 	bool empty;
432 
433 	INIT_LIST_HEAD(&removed);
434 
435 	spin_lock(&glob->lru_lock);
436 	while (!list_empty(&bdev->ddestroy)) {
437 		struct ttm_buffer_object *bo;
438 
439 		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
440 				      ddestroy);
441 		list_move_tail(&bo->ddestroy, &removed);
442 		if (!ttm_bo_get_unless_zero(bo))
443 			continue;
444 
445 		if (remove_all || bo->base.resv != &bo->base._resv) {
446 			spin_unlock(&glob->lru_lock);
447 			dma_resv_lock(bo->base.resv, NULL);
448 
449 			spin_lock(&glob->lru_lock);
450 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
451 
452 		} else if (dma_resv_trylock(bo->base.resv)) {
453 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
454 		} else {
455 			spin_unlock(&glob->lru_lock);
456 		}
457 
458 		ttm_bo_put(bo);
459 		spin_lock(&glob->lru_lock);
460 	}
461 	list_splice_tail(&removed, &bdev->ddestroy);
462 	empty = list_empty(&bdev->ddestroy);
463 	spin_unlock(&glob->lru_lock);
464 
465 	return empty;
466 }
467 
468 static void ttm_bo_delayed_workqueue(struct work_struct *work)
469 {
470 	struct ttm_bo_device *bdev =
471 	    container_of(work, struct ttm_bo_device, wq.work);
472 
473 	if (!ttm_bo_delayed_delete(bdev, false))
474 		schedule_delayed_work(&bdev->wq,
475 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
476 }
477 
478 static void ttm_bo_release(struct kref *kref)
479 {
480 	struct ttm_buffer_object *bo =
481 	    container_of(kref, struct ttm_buffer_object, kref);
482 	struct ttm_bo_device *bdev = bo->bdev;
483 	size_t acc_size = bo->acc_size;
484 	int ret;
485 
486 	if (!bo->deleted) {
487 		ret = ttm_bo_individualize_resv(bo);
488 		if (ret) {
489 			/* Last resort: if we fail to allocate memory for the
490 			 * fences, block for the BO to become idle
491 			 */
492 			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
493 						  30 * HZ);
494 		}
495 
496 		if (bo->bdev->driver->release_notify)
497 			bo->bdev->driver->release_notify(bo);
498 
499 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
500 		ttm_mem_io_free(bdev, &bo->mem);
501 	}
502 
503 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
504 	    !dma_resv_trylock(bo->base.resv)) {
505 		/* The BO is not idle, resurrect it for delayed destroy */
506 		ttm_bo_flush_all_fences(bo);
507 		bo->deleted = true;
508 
509 		spin_lock(&ttm_bo_glob.lru_lock);
510 
511 		/*
512 		 * Make pinned bos immediately available to
513 		 * shrinkers, now that they are queued for
514 		 * destruction.
515 		 */
516 		if (bo->pin_count) {
517 			bo->pin_count = 0;
518 			ttm_bo_del_from_lru(bo);
519 			ttm_bo_add_mem_to_lru(bo, &bo->mem);
520 		}
521 
522 		kref_init(&bo->kref);
523 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
524 		spin_unlock(&ttm_bo_glob.lru_lock);
525 
526 		schedule_delayed_work(&bdev->wq,
527 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
528 		return;
529 	}
530 
531 	spin_lock(&ttm_bo_glob.lru_lock);
532 	ttm_bo_del_from_lru(bo);
533 	list_del(&bo->ddestroy);
534 	spin_unlock(&ttm_bo_glob.lru_lock);
535 
536 	ttm_bo_cleanup_memtype_use(bo);
537 	dma_resv_unlock(bo->base.resv);
538 
539 	atomic_dec(&ttm_bo_glob.bo_count);
540 	dma_fence_put(bo->moving);
541 	if (!ttm_bo_uses_embedded_gem_object(bo))
542 		dma_resv_fini(&bo->base._resv);
543 	bo->destroy(bo);
544 	ttm_mem_global_free(&ttm_mem_glob, acc_size);
545 }
546 
547 void ttm_bo_put(struct ttm_buffer_object *bo)
548 {
549 	kref_put(&bo->kref, ttm_bo_release);
550 }
551 EXPORT_SYMBOL(ttm_bo_put);
552 
553 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
554 {
555 	return cancel_delayed_work_sync(&bdev->wq);
556 }
557 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
558 
559 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
560 {
561 	if (resched)
562 		schedule_delayed_work(&bdev->wq,
563 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
564 }
565 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
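
/*
 * Editorial example, not part of the original file: the two helpers above
 * are typically paired around suspend or GPU reset so the delayed-destroy
 * work does not run while the device is quiesced. The "my_*" names are
 * hypothetical.
 *
 *	static void my_device_reset(struct my_device *mdev)
 *	{
 *		int resched = ttm_bo_lock_delayed_workqueue(&mdev->bdev);
 *
 *		my_hw_reset(mdev);
 *
 *		ttm_bo_unlock_delayed_workqueue(&mdev->bdev, resched);
 *	}
 */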
566 
567 static int ttm_bo_evict(struct ttm_buffer_object *bo,
568 			struct ttm_operation_ctx *ctx)
569 {
570 	struct ttm_bo_device *bdev = bo->bdev;
571 	struct ttm_resource evict_mem;
572 	struct ttm_placement placement;
573 	struct ttm_place hop;
574 	int ret = 0;
575 
576 	memset(&hop, 0, sizeof(hop));
577 
578 	dma_resv_assert_held(bo->base.resv);
579 
580 	placement.num_placement = 0;
581 	placement.num_busy_placement = 0;
582 	bdev->driver->evict_flags(bo, &placement);
583 
584 	if (!placement.num_placement && !placement.num_busy_placement) {
585 		ttm_bo_wait(bo, false, false);
586 
587 		ttm_bo_cleanup_memtype_use(bo);
588 		return ttm_tt_create(bo, false);
589 	}
590 
591 	evict_mem = bo->mem;
592 	evict_mem.mm_node = NULL;
593 	evict_mem.bus.offset = 0;
594 	evict_mem.bus.addr = NULL;
595 
596 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
597 	if (ret) {
598 		if (ret != -ERESTARTSYS) {
599 			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
600 			       bo);
601 			ttm_bo_mem_space_debug(bo, &placement);
602 		}
603 		goto out;
604 	}
605 
606 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
607 	if (unlikely(ret)) {
608 		WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
609 		if (ret != -ERESTARTSYS)
610 			pr_err("Buffer eviction failed\n");
611 		ttm_resource_free(bo, &evict_mem);
612 	}
613 out:
614 	return ret;
615 }
616 
617 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
618 			      const struct ttm_place *place)
619 {
620 	/* Don't evict this BO if it's outside of the
621 	 * requested placement range
622 	 */
623 	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
624 	    (place->lpfn && place->lpfn <= bo->mem.start))
625 		return false;
626 
627 	return true;
628 }
629 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
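
/*
 * Editorial example, not part of the original file: a driver's
 * eviction_valuable callback usually applies its own veto first and then
 * falls back to the default range check above. The "my_*" names are
 * hypothetical.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_busy_for_eviction(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */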
630 
631 /**
632  * Check whether the target bo is allowed to be evicted or swapped out:
633  *
634  * a. if the bo shares its reservation object with ctx->resv, that object is
635  * assumed to be locked already, so it is not locked again; return true
636  * directly when either ctx->allow_res_evict is set or the target bo is
637  * already on the delayed free list;
638  *
639  * b. Otherwise, trylock it.
640  */
641 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
642 			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
643 {
644 	bool ret = false;
645 
646 	if (bo->base.resv == ctx->resv) {
647 		dma_resv_assert_held(bo->base.resv);
648 		if (ctx->allow_res_evict)
649 			ret = true;
650 		*locked = false;
651 		if (busy)
652 			*busy = false;
653 	} else {
654 		ret = dma_resv_trylock(bo->base.resv);
655 		*locked = ret;
656 		if (busy)
657 			*busy = !ret;
658 	}
659 
660 	return ret;
661 }
662 
663 /**
664  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
665  *
666  * @busy_bo: BO which couldn't be locked with trylock
667  * @ctx: operation context
668  * @ticket: acquire ticket
669  *
670  * Try to lock a busy buffer object to avoid failing eviction.
671  */
672 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
673 				   struct ttm_operation_ctx *ctx,
674 				   struct ww_acquire_ctx *ticket)
675 {
676 	int r;
677 
678 	if (!busy_bo || !ticket)
679 		return -EBUSY;
680 
681 	if (ctx->interruptible)
682 		r = dma_resv_lock_interruptible(busy_bo->base.resv,
683 							  ticket);
684 	else
685 		r = dma_resv_lock(busy_bo->base.resv, ticket);
686 
687 	/*
688 	 * TODO: It would be better to keep the BO locked until allocation is at
689 	 * least tried one more time, but that would mean a much larger rework
690 	 * of TTM.
691 	 */
692 	if (!r)
693 		dma_resv_unlock(busy_bo->base.resv);
694 
695 	return r == -EDEADLK ? -EBUSY : r;
696 }
697 
698 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
699 			struct ttm_resource_manager *man,
700 			const struct ttm_place *place,
701 			struct ttm_operation_ctx *ctx,
702 			struct ww_acquire_ctx *ticket)
703 {
704 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
705 	bool locked = false;
706 	unsigned i;
707 	int ret;
708 
709 	spin_lock(&ttm_bo_glob.lru_lock);
710 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
711 		list_for_each_entry(bo, &man->lru[i], lru) {
712 			bool busy;
713 
714 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
715 							    &busy)) {
716 				if (busy && !busy_bo && ticket !=
717 				    dma_resv_locking_ctx(bo->base.resv))
718 					busy_bo = bo;
719 				continue;
720 			}
721 
722 			if (place && !bdev->driver->eviction_valuable(bo,
723 								      place)) {
724 				if (locked)
725 					dma_resv_unlock(bo->base.resv);
726 				continue;
727 			}
728 			if (!ttm_bo_get_unless_zero(bo)) {
729 				if (locked)
730 					dma_resv_unlock(bo->base.resv);
731 				continue;
732 			}
733 			break;
734 		}
735 
736 		/* If the inner loop terminated early, we have our candidate */
737 		if (&bo->lru != &man->lru[i])
738 			break;
739 
740 		bo = NULL;
741 	}
742 
743 	if (!bo) {
744 		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
745 			busy_bo = NULL;
746 		spin_unlock(&ttm_bo_glob.lru_lock);
747 		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
748 		if (busy_bo)
749 			ttm_bo_put(busy_bo);
750 		return ret;
751 	}
752 
753 	if (bo->deleted) {
754 		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
755 					  ctx->no_wait_gpu, locked);
756 		ttm_bo_put(bo);
757 		return ret;
758 	}
759 
760 	spin_unlock(&ttm_bo_glob.lru_lock);
761 
762 	ret = ttm_bo_evict(bo, ctx);
763 	if (locked)
764 		ttm_bo_unreserve(bo);
765 
766 	ttm_bo_put(bo);
767 	return ret;
768 }
769 
770 /**
771  * Add the last move fence to the BO and reserve a new shared slot.
772  */
773 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
774 				 struct ttm_resource_manager *man,
775 				 struct ttm_resource *mem,
776 				 bool no_wait_gpu)
777 {
778 	struct dma_fence *fence;
779 	int ret;
780 
781 	spin_lock(&man->move_lock);
782 	fence = dma_fence_get(man->move);
783 	spin_unlock(&man->move_lock);
784 
785 	if (!fence)
786 		return 0;
787 
788 	if (no_wait_gpu) {
789 		dma_fence_put(fence);
790 		return -EBUSY;
791 	}
792 
793 	dma_resv_add_shared_fence(bo->base.resv, fence);
794 
795 	ret = dma_resv_reserve_shared(bo->base.resv, 1);
796 	if (unlikely(ret)) {
797 		dma_fence_put(fence);
798 		return ret;
799 	}
800 
801 	dma_fence_put(bo->moving);
802 	bo->moving = fence;
803 	return 0;
804 }
805 
806 /**
807  * Repeatedly evict memory from the LRU for the memory type of @mem until
808  * we create enough space, or we've evicted everything without finding enough.
809  */
810 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
811 				  const struct ttm_place *place,
812 				  struct ttm_resource *mem,
813 				  struct ttm_operation_ctx *ctx)
814 {
815 	struct ttm_bo_device *bdev = bo->bdev;
816 	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
817 	struct ww_acquire_ctx *ticket;
818 	int ret;
819 
820 	ticket = dma_resv_locking_ctx(bo->base.resv);
821 	do {
822 		ret = ttm_resource_alloc(bo, place, mem);
823 		if (likely(!ret))
824 			break;
825 		if (unlikely(ret != -ENOSPC))
826 			return ret;
827 		ret = ttm_mem_evict_first(bdev, man, place, ctx,
828 					  ticket);
829 		if (unlikely(ret != 0))
830 			return ret;
831 	} while (1);
832 
833 	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
834 }
835 
836 /**
837  * ttm_bo_mem_placement - check if placement is compatible
838  * @bo: BO to find memory for
839  * @place: where to search
840  * @mem: the memory object to fill in
841  *
842  * Check if placement is compatible and fill in mem structure.
843  * Returns 0 when the placement can be used, or -EBUSY when it cannot
844  * (e.g. the corresponding resource manager is not enabled).
845  */
846 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
847 				const struct ttm_place *place,
848 				struct ttm_resource *mem)
849 {
850 	struct ttm_bo_device *bdev = bo->bdev;
851 	struct ttm_resource_manager *man;
852 
853 	man = ttm_manager_type(bdev, place->mem_type);
854 	if (!man || !ttm_resource_manager_used(man))
855 		return -EBUSY;
856 
857 	mem->mem_type = place->mem_type;
858 	mem->placement = place->flags;
859 
860 	spin_lock(&ttm_bo_glob.lru_lock);
861 	ttm_bo_del_from_lru(bo);
862 	ttm_bo_add_mem_to_lru(bo, mem);
863 	spin_unlock(&ttm_bo_glob.lru_lock);
864 
865 	return 0;
866 }
867 
868 /**
869  * Creates space for memory region @mem according to its type.
870  *
871  * This function first searches for free space in compatible memory types in
872  * the priority order defined by the driver.  If free space isn't found, then
873  * ttm_bo_mem_force_space is attempted in priority order to evict and find
874  * space.
875  */
876 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
877 			struct ttm_placement *placement,
878 			struct ttm_resource *mem,
879 			struct ttm_operation_ctx *ctx)
880 {
881 	struct ttm_bo_device *bdev = bo->bdev;
882 	bool type_found = false;
883 	int i, ret;
884 
885 	ret = dma_resv_reserve_shared(bo->base.resv, 1);
886 	if (unlikely(ret))
887 		return ret;
888 
889 	for (i = 0; i < placement->num_placement; ++i) {
890 		const struct ttm_place *place = &placement->placement[i];
891 		struct ttm_resource_manager *man;
892 
893 		ret = ttm_bo_mem_placement(bo, place, mem);
894 		if (ret)
895 			continue;
896 
897 		type_found = true;
898 		ret = ttm_resource_alloc(bo, place, mem);
899 		if (ret == -ENOSPC)
900 			continue;
901 		if (unlikely(ret))
902 			goto error;
903 
904 		man = ttm_manager_type(bdev, mem->mem_type);
905 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
906 		if (unlikely(ret)) {
907 			ttm_resource_free(bo, mem);
908 			if (ret == -EBUSY)
909 				continue;
910 
911 			goto error;
912 		}
913 		return 0;
914 	}
915 
916 	for (i = 0; i < placement->num_busy_placement; ++i) {
917 		const struct ttm_place *place = &placement->busy_placement[i];
918 
919 		ret = ttm_bo_mem_placement(bo, place, mem);
920 		if (ret)
921 			continue;
922 
923 		type_found = true;
924 		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
925 		if (likely(!ret))
926 			return 0;
927 
928 		if (ret && ret != -EBUSY)
929 			goto error;
930 	}
931 
932 	ret = -ENOMEM;
933 	if (!type_found) {
934 		pr_err("No compatible memory type found\n");
935 		ret = -EINVAL;
936 	}
937 
938 error:
939 	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
940 		ttm_bo_move_to_lru_tail_unlocked(bo);
941 	}
942 
943 	return ret;
944 }
945 EXPORT_SYMBOL(ttm_bo_mem_space);
946 
947 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
948 				     struct ttm_resource *mem,
949 				     struct ttm_operation_ctx *ctx,
950 				     struct ttm_place *hop)
951 {
952 	struct ttm_placement hop_placement;
953 	int ret;
954 	struct ttm_resource hop_mem = *mem;
955 
956 	hop_mem.mm_node = NULL;
957 	hop_mem.mem_type = TTM_PL_SYSTEM;
958 	hop_mem.placement = 0;
959 
960 	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
961 	hop_placement.placement = hop_placement.busy_placement = hop;
962 
963 	/* find space in the bounce domain */
964 	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
965 	if (ret)
966 		return ret;
967 	/* move to the bounce domain */
968 	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
969 	if (ret)
970 		return ret;
971 	return 0;
972 }
973 
974 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
975 			      struct ttm_placement *placement,
976 			      struct ttm_operation_ctx *ctx)
977 {
978 	int ret = 0;
979 	struct ttm_place hop;
980 	struct ttm_resource mem;
981 
982 	dma_resv_assert_held(bo->base.resv);
983 
984 	memset(&hop, 0, sizeof(hop));
985 
986 	mem.num_pages = bo->num_pages;
987 	mem.size = mem.num_pages << PAGE_SHIFT;
988 	mem.page_alignment = bo->mem.page_alignment;
989 	mem.bus.offset = 0;
990 	mem.bus.addr = NULL;
991 	mem.mm_node = NULL;
992 
993 	/*
994 	 * Determine where to move the buffer.
995 	 *
996 	 * If the driver determines that the move needs an
997 	 * extra step, it returns -EMULTIHOP; the buffer is
998 	 * then moved to the temporary stop first and the
999 	 * driver is called again to make the second hop to
1000 	 * the final placement.
1001 	 */
1002 bounce:
1003 	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1004 	if (ret)
1005 		return ret;
1006 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
1007 	if (ret == -EMULTIHOP) {
1008 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
1009 		if (ret)
1010 			return ret;
1011 		/* try to move to the final place now. */
1012 		goto bounce;
1013 	}
1014 	if (ret)
1015 		ttm_resource_free(bo, &mem);
1016 	return ret;
1017 }
1018 
1019 static bool ttm_bo_places_compat(const struct ttm_place *places,
1020 				 unsigned num_placement,
1021 				 struct ttm_resource *mem,
1022 				 uint32_t *new_flags)
1023 {
1024 	unsigned i;
1025 
1026 	for (i = 0; i < num_placement; i++) {
1027 		const struct ttm_place *heap = &places[i];
1028 
1029 		if ((mem->start < heap->fpfn ||
1030 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1031 			continue;
1032 
1033 		*new_flags = heap->flags;
1034 		if ((mem->mem_type == heap->mem_type) &&
1035 		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1036 		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1037 			return true;
1038 	}
1039 	return false;
1040 }
1041 
1042 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1043 		       struct ttm_resource *mem,
1044 		       uint32_t *new_flags)
1045 {
1046 	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1047 				 mem, new_flags))
1048 		return true;
1049 
1050 	if ((placement->busy_placement != placement->placement ||
1051 	     placement->num_busy_placement > placement->num_placement) &&
1052 	    ttm_bo_places_compat(placement->busy_placement,
1053 				 placement->num_busy_placement,
1054 				 mem, new_flags))
1055 		return true;
1056 
1057 	return false;
1058 }
1059 EXPORT_SYMBOL(ttm_bo_mem_compat);
1060 
1061 int ttm_bo_validate(struct ttm_buffer_object *bo,
1062 		    struct ttm_placement *placement,
1063 		    struct ttm_operation_ctx *ctx)
1064 {
1065 	int ret;
1066 	uint32_t new_flags;
1067 
1068 	dma_resv_assert_held(bo->base.resv);
1069 
1070 	/*
1071 	 * Remove the backing store if no placement is given.
1072 	 */
1073 	if (!placement->num_placement && !placement->num_busy_placement) {
1074 		ret = ttm_bo_pipeline_gutting(bo);
1075 		if (ret)
1076 			return ret;
1077 
1078 		return ttm_tt_create(bo, false);
1079 	}
1080 
1081 	/*
1082 	 * Check whether we need to move buffer.
1083 	 */
1084 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1085 		ret = ttm_bo_move_buffer(bo, placement, ctx);
1086 		if (ret)
1087 			return ret;
1088 	}
1089 	/*
1090 	 * We might need to add a TTM.
1091 	 */
1092 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1093 		ret = ttm_tt_create(bo, true);
1094 		if (ret)
1095 			return ret;
1096 	}
1097 	return 0;
1098 }
1099 EXPORT_SYMBOL(ttm_bo_validate);
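
/*
 * Editorial example, not part of the original file: validating an already
 * reserved BO into VRAM with a single placement. The ttm_place values are
 * illustrative only and the "my_*" name is hypothetical.
 *
 *	static int my_bo_move_to_vram(struct ttm_buffer_object *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { .interruptible = true };
 *		struct ttm_place place = {
 *			.fpfn = 0,
 *			.lpfn = 0,
 *			.mem_type = TTM_PL_VRAM,
 *			.flags = 0,
 *		};
 *		struct ttm_placement placement = {
 *			.num_placement = 1,
 *			.placement = &place,
 *			.num_busy_placement = 1,
 *			.busy_placement = &place,
 *		};
 *
 *		dma_resv_assert_held(bo->base.resv);
 *		return ttm_bo_validate(bo, &placement, &ctx);
 *	}
 */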
1100 
1101 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1102 			 struct ttm_buffer_object *bo,
1103 			 unsigned long size,
1104 			 enum ttm_bo_type type,
1105 			 struct ttm_placement *placement,
1106 			 uint32_t page_alignment,
1107 			 struct ttm_operation_ctx *ctx,
1108 			 size_t acc_size,
1109 			 struct sg_table *sg,
1110 			 struct dma_resv *resv,
1111 			 void (*destroy) (struct ttm_buffer_object *))
1112 {
1113 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
1114 	int ret = 0;
1115 	unsigned long num_pages;
1116 	bool locked;
1117 
1118 	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1119 	if (ret) {
1120 		pr_err("Out of kernel memory\n");
1121 		if (destroy)
1122 			(*destroy)(bo);
1123 		else
1124 			kfree(bo);
1125 		return -ENOMEM;
1126 	}
1127 
1128 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1129 	if (num_pages == 0) {
1130 		pr_err("Illegal buffer object size\n");
1131 		if (destroy)
1132 			(*destroy)(bo);
1133 		else
1134 			kfree(bo);
1135 		ttm_mem_global_free(mem_glob, acc_size);
1136 		return -EINVAL;
1137 	}
1138 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1139 
1140 	kref_init(&bo->kref);
1141 	INIT_LIST_HEAD(&bo->lru);
1142 	INIT_LIST_HEAD(&bo->ddestroy);
1143 	INIT_LIST_HEAD(&bo->swap);
1144 	bo->bdev = bdev;
1145 	bo->type = type;
1146 	bo->num_pages = num_pages;
1147 	bo->mem.size = num_pages << PAGE_SHIFT;
1148 	bo->mem.mem_type = TTM_PL_SYSTEM;
1149 	bo->mem.num_pages = bo->num_pages;
1150 	bo->mem.mm_node = NULL;
1151 	bo->mem.page_alignment = page_alignment;
1152 	bo->mem.bus.offset = 0;
1153 	bo->mem.bus.addr = NULL;
1154 	bo->moving = NULL;
1155 	bo->mem.placement = 0;
1156 	bo->acc_size = acc_size;
1157 	bo->pin_count = 0;
1158 	bo->sg = sg;
1159 	if (resv) {
1160 		bo->base.resv = resv;
1161 		dma_resv_assert_held(bo->base.resv);
1162 	} else {
1163 		bo->base.resv = &bo->base._resv;
1164 	}
1165 	if (!ttm_bo_uses_embedded_gem_object(bo)) {
1166 		/*
1167 		 * bo.gem is not initialized, so we have to set up the
1168 		 * struct elements we want to use regardless.
1169 		 */
1170 		dma_resv_init(&bo->base._resv);
1171 		drm_vma_node_reset(&bo->base.vma_node);
1172 	}
1173 	atomic_inc(&ttm_bo_glob.bo_count);
1174 
1175 	/*
1176 	 * For ttm_bo_type_device and ttm_bo_type_sg buffers, allocate
1177 	 * mmap address space from the device.
1178 	 */
1179 	if (bo->type == ttm_bo_type_device ||
1180 	    bo->type == ttm_bo_type_sg)
1181 		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1182 					 bo->mem.num_pages);
1183 
1184 	/* passed reservation objects should already be locked,
1185 	 * since otherwise lockdep will be angered in radeon.
1186 	 */
1187 	if (!resv) {
1188 		locked = dma_resv_trylock(bo->base.resv);
1189 		WARN_ON(!locked);
1190 	}
1191 
1192 	if (likely(!ret))
1193 		ret = ttm_bo_validate(bo, placement, ctx);
1194 
1195 	if (unlikely(ret)) {
1196 		if (!resv)
1197 			ttm_bo_unreserve(bo);
1198 
1199 		ttm_bo_put(bo);
1200 		return ret;
1201 	}
1202 
1203 	ttm_bo_move_to_lru_tail_unlocked(bo);
1204 
1205 	return ret;
1206 }
1207 EXPORT_SYMBOL(ttm_bo_init_reserved);
1208 
1209 int ttm_bo_init(struct ttm_bo_device *bdev,
1210 		struct ttm_buffer_object *bo,
1211 		unsigned long size,
1212 		enum ttm_bo_type type,
1213 		struct ttm_placement *placement,
1214 		uint32_t page_alignment,
1215 		bool interruptible,
1216 		size_t acc_size,
1217 		struct sg_table *sg,
1218 		struct dma_resv *resv,
1219 		void (*destroy) (struct ttm_buffer_object *))
1220 {
1221 	struct ttm_operation_ctx ctx = { interruptible, false };
1222 	int ret;
1223 
1224 	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1225 				   page_alignment, &ctx, acc_size,
1226 				   sg, resv, destroy);
1227 	if (ret)
1228 		return ret;
1229 
1230 	if (!resv)
1231 		ttm_bo_unreserve(bo);
1232 
1233 	return 0;
1234 }
1235 EXPORT_SYMBOL(ttm_bo_init);
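
/*
 * Editorial example, not part of the original file: creating a BO with
 * ttm_bo_init(), using ttm_bo_dma_acc_size() below for the accounting size
 * and a driver destroy callback that frees the embedding structure. Note
 * that on failure ttm_bo_init() has already invoked the destroy callback,
 * so the caller must not free the object again. The "my_*" names are
 * hypothetical.
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	static int my_bo_create(struct my_device *mdev, unsigned long size,
 *				struct ttm_placement *placement,
 *				struct my_bo **pbo)
 *	{
 *		struct my_bo *mbo;
 *		size_t acc_size;
 *		int ret;
 *
 *		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *		if (!mbo)
 *			return -ENOMEM;
 *
 *		acc_size = ttm_bo_dma_acc_size(&mdev->bdev, size,
 *					       sizeof(struct my_bo));
 *		ret = ttm_bo_init(&mdev->bdev, &mbo->tbo, size,
 *				  ttm_bo_type_device, placement, 0, true,
 *				  acc_size, NULL, NULL, my_bo_destroy);
 *		if (ret)
 *			return ret;
 *
 *		*pbo = mbo;
 *		return 0;
 *	}
 */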
1236 
1237 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1238 			   unsigned long bo_size,
1239 			   unsigned struct_size)
1240 {
1241 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1242 	size_t size = 0;
1243 
1244 	size += ttm_round_pot(struct_size);
1245 	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1246 	size += ttm_round_pot(sizeof(struct ttm_tt));
1247 	return size;
1248 }
1249 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1250 
1251 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1252 {
1253 	struct ttm_bo_global *glob =
1254 		container_of(kobj, struct ttm_bo_global, kobj);
1255 
1256 	__free_page(glob->dummy_read_page);
1257 }
1258 
1259 static void ttm_bo_global_release(void)
1260 {
1261 	struct ttm_bo_global *glob = &ttm_bo_glob;
1262 
1263 	mutex_lock(&ttm_global_mutex);
1264 	if (--ttm_bo_glob_use_count > 0)
1265 		goto out;
1266 
1267 	kobject_del(&glob->kobj);
1268 	kobject_put(&glob->kobj);
1269 	ttm_mem_global_release(&ttm_mem_glob);
1270 	memset(glob, 0, sizeof(*glob));
1271 out:
1272 	mutex_unlock(&ttm_global_mutex);
1273 }
1274 
1275 static int ttm_bo_global_init(void)
1276 {
1277 	struct ttm_bo_global *glob = &ttm_bo_glob;
1278 	int ret = 0;
1279 	unsigned i;
1280 
1281 	mutex_lock(&ttm_global_mutex);
1282 	if (++ttm_bo_glob_use_count > 1)
1283 		goto out;
1284 
1285 	ret = ttm_mem_global_init(&ttm_mem_glob);
1286 	if (ret)
1287 		goto out;
1288 
1289 	spin_lock_init(&glob->lru_lock);
1290 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1291 
1292 	if (unlikely(glob->dummy_read_page == NULL)) {
1293 		ret = -ENOMEM;
1294 		goto out;
1295 	}
1296 
1297 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1298 		INIT_LIST_HEAD(&glob->swap_lru[i]);
1299 	INIT_LIST_HEAD(&glob->device_list);
1300 	atomic_set(&glob->bo_count, 0);
1301 
1302 	ret = kobject_init_and_add(
1303 		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1304 	if (unlikely(ret != 0))
1305 		kobject_put(&glob->kobj);
1306 out:
1307 	mutex_unlock(&ttm_global_mutex);
1308 	return ret;
1309 }
1310 
1311 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1312 {
1313 	struct ttm_bo_global *glob = &ttm_bo_glob;
1314 	int ret = 0;
1315 	unsigned i;
1316 	struct ttm_resource_manager *man;
1317 
1318 	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1319 	ttm_resource_manager_set_used(man, false);
1320 	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1321 
1322 	mutex_lock(&ttm_global_mutex);
1323 	list_del(&bdev->device_list);
1324 	mutex_unlock(&ttm_global_mutex);
1325 
1326 	cancel_delayed_work_sync(&bdev->wq);
1327 
1328 	if (ttm_bo_delayed_delete(bdev, true))
1329 		pr_debug("Delayed destroy list was clean\n");
1330 
1331 	spin_lock(&glob->lru_lock);
1332 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1333 		if (list_empty(&man->lru[0]))
1334 			pr_debug("Swap list %d was clean\n", i);
1335 	spin_unlock(&glob->lru_lock);
1336 
1337 	ttm_pool_fini(&bdev->pool);
1338 
1339 	if (!ret)
1340 		ttm_bo_global_release();
1341 
1342 	return ret;
1343 }
1344 EXPORT_SYMBOL(ttm_bo_device_release);
1345 
1346 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1347 {
1348 	struct ttm_resource_manager *man = &bdev->sysman;
1349 
1350 	/*
1351 	 * Initialize the system memory buffer type.
1352 	 * Other types need to be driver / IOCTL initialized.
1353 	 */
1354 	man->use_tt = true;
1355 
1356 	ttm_resource_manager_init(man, 0);
1357 	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1358 	ttm_resource_manager_set_used(man, true);
1359 }
1360 
1361 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1362 		       struct ttm_bo_driver *driver,
1363 		       struct device *dev,
1364 		       struct address_space *mapping,
1365 		       struct drm_vma_offset_manager *vma_manager,
1366 		       bool use_dma_alloc, bool use_dma32)
1367 {
1368 	struct ttm_bo_global *glob = &ttm_bo_glob;
1369 	int ret;
1370 
1371 	if (WARN_ON(vma_manager == NULL))
1372 		return -EINVAL;
1373 
1374 	ret = ttm_bo_global_init();
1375 	if (ret)
1376 		return ret;
1377 
1378 	bdev->driver = driver;
1379 
1380 	ttm_bo_init_sysman(bdev);
1381 	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
1382 
1383 	bdev->vma_manager = vma_manager;
1384 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1385 	INIT_LIST_HEAD(&bdev->ddestroy);
1386 	bdev->dev_mapping = mapping;
1387 	mutex_lock(&ttm_global_mutex);
1388 	list_add_tail(&bdev->device_list, &glob->device_list);
1389 	mutex_unlock(&ttm_global_mutex);
1390 
1391 	return 0;
1392 }
1393 EXPORT_SYMBOL(ttm_bo_device_init);
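
/*
 * Editorial example, not part of the original file: drivers call
 * ttm_bo_device_init() once at load time, before registering their VRAM
 * and GTT resource managers. The "my_*" names and the mdev/ddev layout are
 * hypothetical.
 *
 *	static int my_ttm_init(struct my_device *mdev)
 *	{
 *		int ret;
 *
 *		ret = ttm_bo_device_init(&mdev->bdev, &my_bo_driver,
 *					 mdev->dev,
 *					 mdev->ddev->anon_inode->i_mapping,
 *					 mdev->ddev->vma_offset_manager,
 *					 true, false);
 *		if (ret)
 *			return ret;
 *
 *		return my_init_vram_and_gtt_managers(mdev);
 *	}
 *
 * The last two arguments are use_dma_alloc and use_dma32; what a real
 * driver passes depends on its DMA setup.
 */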
1394 
1395 /*
1396  * buffer object vm functions.
1397  */
1398 
1399 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1400 {
1401 	struct ttm_bo_device *bdev = bo->bdev;
1402 
1403 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1404 	ttm_mem_io_free(bdev, &bo->mem);
1405 }
1406 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1407 
1408 int ttm_bo_wait(struct ttm_buffer_object *bo,
1409 		bool interruptible, bool no_wait)
1410 {
1411 	long timeout = 15 * HZ;
1412 
1413 	if (no_wait) {
1414 		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1415 			return 0;
1416 		else
1417 			return -EBUSY;
1418 	}
1419 
1420 	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1421 						      interruptible, timeout);
1422 	if (timeout < 0)
1423 		return timeout;
1424 
1425 	if (timeout == 0)
1426 		return -EBUSY;
1427 
1428 	dma_resv_add_excl_fence(bo->base.resv, NULL);
1429 	return 0;
1430 }
1431 EXPORT_SYMBOL(ttm_bo_wait);
1432 
1433 /**
1434  * A buffer object shrink method that tries to swap out the first
1435  * buffer object on the bo_global::swap_lru list.
1436  */
1437 int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
1438 {
1439 	struct ttm_bo_global *glob = &ttm_bo_glob;
1440 	struct ttm_buffer_object *bo;
1441 	int ret = -EBUSY;
1442 	bool locked;
1443 	unsigned i;
1444 
1445 	spin_lock(&glob->lru_lock);
1446 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1447 		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1448 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1449 							    NULL))
1450 				continue;
1451 
1452 			if (!ttm_bo_get_unless_zero(bo)) {
1453 				if (locked)
1454 					dma_resv_unlock(bo->base.resv);
1455 				continue;
1456 			}
1457 
1458 			ret = 0;
1459 			break;
1460 		}
1461 		if (!ret)
1462 			break;
1463 	}
1464 
1465 	if (ret) {
1466 		spin_unlock(&glob->lru_lock);
1467 		return ret;
1468 	}
1469 
1470 	if (bo->deleted) {
1471 		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1472 		ttm_bo_put(bo);
1473 		return ret;
1474 	}
1475 
1476 	ttm_bo_del_from_lru(bo);
1477 	spin_unlock(&glob->lru_lock);
1478 
1479 	/**
1480 	 * Move to system cached
1481 	 */
1482 
1483 	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
1484 		struct ttm_operation_ctx ctx = { false, false };
1485 		struct ttm_resource evict_mem;
1486 		struct ttm_place hop;
1487 
1488 		memset(&hop, 0, sizeof(hop));
1489 
1490 		evict_mem = bo->mem;
1491 		evict_mem.mm_node = NULL;
1492 		evict_mem.placement = 0;
1493 		evict_mem.mem_type = TTM_PL_SYSTEM;
1494 
1495 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
1496 		if (unlikely(ret != 0)) {
1497 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1498 			goto out;
1499 		}
1500 	}
1501 
1502 	/**
1503 	 * Make sure BO is idle.
1504 	 */
1505 
1506 	ret = ttm_bo_wait(bo, false, false);
1507 	if (unlikely(ret != 0))
1508 		goto out;
1509 
1510 	ttm_bo_unmap_virtual(bo);
1511 
1512 	/**
1513 	 * Swap out. Buffer will be swapped in again as soon as
1514 	 * anyone tries to access a ttm page.
1515 	 */
1516 
1517 	if (bo->bdev->driver->swap_notify)
1518 		bo->bdev->driver->swap_notify(bo);
1519 
1520 	ret = ttm_tt_swapout(bo->bdev, bo->ttm);
1521 out:
1522 
1523 	/**
1524 	 *
1525 	 * Unreserve without putting on LRU to avoid swapping out an
1526 	 * already swapped buffer.
1527 	 */
1528 	if (locked)
1529 		dma_resv_unlock(bo->base.resv);
1530 	ttm_bo_put(bo);
1531 	return ret;
1532 }
1533 EXPORT_SYMBOL(ttm_bo_swapout);
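
/*
 * Editorial example, not part of the original file: a shrink path can call
 * ttm_bo_swapout() in a loop; each successful call swaps out one BO, and a
 * non-zero return means nothing further could be swapped out. The "my_*"
 * name is hypothetical.
 *
 *	static unsigned long my_shrink_bos(unsigned long nr_to_swap)
 *	{
 *		struct ttm_operation_ctx ctx = { .no_wait_gpu = false };
 *		unsigned long swapped = 0;
 *
 *		while (swapped < nr_to_swap) {
 *			if (ttm_bo_swapout(&ctx))
 *				break;
 *			++swapped;
 *		}
 *
 *		return swapped;
 *	}
 */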
1534 
1535 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1536 {
1537 	if (bo->ttm == NULL)
1538 		return;
1539 
1540 	ttm_tt_destroy(bo->bdev, bo->ttm);
1541 	bo->ttm = NULL;
1542 }
1543 
1544