xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_bo.c (revision f5b06569)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #define pr_fmt(fmt) "[TTM] " fmt
32 
33 #include <drm/ttm/ttm_module.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/reservation.h>
44 
45 #define TTM_ASSERT_LOCKED(param)
46 #define TTM_DEBUG(fmt, arg...)
47 #define TTM_BO_HASH_ORDER 13
48 
49 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
50 static void ttm_bo_global_kobj_release(struct kobject *kobj);
51 
52 static struct attribute ttm_bo_count = {
53 	.name = "bo_count",
54 	.mode = S_IRUGO
55 };
56 
57 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
58 					  uint32_t *mem_type)
59 {
60 	int i;
61 
62 	for (i = 0; i <= TTM_PL_PRIV5; i++)
63 		if (place->flags & (1 << i)) {
64 			*mem_type = i;
65 			return 0;
66 		}
67 	return -EINVAL;
68 }
69 
70 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
71 {
72 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
73 
74 	pr_err("    has_type: %d\n", man->has_type);
75 	pr_err("    use_type: %d\n", man->use_type);
76 	pr_err("    flags: 0x%08X\n", man->flags);
77 	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
78 	pr_err("    size: %llu\n", man->size);
79 	pr_err("    available_caching: 0x%08X\n", man->available_caching);
80 	pr_err("    default_caching: 0x%08X\n", man->default_caching);
81 	if (mem_type != TTM_PL_SYSTEM)
82 		(*man->func->debug)(man, TTM_PFX);
83 }
84 
85 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
86 					struct ttm_placement *placement)
87 {
88 	int i, ret, mem_type;
89 
90 	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
91 	       bo, bo->mem.num_pages, bo->mem.size >> 10,
92 	       bo->mem.size >> 20);
93 	for (i = 0; i < placement->num_placement; i++) {
94 		ret = ttm_mem_type_from_place(&placement->placement[i],
95 						&mem_type);
96 		if (ret)
97 			return;
98 		pr_err("  placement[%d]=0x%08X (%d)\n",
99 		       i, placement->placement[i].flags, mem_type);
100 		ttm_mem_type_debug(bo->bdev, mem_type);
101 	}
102 }
103 
104 static ssize_t ttm_bo_global_show(struct kobject *kobj,
105 				  struct attribute *attr,
106 				  char *buffer)
107 {
108 	struct ttm_bo_global *glob =
109 		container_of(kobj, struct ttm_bo_global, kobj);
110 
111 	return snprintf(buffer, PAGE_SIZE, "%lu\n",
112 			(unsigned long) atomic_read(&glob->bo_count));
113 }
114 
115 static struct attribute *ttm_bo_global_attrs[] = {
116 	&ttm_bo_count,
117 	NULL
118 };
119 
120 static const struct sysfs_ops ttm_bo_global_ops = {
121 	.show = &ttm_bo_global_show
122 };
123 
124 static struct kobj_type ttm_bo_glob_kobj_type  = {
125 	.release = &ttm_bo_global_kobj_release,
126 	.sysfs_ops = &ttm_bo_global_ops,
127 	.default_attrs = ttm_bo_global_attrs
128 };
129 
130 
131 static inline uint32_t ttm_bo_type_flags(unsigned type)
132 {
133 	return 1 << (type);
134 }
135 
136 static void ttm_bo_release_list(struct kref *list_kref)
137 {
138 	struct ttm_buffer_object *bo =
139 	    container_of(list_kref, struct ttm_buffer_object, list_kref);
140 	struct ttm_bo_device *bdev = bo->bdev;
141 	size_t acc_size = bo->acc_size;
142 
143 	BUG_ON(atomic_read(&bo->list_kref.refcount));
144 	BUG_ON(atomic_read(&bo->kref.refcount));
145 	BUG_ON(atomic_read(&bo->cpu_writers));
146 	BUG_ON(bo->mem.mm_node != NULL);
147 	BUG_ON(!list_empty(&bo->lru));
148 	BUG_ON(!list_empty(&bo->ddestroy));
149 	ttm_tt_destroy(bo->ttm);
150 	atomic_dec(&bo->glob->bo_count);
151 	fence_put(bo->moving);
152 	if (bo->resv == &bo->ttm_resv)
153 		reservation_object_fini(&bo->ttm_resv);
154 	mutex_destroy(&bo->wu_mutex);
155 	if (bo->destroy)
156 		bo->destroy(bo);
157 	else
158 		kfree(bo);
160 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
161 }
162 
163 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
164 {
165 	struct ttm_bo_device *bdev = bo->bdev;
166 
167 	lockdep_assert_held(&bo->resv->lock.base);
168 
169 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
170 
171 		BUG_ON(!list_empty(&bo->lru));
172 
173 		list_add(&bo->lru, bdev->driver->lru_tail(bo));
174 		kref_get(&bo->list_kref);
175 
176 		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
177 			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
178 			kref_get(&bo->list_kref);
179 		}
180 	}
181 }
182 EXPORT_SYMBOL(ttm_bo_add_to_lru);
183 
184 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
185 {
186 	struct ttm_bo_device *bdev = bo->bdev;
187 	int put_count = 0;
188 
189 	if (bdev->driver->lru_removal)
190 		bdev->driver->lru_removal(bo);
191 
192 	if (!list_empty(&bo->swap)) {
193 		list_del_init(&bo->swap);
194 		++put_count;
195 	}
196 	if (!list_empty(&bo->lru)) {
197 		list_del_init(&bo->lru);
198 		++put_count;
199 	}
200 
201 	return put_count;
202 }
203 
204 static void ttm_bo_ref_bug(struct kref *list_kref)
205 {
206 	BUG();
207 }
208 
209 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
210 			 bool never_free)
211 {
212 	kref_sub(&bo->list_kref, count,
213 		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
214 }
215 
216 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
217 {
218 	int put_count;
219 
220 	spin_lock(&bo->glob->lru_lock);
221 	put_count = ttm_bo_del_from_lru(bo);
222 	spin_unlock(&bo->glob->lru_lock);
223 	ttm_bo_list_ref_sub(bo, put_count, true);
224 }
225 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
226 
227 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
228 {
229 	struct ttm_bo_device *bdev = bo->bdev;
230 	int put_count = 0;
231 
232 	lockdep_assert_held(&bo->resv->lock.base);
233 
234 	if (bdev->driver->lru_removal)
235 		bdev->driver->lru_removal(bo);
236 
237 	put_count = ttm_bo_del_from_lru(bo);
238 	ttm_bo_list_ref_sub(bo, put_count, true);
239 	ttm_bo_add_to_lru(bo);
240 }
241 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
242 
243 struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
244 {
245 	return bo->bdev->man[bo->mem.mem_type].lru.prev;
246 }
247 EXPORT_SYMBOL(ttm_bo_default_lru_tail);
248 
249 struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
250 {
251 	return bo->glob->swap_lru.prev;
252 }
253 EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
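
/*
 * Example (illustrative sketch, not taken from any particular driver): a
 * driver that does not need custom LRU handling can simply point its
 * ttm_bo_driver hooks at the default helpers above; the mydrv_ prefix is
 * hypothetical.
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		...
 *		.lru_tail = &ttm_bo_default_lru_tail,
 *		.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 *	};
 */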
254 
255 /*
256  * Call with bo->mutex held.
257  */
258 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
259 {
260 	struct ttm_bo_device *bdev = bo->bdev;
261 	struct ttm_bo_global *glob = bo->glob;
262 	int ret = 0;
263 	uint32_t page_flags = 0;
264 
265 	TTM_ASSERT_LOCKED(&bo->mutex);
266 	bo->ttm = NULL;
267 
268 	if (bdev->need_dma32)
269 		page_flags |= TTM_PAGE_FLAG_DMA32;
270 
271 	switch (bo->type) {
272 	case ttm_bo_type_device:
273 		if (zero_alloc)
274 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
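		/* fall through */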
275 	case ttm_bo_type_kernel:
276 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
277 						      page_flags, glob->dummy_read_page);
278 		if (unlikely(bo->ttm == NULL))
279 			ret = -ENOMEM;
280 		break;
281 	case ttm_bo_type_sg:
282 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
283 						      page_flags | TTM_PAGE_FLAG_SG,
284 						      glob->dummy_read_page);
285 		if (unlikely(bo->ttm == NULL)) {
286 			ret = -ENOMEM;
287 			break;
288 		}
289 		bo->ttm->sg = bo->sg;
290 		break;
291 	default:
292 		pr_err("Illegal buffer object type\n");
293 		ret = -EINVAL;
294 		break;
295 	}
296 
297 	return ret;
298 }
299 
300 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
301 				  struct ttm_mem_reg *mem,
302 				  bool evict, bool interruptible,
303 				  bool no_wait_gpu)
304 {
305 	struct ttm_bo_device *bdev = bo->bdev;
306 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
307 	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
308 	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
309 	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
310 	int ret = 0;
311 
312 	if (old_is_pci || new_is_pci ||
313 	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
314 		ret = ttm_mem_io_lock(old_man, true);
315 		if (unlikely(ret != 0))
316 			goto out_err;
317 		ttm_bo_unmap_virtual_locked(bo);
318 		ttm_mem_io_unlock(old_man);
319 	}
320 
321 	/*
322 	 * Create and bind a ttm if required.
323 	 */
324 
325 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
326 		if (bo->ttm == NULL) {
327 			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
328 			ret = ttm_bo_add_ttm(bo, zero);
329 			if (ret)
330 				goto out_err;
331 		}
332 
333 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
334 		if (ret)
335 			goto out_err;
336 
337 		if (mem->mem_type != TTM_PL_SYSTEM) {
338 			ret = ttm_tt_bind(bo->ttm, mem);
339 			if (ret)
340 				goto out_err;
341 		}
342 
343 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
344 			if (bdev->driver->move_notify)
345 				bdev->driver->move_notify(bo, mem);
346 			bo->mem = *mem;
347 			mem->mm_node = NULL;
348 			goto moved;
349 		}
350 	}
351 
352 	if (bdev->driver->move_notify)
353 		bdev->driver->move_notify(bo, mem);
354 
355 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
356 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
357 		ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
358 				      mem);
359 	else if (bdev->driver->move)
360 		ret = bdev->driver->move(bo, evict, interruptible,
361 					 no_wait_gpu, mem);
362 	else
363 		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
364 					 no_wait_gpu, mem);
365 
366 	if (ret) {
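		/*
		 * Tell the driver the bo is moving back to its old placement:
		 * temporarily swap @mem and bo->mem around move_notify().
		 */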
367 		if (bdev->driver->move_notify) {
368 			struct ttm_mem_reg tmp_mem = *mem;
369 			*mem = bo->mem;
370 			bo->mem = tmp_mem;
371 			bdev->driver->move_notify(bo, mem);
372 			bo->mem = *mem;
373 			*mem = tmp_mem;
374 		}
375 
376 		goto out_err;
377 	}
378 
379 moved:
380 	if (bo->evicted) {
381 		if (bdev->driver->invalidate_caches) {
382 			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
383 			if (ret)
384 				pr_err("Cannot flush read caches\n");
385 		}
386 		bo->evicted = false;
387 	}
388 
389 	if (bo->mem.mm_node) {
390 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
391 		    bdev->man[bo->mem.mem_type].gpu_offset;
392 		bo->cur_placement = bo->mem.placement;
393 	} else
394 		bo->offset = 0;
395 
396 	return 0;
397 
398 out_err:
399 	new_man = &bdev->man[bo->mem.mem_type];
400 	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
401 		ttm_tt_destroy(bo->ttm);
402 		bo->ttm = NULL;
403 	}
404 
405 	return ret;
406 }
407 
408 /**
409  * Must be called with the bo reserved (bo::resv held).
410  * Releases GPU memory type usage on destruction.
411  * This is the place to put in driver-specific hooks to release
412  * driver private resources.
413  * Will release the bo::resv reservation lock.
414  */
415 
416 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
417 {
418 	if (bo->bdev->driver->move_notify)
419 		bo->bdev->driver->move_notify(bo, NULL);
420 
421 	ttm_tt_destroy(bo->ttm);
422 	bo->ttm = NULL;
423 	ttm_bo_mem_put(bo, &bo->mem);
424 
425 	ww_mutex_unlock(&bo->resv->lock);
426 }
427 
428 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
429 {
430 	struct reservation_object_list *fobj;
431 	struct fence *fence;
432 	int i;
433 
434 	fobj = reservation_object_get_list(bo->resv);
435 	fence = reservation_object_get_excl(bo->resv);
436 	if (fence && !fence->ops->signaled)
437 		fence_enable_sw_signaling(fence);
438 
439 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
440 		fence = rcu_dereference_protected(fobj->shared[i],
441 					reservation_object_held(bo->resv));
442 
443 		if (!fence->ops->signaled)
444 			fence_enable_sw_signaling(fence);
445 	}
446 }
447 
448 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
449 {
450 	struct ttm_bo_device *bdev = bo->bdev;
451 	struct ttm_bo_global *glob = bo->glob;
452 	int put_count;
453 	int ret;
454 
455 	spin_lock(&glob->lru_lock);
456 	ret = __ttm_bo_reserve(bo, false, true, NULL);
457 
458 	if (!ret) {
459 		if (!ttm_bo_wait(bo, false, true)) {
460 			put_count = ttm_bo_del_from_lru(bo);
461 
462 			spin_unlock(&glob->lru_lock);
463 			ttm_bo_cleanup_memtype_use(bo);
464 
465 			ttm_bo_list_ref_sub(bo, put_count, true);
466 
467 			return;
468 		} else
469 			ttm_bo_flush_all_fences(bo);
470 
471 		/*
472 		 * Make NO_EVICT bos immediately available to
473 		 * shrinkers, now that they are queued for
474 		 * destruction.
475 		 */
476 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
477 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
478 			ttm_bo_add_to_lru(bo);
479 		}
480 
481 		__ttm_bo_unreserve(bo);
482 	}
483 
484 	kref_get(&bo->list_kref);
485 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
486 	spin_unlock(&glob->lru_lock);
487 
488 	schedule_delayed_work(&bdev->wq,
489 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
490 }
491 
492 /**
493  * ttm_bo_cleanup_refs_and_unlock - clean up refs of a delayed-destroy bo
494  * @interruptible: Any sleeps should occur interruptibly.
495  * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
496  *
497  * If the bo is idle, remove it from the delayed-destroy and lru lists and
498  * unreference it. If it is not idle, do nothing.
499  *
500  * Must be called with lru_lock and the reservation held; this function
501  * drops both before returning.
502  */
503 
504 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
505 					  bool interruptible,
506 					  bool no_wait_gpu)
507 {
508 	struct ttm_bo_global *glob = bo->glob;
509 	int put_count;
510 	int ret;
511 
512 	ret = ttm_bo_wait(bo, false, true);
513 
514 	if (ret && !no_wait_gpu) {
515 		long lret;
516 		ww_mutex_unlock(&bo->resv->lock);
517 		spin_unlock(&glob->lru_lock);
518 
519 		lret = reservation_object_wait_timeout_rcu(bo->resv,
520 							   true,
521 							   interruptible,
522 							   30 * HZ);
523 
524 		if (lret < 0)
525 			return lret;
526 		else if (lret == 0)
527 			return -EBUSY;
528 
529 		spin_lock(&glob->lru_lock);
530 		ret = __ttm_bo_reserve(bo, false, true, NULL);
531 
532 		/*
533 		 * We raced and lost; someone else holds the reservation now
534 		 * and is probably busy in ttm_bo_cleanup_memtype_use.
535 		 *
536 		 * Even if that's not the case, any delayed destruction would
537 		 * succeed because we finished waiting, so just return success
538 		 * here.
539 		 */
540 		if (ret) {
541 			spin_unlock(&glob->lru_lock);
542 			return 0;
543 		}
544 
545 		/*
546 		 * Remove the sync_obj with ttm_bo_wait; the wait should be
547 		 * finished, and no new wait object should have been added.
548 		 */
549 		ret = ttm_bo_wait(bo, false, true);
550 		WARN_ON(ret);
551 	}
552 
553 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
554 		__ttm_bo_unreserve(bo);
555 		spin_unlock(&glob->lru_lock);
556 		return ret;
557 	}
558 
559 	put_count = ttm_bo_del_from_lru(bo);
560 	list_del_init(&bo->ddestroy);
561 	++put_count;
562 
563 	spin_unlock(&glob->lru_lock);
564 	ttm_bo_cleanup_memtype_use(bo);
565 
566 	ttm_bo_list_ref_sub(bo, put_count, true);
567 
568 	return 0;
569 }
570 
571 /**
572  * Traverse the delayed list and call ttm_bo_cleanup_refs_and_unlock on
573  * all encountered buffers.
574  */
575 
576 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
577 {
578 	struct ttm_bo_global *glob = bdev->glob;
579 	struct ttm_buffer_object *entry = NULL;
580 	int ret = 0;
581 
582 	spin_lock(&glob->lru_lock);
583 	if (list_empty(&bdev->ddestroy))
584 		goto out_unlock;
585 
586 	entry = list_first_entry(&bdev->ddestroy,
587 		struct ttm_buffer_object, ddestroy);
588 	kref_get(&entry->list_kref);
589 
590 	for (;;) {
591 		struct ttm_buffer_object *nentry = NULL;
592 
593 		if (entry->ddestroy.next != &bdev->ddestroy) {
594 			nentry = list_first_entry(&entry->ddestroy,
595 				struct ttm_buffer_object, ddestroy);
596 			kref_get(&nentry->list_kref);
597 		}
598 
599 		ret = __ttm_bo_reserve(entry, false, true, NULL);
600 		if (remove_all && ret) {
601 			spin_unlock(&glob->lru_lock);
602 			ret = __ttm_bo_reserve(entry, false, false, NULL);
603 			spin_lock(&glob->lru_lock);
604 		}
605 
606 		if (!ret)
607 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
608 							     !remove_all);
609 		else
610 			spin_unlock(&glob->lru_lock);
611 
612 		kref_put(&entry->list_kref, ttm_bo_release_list);
613 		entry = nentry;
614 
615 		if (ret || !entry)
616 			goto out;
617 
618 		spin_lock(&glob->lru_lock);
619 		if (list_empty(&entry->ddestroy))
620 			break;
621 	}
622 
623 out_unlock:
624 	spin_unlock(&glob->lru_lock);
625 out:
626 	if (entry)
627 		kref_put(&entry->list_kref, ttm_bo_release_list);
628 	return ret;
629 }
630 
631 static void ttm_bo_delayed_workqueue(struct work_struct *work)
632 {
633 	struct ttm_bo_device *bdev =
634 	    container_of(work, struct ttm_bo_device, wq.work);
635 
636 	if (ttm_bo_delayed_delete(bdev, false)) {
637 		schedule_delayed_work(&bdev->wq,
638 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
639 	}
640 }
641 
642 static void ttm_bo_release(struct kref *kref)
643 {
644 	struct ttm_buffer_object *bo =
645 	    container_of(kref, struct ttm_buffer_object, kref);
646 	struct ttm_bo_device *bdev = bo->bdev;
647 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
648 
649 	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
650 	ttm_mem_io_lock(man, false);
651 	ttm_mem_io_free_vm(bo);
652 	ttm_mem_io_unlock(man);
653 	ttm_bo_cleanup_refs_or_queue(bo);
654 	kref_put(&bo->list_kref, ttm_bo_release_list);
655 }
656 
657 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
658 {
659 	struct ttm_buffer_object *bo = *p_bo;
660 
661 	*p_bo = NULL;
662 	kref_put(&bo->kref, ttm_bo_release);
663 }
664 EXPORT_SYMBOL(ttm_bo_unref);
665 
666 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
667 {
668 	return cancel_delayed_work_sync(&bdev->wq);
669 }
670 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
671 
672 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
673 {
674 	if (resched)
675 		schedule_delayed_work(&bdev->wq,
676 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
677 }
678 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
679 
680 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
681 			bool no_wait_gpu)
682 {
683 	struct ttm_bo_device *bdev = bo->bdev;
684 	struct ttm_mem_reg evict_mem;
685 	struct ttm_placement placement;
686 	int ret = 0;
687 
688 	lockdep_assert_held(&bo->resv->lock.base);
689 
690 	evict_mem = bo->mem;
691 	evict_mem.mm_node = NULL;
692 	evict_mem.bus.io_reserved_vm = false;
693 	evict_mem.bus.io_reserved_count = 0;
694 
695 	placement.num_placement = 0;
696 	placement.num_busy_placement = 0;
697 	bdev->driver->evict_flags(bo, &placement);
698 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
699 				no_wait_gpu);
700 	if (ret) {
701 		if (ret != -ERESTARTSYS) {
702 			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
703 			       bo);
704 			ttm_bo_mem_space_debug(bo, &placement);
705 		}
706 		goto out;
707 	}
708 
709 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
710 				     no_wait_gpu);
711 	if (unlikely(ret)) {
712 		if (ret != -ERESTARTSYS)
713 			pr_err("Buffer eviction failed\n");
714 		ttm_bo_mem_put(bo, &evict_mem);
715 		goto out;
716 	}
717 	bo->evicted = true;
718 out:
719 	return ret;
720 }
721 
722 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
723 				uint32_t mem_type,
724 				const struct ttm_place *place,
725 				bool interruptible,
726 				bool no_wait_gpu)
727 {
728 	struct ttm_bo_global *glob = bdev->glob;
729 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
730 	struct ttm_buffer_object *bo;
731 	int ret = -EBUSY, put_count;
732 
733 	spin_lock(&glob->lru_lock);
734 	list_for_each_entry(bo, &man->lru, lru) {
735 		ret = __ttm_bo_reserve(bo, false, true, NULL);
736 		if (!ret) {
737 			if (place && (place->fpfn || place->lpfn)) {
738 				/* Don't evict this BO if it's outside of the
739 				 * requested placement range
740 				 */
741 				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
742 				    (place->lpfn && place->lpfn <= bo->mem.start)) {
743 					__ttm_bo_unreserve(bo);
744 					ret = -EBUSY;
745 					continue;
746 				}
747 			}
748 
749 			break;
750 		}
751 	}
752 
753 	if (ret) {
754 		spin_unlock(&glob->lru_lock);
755 		return ret;
756 	}
757 
758 	kref_get(&bo->list_kref);
759 
760 	if (!list_empty(&bo->ddestroy)) {
761 		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
762 						     no_wait_gpu);
763 		kref_put(&bo->list_kref, ttm_bo_release_list);
764 		return ret;
765 	}
766 
767 	put_count = ttm_bo_del_from_lru(bo);
768 	spin_unlock(&glob->lru_lock);
769 
770 	BUG_ON(ret != 0);
771 
772 	ttm_bo_list_ref_sub(bo, put_count, true);
773 
774 	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
775 	ttm_bo_unreserve(bo);
776 
777 	kref_put(&bo->list_kref, ttm_bo_release_list);
778 	return ret;
779 }
780 
781 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
782 {
783 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
784 
785 	if (mem->mm_node)
786 		(*man->func->put_node)(man, mem);
787 }
788 EXPORT_SYMBOL(ttm_bo_mem_put);
789 
790 /**
791  * Add the last move fence to the BO and reserve a new shared slot.
792  */
793 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
794 				 struct ttm_mem_type_manager *man,
795 				 struct ttm_mem_reg *mem)
796 {
797 	struct fence *fence;
798 	int ret;
799 
800 	spin_lock(&man->move_lock);
801 	fence = fence_get(man->move);
802 	spin_unlock(&man->move_lock);
803 
804 	if (fence) {
805 		reservation_object_add_shared_fence(bo->resv, fence);
806 
807 		ret = reservation_object_reserve_shared(bo->resv);
808 		if (unlikely(ret))
809 			return ret;
810 
811 		fence_put(bo->moving);
812 		bo->moving = fence;
813 	}
814 
815 	return 0;
816 }
817 
818 /**
819  * Repeatedly evict memory from the LRU for @mem_type until we create enough
820  * space, or we've evicted everything and there isn't enough space.
821  */
822 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
823 					uint32_t mem_type,
824 					const struct ttm_place *place,
825 					struct ttm_mem_reg *mem,
826 					bool interruptible,
827 					bool no_wait_gpu)
828 {
829 	struct ttm_bo_device *bdev = bo->bdev;
830 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
831 	int ret;
832 
833 	do {
834 		ret = (*man->func->get_node)(man, bo, place, mem);
835 		if (unlikely(ret != 0))
836 			return ret;
837 		if (mem->mm_node)
838 			break;
839 		ret = ttm_mem_evict_first(bdev, mem_type, place,
840 					  interruptible, no_wait_gpu);
841 		if (unlikely(ret != 0))
842 			return ret;
843 	} while (1);
844 	mem->mem_type = mem_type;
845 	return ttm_bo_add_move_fence(bo, man, mem);
846 }
847 
848 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
849 				      uint32_t cur_placement,
850 				      uint32_t proposed_placement)
851 {
852 	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
853 	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
854 
855 	/**
856 	 * Keep current caching if possible.
857 	 */
858 
859 	if ((cur_placement & caching) != 0)
860 		result |= (cur_placement & caching);
861 	else if ((man->default_caching & caching) != 0)
862 		result |= man->default_caching;
863 	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
864 		result |= TTM_PL_FLAG_CACHED;
865 	else if ((TTM_PL_FLAG_WC & caching) != 0)
866 		result |= TTM_PL_FLAG_WC;
867 	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
868 		result |= TTM_PL_FLAG_UNCACHED;
869 
870 	return result;
871 }
872 
873 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
874 				 uint32_t mem_type,
875 				 const struct ttm_place *place,
876 				 uint32_t *masked_placement)
877 {
878 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
879 
880 	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
881 		return false;
882 
883 	if ((place->flags & man->available_caching) == 0)
884 		return false;
885 
886 	cur_flags |= (place->flags & man->available_caching);
887 
888 	*masked_placement = cur_flags;
889 	return true;
890 }
891 
892 /**
893  * Creates space for memory region @mem according to its type.
894  *
895  * This function first searches for free space in compatible memory types in
896  * the priority order defined by the driver.  If free space isn't found, then
897  * ttm_bo_mem_force_space is attempted in priority order to evict and find
898  * space.
899  */
900 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
901 			struct ttm_placement *placement,
902 			struct ttm_mem_reg *mem,
903 			bool interruptible,
904 			bool no_wait_gpu)
905 {
906 	struct ttm_bo_device *bdev = bo->bdev;
907 	struct ttm_mem_type_manager *man;
908 	uint32_t mem_type = TTM_PL_SYSTEM;
909 	uint32_t cur_flags = 0;
910 	bool type_found = false;
911 	bool type_ok = false;
912 	bool has_erestartsys = false;
913 	int i, ret;
914 
915 	ret = reservation_object_reserve_shared(bo->resv);
916 	if (unlikely(ret))
917 		return ret;
918 
919 	mem->mm_node = NULL;
920 	for (i = 0; i < placement->num_placement; ++i) {
921 		const struct ttm_place *place = &placement->placement[i];
922 
923 		ret = ttm_mem_type_from_place(place, &mem_type);
924 		if (ret)
925 			return ret;
926 		man = &bdev->man[mem_type];
927 		if (!man->has_type || !man->use_type)
928 			continue;
929 
930 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
931 						&cur_flags);
932 
933 		if (!type_ok)
934 			continue;
935 
936 		type_found = true;
937 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
938 						  cur_flags);
939 		/*
940 		 * Copy the access and other non-mapping-related flag bits
941 		 * from the memory placement flags into the current flags.
942 		 */
943 		ttm_flag_masked(&cur_flags, place->flags,
944 				~TTM_PL_MASK_MEMTYPE);
945 
946 		if (mem_type == TTM_PL_SYSTEM)
947 			break;
948 
949 		ret = (*man->func->get_node)(man, bo, place, mem);
950 		if (unlikely(ret))
951 			return ret;
952 
953 		if (mem->mm_node) {
954 			ret = ttm_bo_add_move_fence(bo, man, mem);
955 			if (unlikely(ret)) {
956 				(*man->func->put_node)(man, mem);
957 				return ret;
958 			}
959 			break;
960 		}
961 	}
962 
963 	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
964 		mem->mem_type = mem_type;
965 		mem->placement = cur_flags;
966 		return 0;
967 	}
968 
969 	for (i = 0; i < placement->num_busy_placement; ++i) {
970 		const struct ttm_place *place = &placement->busy_placement[i];
971 
972 		ret = ttm_mem_type_from_place(place, &mem_type);
973 		if (ret)
974 			return ret;
975 		man = &bdev->man[mem_type];
976 		if (!man->has_type || !man->use_type)
977 			continue;
978 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
979 			continue;
980 
981 		type_found = true;
982 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
983 						  cur_flags);
984 		/*
985 		 * Copy the access and other non-mapping-related flag bits
986 		 * from the memory placement flags into the current flags.
987 		 */
988 		ttm_flag_masked(&cur_flags, place->flags,
989 				~TTM_PL_MASK_MEMTYPE);
990 
991 		if (mem_type == TTM_PL_SYSTEM) {
992 			mem->mem_type = mem_type;
993 			mem->placement = cur_flags;
994 			mem->mm_node = NULL;
995 			return 0;
996 		}
997 
998 		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
999 						interruptible, no_wait_gpu);
1000 		if (ret == 0 && mem->mm_node) {
1001 			mem->placement = cur_flags;
1002 			return 0;
1003 		}
1004 		if (ret == -ERESTARTSYS)
1005 			has_erestartsys = true;
1006 	}
1007 
1008 	if (!type_found) {
1009 		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1014 }
1015 EXPORT_SYMBOL(ttm_bo_mem_space);
1016 
1017 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1018 			struct ttm_placement *placement,
1019 			bool interruptible,
1020 			bool no_wait_gpu)
1021 {
1022 	int ret = 0;
1023 	struct ttm_mem_reg mem;
1024 
1025 	lockdep_assert_held(&bo->resv->lock.base);
1026 
1027 	mem.num_pages = bo->num_pages;
1028 	mem.size = mem.num_pages << PAGE_SHIFT;
1029 	mem.page_alignment = bo->mem.page_alignment;
1030 	mem.bus.io_reserved_vm = false;
1031 	mem.bus.io_reserved_count = 0;
1032 	/*
1033 	 * Determine where to move the buffer.
1034 	 */
1035 	ret = ttm_bo_mem_space(bo, placement, &mem,
1036 			       interruptible, no_wait_gpu);
1037 	if (ret)
1038 		goto out_unlock;
1039 	ret = ttm_bo_handle_move_mem(bo, &mem, false,
1040 				     interruptible, no_wait_gpu);
1041 out_unlock:
1042 	if (ret && mem.mm_node)
1043 		ttm_bo_mem_put(bo, &mem);
1044 	return ret;
1045 }
1046 
1047 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1048 		       struct ttm_mem_reg *mem,
1049 		       uint32_t *new_flags)
1050 {
1051 	int i;
1052 
1053 	for (i = 0; i < placement->num_placement; i++) {
1054 		const struct ttm_place *heap = &placement->placement[i];
1055 		if (mem->mm_node &&
1056 		    (mem->start < heap->fpfn ||
1057 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1058 			continue;
1059 
1060 		*new_flags = heap->flags;
1061 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1062 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1063 			return true;
1064 	}
1065 
1066 	for (i = 0; i < placement->num_busy_placement; i++) {
1067 		const struct ttm_place *heap = &placement->busy_placement[i];
1068 		if (mem->mm_node &&
1069 		    (mem->start < heap->fpfn ||
1070 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1071 			continue;
1072 
1073 		*new_flags = heap->flags;
1074 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1075 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1076 			return true;
1077 	}
1078 
1079 	return false;
1080 }
1081 EXPORT_SYMBOL(ttm_bo_mem_compat);
1082 
1083 int ttm_bo_validate(struct ttm_buffer_object *bo,
1084 			struct ttm_placement *placement,
1085 			bool interruptible,
1086 			bool no_wait_gpu)
1087 {
1088 	int ret;
1089 	uint32_t new_flags;
1090 
1091 	lockdep_assert_held(&bo->resv->lock.base);
1092 	/*
1093 	 * Check whether we need to move buffer.
1094 	 */
1095 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1096 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
1097 					 no_wait_gpu);
1098 		if (ret)
1099 			return ret;
1100 	} else {
1101 		/*
1102 		 * Copy the access and other non-mapping-related flag bits from
1103 		 * the compatible memory placement flags into the active flags.
1104 		 */
1105 		ttm_flag_masked(&bo->mem.placement, new_flags,
1106 				~TTM_PL_MASK_MEMTYPE);
1107 	}
1108 	/*
1109 	 * We might need to add a TTM.
1110 	 */
1111 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1112 		ret = ttm_bo_add_ttm(bo, true);
1113 		if (ret)
1114 			return ret;
1115 	}
1116 	return 0;
1117 }
1118 EXPORT_SYMBOL(ttm_bo_validate);
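
/*
 * Example (illustrative sketch, not from any specific driver): the usual
 * calling pattern for ttm_bo_validate() is to reserve the bo, fill in a
 * ttm_placement and validate it, e.g. to pin a bo into VRAM:
 *
 *	struct ttm_place place = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &place,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, false, false);
 *	ttm_bo_unreserve(bo);
 *	return ret;
 */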
1119 
1120 int ttm_bo_init(struct ttm_bo_device *bdev,
1121 		struct ttm_buffer_object *bo,
1122 		unsigned long size,
1123 		enum ttm_bo_type type,
1124 		struct ttm_placement *placement,
1125 		uint32_t page_alignment,
1126 		bool interruptible,
1127 		struct file *persistent_swap_storage,
1128 		size_t acc_size,
1129 		struct sg_table *sg,
1130 		struct reservation_object *resv,
1131 		void (*destroy) (struct ttm_buffer_object *))
1132 {
1133 	int ret = 0;
1134 	unsigned long num_pages;
1135 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1136 	bool locked;
1137 
1138 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1139 	if (ret) {
1140 		pr_err("Out of kernel memory\n");
1141 		if (destroy)
1142 			(*destroy)(bo);
1143 		else
1144 			kfree(bo);
1145 		return -ENOMEM;
1146 	}
1147 
1148 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1149 	if (num_pages == 0) {
1150 		pr_err("Illegal buffer object size\n");
1151 		if (destroy)
1152 			(*destroy)(bo);
1153 		else
1154 			kfree(bo);
1155 		ttm_mem_global_free(mem_glob, acc_size);
1156 		return -EINVAL;
1157 	}
1158 	bo->destroy = destroy;
1159 
1160 	kref_init(&bo->kref);
1161 	kref_init(&bo->list_kref);
1162 	atomic_set(&bo->cpu_writers, 0);
1163 	INIT_LIST_HEAD(&bo->lru);
1164 	INIT_LIST_HEAD(&bo->ddestroy);
1165 	INIT_LIST_HEAD(&bo->swap);
1166 	INIT_LIST_HEAD(&bo->io_reserve_lru);
1167 	mutex_init(&bo->wu_mutex);
1168 	bo->bdev = bdev;
1169 	bo->glob = bdev->glob;
1170 	bo->type = type;
1171 	bo->num_pages = num_pages;
1172 	bo->mem.size = num_pages << PAGE_SHIFT;
1173 	bo->mem.mem_type = TTM_PL_SYSTEM;
1174 	bo->mem.num_pages = bo->num_pages;
1175 	bo->mem.mm_node = NULL;
1176 	bo->mem.page_alignment = page_alignment;
1177 	bo->mem.bus.io_reserved_vm = false;
1178 	bo->mem.bus.io_reserved_count = 0;
1179 	bo->moving = NULL;
1180 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1181 	bo->persistent_swap_storage = persistent_swap_storage;
1182 	bo->acc_size = acc_size;
1183 	bo->sg = sg;
1184 	if (resv) {
1185 		bo->resv = resv;
1186 		lockdep_assert_held(&bo->resv->lock.base);
1187 	} else {
1188 		bo->resv = &bo->ttm_resv;
1189 		reservation_object_init(&bo->ttm_resv);
1190 	}
1191 	atomic_inc(&bo->glob->bo_count);
1192 	drm_vma_node_reset(&bo->vma_node);
1193 
1194 	/*
1195 	 * For ttm_bo_type_device and ttm_bo_type_sg buffers, allocate
1196 	 * mappable address space from the device's vma manager.
1197 	 */
1198 	if (bo->type == ttm_bo_type_device ||
1199 	    bo->type == ttm_bo_type_sg)
1200 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1201 					 bo->mem.num_pages);
1202 
1203 	/* passed reservation objects should already be locked,
1204 	 * since otherwise lockdep will be angered in radeon.
1205 	 */
1206 	if (!resv) {
1207 		locked = ww_mutex_trylock(&bo->resv->lock);
1208 		WARN_ON(!locked);
1209 	}
1210 
1211 	if (likely(!ret))
1212 		ret = ttm_bo_validate(bo, placement, interruptible, false);
1213 
1214 	if (!resv) {
1215 		ttm_bo_unreserve(bo);
1216 
1217 	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1218 		spin_lock(&bo->glob->lru_lock);
1219 		ttm_bo_add_to_lru(bo);
1220 		spin_unlock(&bo->glob->lru_lock);
1221 	}
1222 
1223 	if (unlikely(ret))
1224 		ttm_bo_unref(&bo);
1225 
1226 	return ret;
1227 }
1228 EXPORT_SYMBOL(ttm_bo_init);
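
/*
 * Example (illustrative sketch): drivers normally embed the ttm_buffer_object
 * in their own bo structure, account for it with ttm_bo_acc_size() and pass a
 * destroy callback that frees the embedding structure; the mydrv_ names below
 * are hypothetical. Note that on failure ttm_bo_init() has already dropped
 * the reference (and thereby called @destroy), so the caller must not free
 * the object again.
 *
 *	struct mydrv_bo {
 *		struct ttm_buffer_object tbo;
 *		...
 *	};
 *
 *	static void mydrv_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct mydrv_bo, tbo));
 *	}
 *
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct mydrv_bo));
 *	struct mydrv_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *
 *	if (!mbo)
 *		return -ENOMEM;
 *	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, false, NULL, acc_size,
 *			  NULL, NULL, &mydrv_bo_destroy);
 */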
1229 
1230 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1231 		       unsigned long bo_size,
1232 		       unsigned struct_size)
1233 {
1234 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1235 	size_t size = 0;
1236 
1237 	size += ttm_round_pot(struct_size);
1238 	size += ttm_round_pot(npages * sizeof(void *));
1239 	size += ttm_round_pot(sizeof(struct ttm_tt));
1240 	return size;
1241 }
1242 EXPORT_SYMBOL(ttm_bo_acc_size);
1243 
1244 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1245 			   unsigned long bo_size,
1246 			   unsigned struct_size)
1247 {
1248 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1249 	size_t size = 0;
1250 
1251 	size += ttm_round_pot(struct_size);
1252 	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1253 	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1254 	return size;
1255 }
1256 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1257 
1258 int ttm_bo_create(struct ttm_bo_device *bdev,
1259 			unsigned long size,
1260 			enum ttm_bo_type type,
1261 			struct ttm_placement *placement,
1262 			uint32_t page_alignment,
1263 			bool interruptible,
1264 			struct file *persistent_swap_storage,
1265 			struct ttm_buffer_object **p_bo)
1266 {
1267 	struct ttm_buffer_object *bo;
1268 	size_t acc_size;
1269 	int ret;
1270 
1271 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1272 	if (unlikely(bo == NULL))
1273 		return -ENOMEM;
1274 
1275 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1276 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1277 			  interruptible, persistent_swap_storage, acc_size,
1278 			  NULL, NULL, NULL);
1279 	if (likely(ret == 0))
1280 		*p_bo = bo;
1281 
1282 	return ret;
1283 }
1284 EXPORT_SYMBOL(ttm_bo_create);
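
/*
 * Example (illustrative sketch): ttm_bo_create() is the convenience path for
 * callers that do not embed the ttm_buffer_object in a larger structure:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *			    0, false, NULL, &bo);
 *	if (ret)
 *		return ret;
 *	... use bo, then drop the reference with ttm_bo_unref(&bo) ...
 */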
1285 
1286 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1287 					unsigned mem_type, bool allow_errors)
1288 {
1289 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1290 	struct ttm_bo_global *glob = bdev->glob;
1291 	struct fence *fence;
1292 	int ret;
1293 
1294 	/*
1295 	 * Can't use standard list traversal since we're unlocking.
1296 	 */
1297 
1298 	spin_lock(&glob->lru_lock);
1299 	while (!list_empty(&man->lru)) {
1300 		spin_unlock(&glob->lru_lock);
1301 		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
1302 		if (ret) {
1303 			if (allow_errors) {
1304 				return ret;
1305 			} else {
1306 				pr_err("Cleanup eviction failed\n");
1307 			}
1308 		}
1309 		spin_lock(&glob->lru_lock);
1310 	}
1311 	spin_unlock(&glob->lru_lock);
1312 
1313 	spin_lock(&man->move_lock);
1314 	fence = fence_get(man->move);
1315 	spin_unlock(&man->move_lock);
1316 
1317 	if (fence) {
1318 		ret = fence_wait(fence, false);
1319 		fence_put(fence);
1320 		if (ret) {
1321 			if (allow_errors) {
1322 				return ret;
1323 			} else {
1324 				pr_err("Cleanup eviction failed\n");
1325 			}
1326 		}
1327 	}
1328 
1329 	return 0;
1330 }
1331 
1332 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1333 {
1334 	struct ttm_mem_type_manager *man;
1335 	int ret = -EINVAL;
1336 
1337 	if (mem_type >= TTM_NUM_MEM_TYPES) {
1338 		pr_err("Illegal memory type %d\n", mem_type);
1339 		return ret;
1340 	}
1341 	man = &bdev->man[mem_type];
1342 
1343 	if (!man->has_type) {
1344 		pr_err("Trying to take down uninitialized memory manager type %u\n",
1345 		       mem_type);
1346 		return ret;
1347 	}
1348 	fence_put(man->move);
1349 
1350 	man->use_type = false;
1351 	man->has_type = false;
1352 
1353 	ret = 0;
1354 	if (mem_type > 0) {
1355 		ttm_bo_force_list_clean(bdev, mem_type, false);
1356 
1357 		ret = (*man->func->takedown)(man);
1358 	}
1359 
1360 	return ret;
1361 }
1362 EXPORT_SYMBOL(ttm_bo_clean_mm);
1363 
1364 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1365 {
1366 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1367 
1368 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1369 		pr_err("Illegal memory manager memory type %u\n", mem_type);
1370 		return -EINVAL;
1371 	}
1372 
1373 	if (!man->has_type) {
1374 		pr_err("Memory type %u has not been initialized\n", mem_type);
1375 		return 0;
1376 	}
1377 
1378 	return ttm_bo_force_list_clean(bdev, mem_type, true);
1379 }
1380 EXPORT_SYMBOL(ttm_bo_evict_mm);
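
/*
 * Example (illustrative sketch): drivers typically call ttm_bo_evict_mm() in
 * their suspend path to push everything out of device memory, e.g.:
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *	if (ret)
 *		pr_err("Failed to evict VRAM before suspend\n");
 */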
1381 
1382 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1383 			unsigned long p_size)
1384 {
1385 	int ret = -EINVAL;
1386 	struct ttm_mem_type_manager *man;
1387 
1388 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1389 	man = &bdev->man[type];
1390 	BUG_ON(man->has_type);
1391 	man->io_reserve_fastpath = true;
1392 	man->use_io_reserve_lru = false;
1393 	mutex_init(&man->io_reserve_mutex);
1394 	spin_lock_init(&man->move_lock);
1395 	INIT_LIST_HEAD(&man->io_reserve_lru);
1396 
1397 	ret = bdev->driver->init_mem_type(bdev, type, man);
1398 	if (ret)
1399 		return ret;
1400 	man->bdev = bdev;
1401 
1402 	ret = 0;
1403 	if (type != TTM_PL_SYSTEM) {
1404 		ret = (*man->func->init)(man, p_size);
1405 		if (ret)
1406 			return ret;
1407 	}
1408 	man->has_type = true;
1409 	man->use_type = true;
1410 	man->size = p_size;
1411 
1412 	INIT_LIST_HEAD(&man->lru);
1413 	man->move = NULL;
1414 
1415 	return 0;
1416 }
1417 EXPORT_SYMBOL(ttm_bo_init_mm);
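
/*
 * Example (illustrative sketch): after ttm_bo_device_init() has set up the
 * system domain, a driver registers its device-specific domains, sized in
 * pages, before creating any buffer objects; the sizes here are hypothetical:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 */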
1418 
1419 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1420 {
1421 	struct ttm_bo_global *glob =
1422 		container_of(kobj, struct ttm_bo_global, kobj);
1423 
1424 	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1425 	__free_page(glob->dummy_read_page);
1426 	kfree(glob);
1427 }
1428 
1429 void ttm_bo_global_release(struct drm_global_reference *ref)
1430 {
1431 	struct ttm_bo_global *glob = ref->object;
1432 
1433 	kobject_del(&glob->kobj);
1434 	kobject_put(&glob->kobj);
1435 }
1436 EXPORT_SYMBOL(ttm_bo_global_release);
1437 
1438 int ttm_bo_global_init(struct drm_global_reference *ref)
1439 {
1440 	struct ttm_bo_global_ref *bo_ref =
1441 		container_of(ref, struct ttm_bo_global_ref, ref);
1442 	struct ttm_bo_global *glob = ref->object;
1443 	int ret;
1444 
1445 	mutex_init(&glob->device_list_mutex);
1446 	spin_lock_init(&glob->lru_lock);
1447 	glob->mem_glob = bo_ref->mem_glob;
1448 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1449 
1450 	if (unlikely(glob->dummy_read_page == NULL)) {
1451 		ret = -ENOMEM;
1452 		goto out_no_drp;
1453 	}
1454 
1455 	INIT_LIST_HEAD(&glob->swap_lru);
1456 	INIT_LIST_HEAD(&glob->device_list);
1457 
1458 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1459 	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1460 	if (unlikely(ret != 0)) {
1461 		pr_err("Could not register buffer object swapout\n");
1462 		goto out_no_shrink;
1463 	}
1464 
1465 	atomic_set(&glob->bo_count, 0);
1466 
1467 	ret = kobject_init_and_add(
1468 		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1469 	if (unlikely(ret != 0))
1470 		kobject_put(&glob->kobj);
1471 	return ret;
1472 out_no_shrink:
1473 	__free_page(glob->dummy_read_page);
1474 out_no_drp:
1475 	kfree(glob);
1476 	return ret;
1477 }
1478 EXPORT_SYMBOL(ttm_bo_global_init);
1479 
1480 
1481 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1482 {
1483 	int ret = 0;
1484 	unsigned i = TTM_NUM_MEM_TYPES;
1485 	struct ttm_mem_type_manager *man;
1486 	struct ttm_bo_global *glob = bdev->glob;
1487 
1488 	while (i--) {
1489 		man = &bdev->man[i];
1490 		if (man->has_type) {
1491 			man->use_type = false;
1492 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1493 				ret = -EBUSY;
1494 				pr_err("DRM memory manager type %d is not clean\n",
1495 				       i);
1496 			}
1497 			man->has_type = false;
1498 		}
1499 	}
1500 
1501 	mutex_lock(&glob->device_list_mutex);
1502 	list_del(&bdev->device_list);
1503 	mutex_unlock(&glob->device_list_mutex);
1504 
1505 	cancel_delayed_work_sync(&bdev->wq);
1506 
1507 	while (ttm_bo_delayed_delete(bdev, true))
1508 		;
1509 
1510 	spin_lock(&glob->lru_lock);
1511 	if (list_empty(&bdev->ddestroy))
1512 		TTM_DEBUG("Delayed destroy list was clean\n");
1513 
1514 	if (list_empty(&bdev->man[0].lru))
1515 		TTM_DEBUG("Swap list was clean\n");
1516 	spin_unlock(&glob->lru_lock);
1517 
1518 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
1519 
1520 	return ret;
1521 }
1522 EXPORT_SYMBOL(ttm_bo_device_release);
1523 
1524 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1525 		       struct ttm_bo_global *glob,
1526 		       struct ttm_bo_driver *driver,
1527 		       struct address_space *mapping,
1528 		       uint64_t file_page_offset,
1529 		       bool need_dma32)
1530 {
1531 	int ret = -EINVAL;
1532 
1533 	bdev->driver = driver;
1534 
1535 	memset(bdev->man, 0, sizeof(bdev->man));
1536 
1537 	/*
1538 	 * Initialize the system memory buffer type.
1539 	 * Other types need to be driver / IOCTL initialized.
1540 	 */
1541 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1542 	if (unlikely(ret != 0))
1543 		goto out_no_sys;
1544 
1545 	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1546 				    0x10000000);
1547 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1548 	INIT_LIST_HEAD(&bdev->ddestroy);
1549 	bdev->dev_mapping = mapping;
1550 	bdev->glob = glob;
1551 	bdev->need_dma32 = need_dma32;
1552 	mutex_lock(&glob->device_list_mutex);
1553 	list_add_tail(&bdev->device_list, &glob->device_list);
1554 	mutex_unlock(&glob->device_list_mutex);
1555 
1556 	return 0;
1557 out_no_sys:
1558 	return ret;
1559 }
1560 EXPORT_SYMBOL(ttm_bo_device_init);
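
/*
 * Example (illustrative sketch of a driver load path; the mydrv_ names and
 * MYDRV_FILE_PAGE_OFFSET are hypothetical, and the ttm_mem_global reference
 * is assumed to have been set up the same way beforehand): the bo global must
 * be obtained through drm_global_item_ref() before ttm_bo_device_init() is
 * called.
 *
 *	struct drm_global_reference *global_ref = &mydrv->bo_global_ref.ref;
 *
 *	mydrv->bo_global_ref.mem_glob = mydrv->mem_global_ref.object;
 *	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 *	global_ref->size = sizeof(struct ttm_bo_global);
 *	global_ref->init = &ttm_bo_global_init;
 *	global_ref->release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(global_ref);
 *	if (ret)
 *		return ret;
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, global_ref->object,
 *				 &mydrv_bo_driver,
 *				 mydrv->ddev->anon_inode->i_mapping,
 *				 MYDRV_FILE_PAGE_OFFSET, mydrv->need_dma32);
 */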
1561 
1562 /*
1563  * buffer object vm functions.
1564  */
1565 
1566 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1567 {
1568 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1569 
1570 	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1571 		if (mem->mem_type == TTM_PL_SYSTEM)
1572 			return false;
1573 
1574 		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1575 			return false;
1576 
1577 		if (mem->placement & TTM_PL_FLAG_CACHED)
1578 			return false;
1579 	}
1580 	return true;
1581 }
1582 
1583 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1584 {
1585 	struct ttm_bo_device *bdev = bo->bdev;
1586 
1587 	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1588 	ttm_mem_io_free_vm(bo);
1589 }
1590 
1591 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1592 {
1593 	struct ttm_bo_device *bdev = bo->bdev;
1594 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1595 
1596 	ttm_mem_io_lock(man, false);
1597 	ttm_bo_unmap_virtual_locked(bo);
1598 	ttm_mem_io_unlock(man);
1599 }
1602 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1603 
1604 int ttm_bo_wait(struct ttm_buffer_object *bo,
1605 		bool interruptible, bool no_wait)
1606 {
1607 	long timeout = no_wait ? 0 : 15 * HZ;
1608 
1609 	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1610 						      interruptible, timeout);
1611 	if (timeout < 0)
1612 		return timeout;
1613 
1614 	if (timeout == 0)
1615 		return -EBUSY;
1616 
1617 	reservation_object_add_excl_fence(bo->resv, NULL);
1618 	return 0;
1619 }
1620 EXPORT_SYMBOL(ttm_bo_wait);
1621 
1622 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1623 {
1624 	int ret = 0;
1625 
1626 	/*
1627 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1628 	 */
1629 
1630 	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1631 	if (unlikely(ret != 0))
1632 		return ret;
1633 	ret = ttm_bo_wait(bo, true, no_wait);
1634 	if (likely(ret == 0))
1635 		atomic_inc(&bo->cpu_writers);
1636 	ttm_bo_unreserve(bo);
1637 	return ret;
1638 }
1639 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1640 
1641 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1642 {
1643 	atomic_dec(&bo->cpu_writers);
1644 }
1645 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
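
/*
 * Example (illustrative sketch): CPU writes are bracketed by the two helpers
 * above; the grab reserves the bo, waits for it to become idle and raises
 * bo::cpu_writers, and the release simply drops that count again:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *	... write to the bo through a kmap or a userspace mapping ...
 *	ttm_bo_synccpu_write_release(bo);
 */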
1646 
1647 /**
1648  * A buffer object shrink method that tries to swap out the first
1649  * buffer object on the bo_global::swap_lru list.
1650  */
1651 
1652 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1653 {
1654 	struct ttm_bo_global *glob =
1655 	    container_of(shrink, struct ttm_bo_global, shrink);
1656 	struct ttm_buffer_object *bo;
1657 	int ret = -EBUSY;
1658 	int put_count;
1659 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1660 
1661 	spin_lock(&glob->lru_lock);
1662 	list_for_each_entry(bo, &glob->swap_lru, swap) {
1663 		ret = __ttm_bo_reserve(bo, false, true, NULL);
1664 		if (!ret)
1665 			break;
1666 	}
1667 
1668 	if (ret) {
1669 		spin_unlock(&glob->lru_lock);
1670 		return ret;
1671 	}
1672 
1673 	kref_get(&bo->list_kref);
1674 
1675 	if (!list_empty(&bo->ddestroy)) {
1676 		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1677 		kref_put(&bo->list_kref, ttm_bo_release_list);
1678 		return ret;
1679 	}
1680 
1681 	put_count = ttm_bo_del_from_lru(bo);
1682 	spin_unlock(&glob->lru_lock);
1683 
1684 	ttm_bo_list_ref_sub(bo, put_count, true);
1685 
1686 	/**
1687 	 * Move to system cached
1688 	 */
1689 
1690 	if ((bo->mem.placement & swap_placement) != swap_placement) {
1691 		struct ttm_mem_reg evict_mem;
1692 
1693 		evict_mem = bo->mem;
1694 		evict_mem.mm_node = NULL;
1695 		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1696 		evict_mem.mem_type = TTM_PL_SYSTEM;
1697 
1698 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1699 					     false, false);
1700 		if (unlikely(ret != 0))
1701 			goto out;
1702 	}
1703 
1704 	/**
1705 	 * Make sure BO is idle.
1706 	 */
1707 
1708 	ret = ttm_bo_wait(bo, false, false);
1709 	if (unlikely(ret != 0))
1710 		goto out;
1711 
1712 	ttm_bo_unmap_virtual(bo);
1713 
1714 	/**
1715 	 * Swap out. Buffer will be swapped in again as soon as
1716 	 * anyone tries to access a ttm page.
1717 	 */
1718 
1719 	if (bo->bdev->driver->swap_notify)
1720 		bo->bdev->driver->swap_notify(bo);
1721 
1722 	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1723 out:
1724 
1725 	/*
1726 	 * Unreserve without putting on LRU to avoid swapping out an
1727 	 * already swapped buffer.
1728 	 */
1730 
1731 	__ttm_bo_unreserve(bo);
1732 	kref_put(&bo->list_kref, ttm_bo_release_list);
1733 	return ret;
1734 }
1735 
1736 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1737 {
1738 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1739 		;
1740 }
1741 EXPORT_SYMBOL(ttm_bo_swapout_all);
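
/*
 * Example (illustrative sketch): ttm_bo_swapout_all() is intended for the
 * driver's hibernation/freeze path, to push all bo backing pages out to
 * their swap storage:
 *
 *	ttm_bo_swapout_all(&mydrv->bdev);
 */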
1742 
1743 /**
1744  * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1745  * unreserved
1746  *
1747  * @bo: Pointer to the buffer object to wait for.
1748  */
1749 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1750 {
1751 	int ret;
1752 
1753 	 * In the absence of a wait_unlocked API,
1754 	 * use the bo::wu_mutex to avoid triggering livelocks due to
1755 	 * Use the bo::wu_mutex to avoid triggering livelocks due to
1756 	 * concurrent use of this function. Note that this use of
1757 	 * bo::wu_mutex can go away if we change locking order to
1758 	 * mmap_sem -> bo::reserve.
1759 	 */
1760 	ret = mutex_lock_interruptible(&bo->wu_mutex);
1761 	if (unlikely(ret != 0))
1762 		return -ERESTARTSYS;
1763 	if (!ww_mutex_is_locked(&bo->resv->lock))
1764 		goto out_unlock;
1765 	ret = __ttm_bo_reserve(bo, true, false, NULL);
1766 	if (unlikely(ret != 0))
1767 		goto out_unlock;
1768 	__ttm_bo_unreserve(bo);
1769 
1770 out_unlock:
1771 	mutex_unlock(&bo->wu_mutex);
1772 	return ret;
1773 }
1774