1213d5092SThomas Hellström // SPDX-License-Identifier: MIT
2213d5092SThomas Hellström /*
3213d5092SThomas Hellström  * Copyright © 2021 Intel Corporation
4213d5092SThomas Hellström  */
5213d5092SThomas Hellström 
6213d5092SThomas Hellström #include <drm/ttm/ttm_bo_driver.h>
7213d5092SThomas Hellström #include <drm/ttm/ttm_placement.h>
8213d5092SThomas Hellström 
9213d5092SThomas Hellström #include "i915_drv.h"
10213d5092SThomas Hellström #include "intel_memory_region.h"
11213d5092SThomas Hellström #include "intel_region_ttm.h"
12213d5092SThomas Hellström 
13213d5092SThomas Hellström #include "gem/i915_gem_object.h"
14213d5092SThomas Hellström #include "gem/i915_gem_region.h"
15213d5092SThomas Hellström #include "gem/i915_gem_ttm.h"
16cf3e3e86SMaarten Lankhorst #include "gem/i915_gem_mman.h"
17213d5092SThomas Hellström 
1850331a7bSRamalingam C #include "gt/intel_migrate.h"
1950331a7bSRamalingam C #include "gt/intel_engine_pm.h"
2050331a7bSRamalingam C 
21213d5092SThomas Hellström #define I915_PL_LMEM0 TTM_PL_PRIV
22213d5092SThomas Hellström #define I915_PL_SYSTEM TTM_PL_SYSTEM
23213d5092SThomas Hellström #define I915_PL_STOLEN TTM_PL_VRAM
24213d5092SThomas Hellström #define I915_PL_GGTT TTM_PL_TT
25213d5092SThomas Hellström 
26213d5092SThomas Hellström #define I915_TTM_PRIO_PURGE     0
27213d5092SThomas Hellström #define I915_TTM_PRIO_NO_PAGES  1
28213d5092SThomas Hellström #define I915_TTM_PRIO_HAS_PAGES 2
29213d5092SThomas Hellström 
3038f28c06SThomas Hellström /*
3138f28c06SThomas Hellström  * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
3238f28c06SThomas Hellström  */
3338f28c06SThomas Hellström #define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
3438f28c06SThomas Hellström 
/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table *cached_st;
};
52213d5092SThomas Hellström 
/* Single system-memory placement: no pfn window restriction, no flags. */
static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};
59213d5092SThomas Hellström 
/*
 * Placement used for eviction and purging: system memory only, for both
 * the preferred and the busy (fallback) lists.
 */
static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};
66213d5092SThomas Hellström 
/*
 * i915_ttm_err_to_gem - Translate a TTM error code into the GEM convention.
 * @err: Return value from a TTM call; zero means success.
 *
 * TTM likes to convert -EDEADLK to -EBUSY without recording the
 * contending lock, so GEM callers are told -EAGAIN to restart the
 * operation. -ENOSPC (memory type / region full and nothing evictable)
 * becomes -ENXIO; system memory exhaustion still surfaces as -ENOMEM.
 * Every other code, including 0, passes through unchanged.
 */
static int i915_ttm_err_to_gem(int err)
{
	if (err == -EBUSY)
		return -EAGAIN;

	if (err == -ENOSPC)
		return -ENXIO;

	return err;
}
93b07a6483SThomas Hellström 
94*3c2b8f32SThomas Hellström static bool gpu_binds_iomem(struct ttm_resource *mem)
95*3c2b8f32SThomas Hellström {
96*3c2b8f32SThomas Hellström 	return mem->mem_type != TTM_PL_SYSTEM;
97*3c2b8f32SThomas Hellström }
98*3c2b8f32SThomas Hellström 
99*3c2b8f32SThomas Hellström static bool cpu_maps_iomem(struct ttm_resource *mem)
100*3c2b8f32SThomas Hellström {
101*3c2b8f32SThomas Hellström 	/* Once / if we support GGTT, this is also false for cached ttm_tts */
102*3c2b8f32SThomas Hellström 	return mem->mem_type != TTM_PL_SYSTEM;
103*3c2b8f32SThomas Hellström }
104*3c2b8f32SThomas Hellström 
105*3c2b8f32SThomas Hellström static enum i915_cache_level
106*3c2b8f32SThomas Hellström i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
107*3c2b8f32SThomas Hellström 		     struct ttm_tt *ttm)
108*3c2b8f32SThomas Hellström {
109*3c2b8f32SThomas Hellström 	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
110*3c2b8f32SThomas Hellström 		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
111*3c2b8f32SThomas Hellström 		I915_CACHE_NONE;
112*3c2b8f32SThomas Hellström }
113*3c2b8f32SThomas Hellström 
114213d5092SThomas Hellström static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
115213d5092SThomas Hellström 
11638f28c06SThomas Hellström static enum ttm_caching
11738f28c06SThomas Hellström i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
11838f28c06SThomas Hellström {
11938f28c06SThomas Hellström 	/*
12038f28c06SThomas Hellström 	 * Objects only allowed in system get cached cpu-mappings.
12138f28c06SThomas Hellström 	 * Other objects get WC mapping for now. Even if in system.
12238f28c06SThomas Hellström 	 */
12338f28c06SThomas Hellström 	if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
12438f28c06SThomas Hellström 	    obj->mm.n_placements <= 1)
12538f28c06SThomas Hellström 		return ttm_cached;
12638f28c06SThomas Hellström 
12738f28c06SThomas Hellström 	return ttm_write_combined;
12838f28c06SThomas Hellström }
12938f28c06SThomas Hellström 
13038f28c06SThomas Hellström static void
13138f28c06SThomas Hellström i915_ttm_place_from_region(const struct intel_memory_region *mr,
132beb6a229SMatthew Auld 			   struct ttm_place *place,
133beb6a229SMatthew Auld 			   unsigned int flags)
13438f28c06SThomas Hellström {
13538f28c06SThomas Hellström 	memset(place, 0, sizeof(*place));
13638f28c06SThomas Hellström 	place->mem_type = intel_region_to_ttm_type(mr);
137beb6a229SMatthew Auld 
138beb6a229SMatthew Auld 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
139beb6a229SMatthew Auld 		place->flags = TTM_PL_FLAG_CONTIGUOUS;
14038f28c06SThomas Hellström }
14138f28c06SThomas Hellström 
/*
 * i915_ttm_placement_from_obj - Build a TTM placement from a GEM object.
 * @obj: The object whose allowed regions to translate.
 * @requested: Out: the single preferred placement.
 * @busy: Out: fallback placements; caller provides room for
 *        I915_TTM_MAX_PLACEMENTS entries.
 * @placement: Out: ttm_placement wired up to @requested and @busy.
 *
 * The preferred placement is the object's first allowed region, or the
 * object's own region when no explicit placement list was given. Under
 * memory pressure any allowed region is acceptable (the "busy" list).
 */
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

	/* No explicit list: fall back to the single requested placement. */
	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}
16938f28c06SThomas Hellström 
/*
 * i915_ttm_tt_create - TTM callback: allocate the driver-private ttm_tt.
 * @bo: The buffer object needing a page vector.
 * @page_flags: TTM page flags for the new ttm_tt.
 *
 * Return: Pointer to the embedded ttm_tt on success, NULL on allocation
 * or init failure.
 */
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	/*
	 * For CPU-cleared objects backed by a use_tt manager, let the
	 * TTM pool zero the pages at allocation time.
	 */
	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
			  i915_ttm_select_tt_caching(obj));
	if (ret) {
		kfree(i915_tt);
		return NULL;
	}

	/* Device used later for dma mapping/unmapping of the sg-table. */
	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;
}
198213d5092SThomas Hellström 
/*
 * i915_ttm_tt_unpopulate - TTM callback: release the page vector's pages.
 * @bdev: The TTM device.
 * @ttm: The page vector being unpopulated.
 *
 * DMA may be in flight right up to this point (see struct i915_ttm_tt),
 * so the cached dma-mapped sg-table is only unmapped and freed here.
 */
static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->cached_st) {
		dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(i915_tt->cached_st);
		kfree(i915_tt->cached_st);
		i915_tt->cached_st = NULL;
	}
	ttm_pool_free(&bdev->pool, ttm);
}
212213d5092SThomas Hellström 
/*
 * i915_ttm_tt_destroy - TTM callback: final teardown of the page vector
 * and the containing i915_ttm_tt allocation.
 */
static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	ttm_tt_destroy_common(bdev, ttm);
	ttm_tt_fini(ttm);
	kfree(i915_tt);
}
221213d5092SThomas Hellström 
/*
 * i915_ttm_eviction_valuable - TTM callback: may this bo be evicted?
 * @bo: The candidate buffer object.
 * @place: The placement being considered (currently unused here).
 */
static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}
230213d5092SThomas Hellström 
/* TTM callback: evicted objects always go to system memory. */
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}
236213d5092SThomas Hellström 
237213d5092SThomas Hellström static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
238213d5092SThomas Hellström {
239213d5092SThomas Hellström 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
240213d5092SThomas Hellström 	int ret;
241213d5092SThomas Hellström 
242213d5092SThomas Hellström 	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
243213d5092SThomas Hellström 	if (ret)
244213d5092SThomas Hellström 		return ret;
245213d5092SThomas Hellström 
246213d5092SThomas Hellström 	ret = __i915_gem_object_put_pages(obj);
247213d5092SThomas Hellström 	if (ret)
248213d5092SThomas Hellström 		return ret;
249213d5092SThomas Hellström 
250213d5092SThomas Hellström 	return 0;
251213d5092SThomas Hellström }
252213d5092SThomas Hellström 
253213d5092SThomas Hellström static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
254213d5092SThomas Hellström {
255cf3e3e86SMaarten Lankhorst 	struct radix_tree_iter iter;
256cf3e3e86SMaarten Lankhorst 	void __rcu **slot;
257cf3e3e86SMaarten Lankhorst 
258cf3e3e86SMaarten Lankhorst 	if (!obj->ttm.cached_io_st)
259cf3e3e86SMaarten Lankhorst 		return;
260cf3e3e86SMaarten Lankhorst 
261cf3e3e86SMaarten Lankhorst 	rcu_read_lock();
262cf3e3e86SMaarten Lankhorst 	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
263cf3e3e86SMaarten Lankhorst 		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
264cf3e3e86SMaarten Lankhorst 	rcu_read_unlock();
265cf3e3e86SMaarten Lankhorst 
266213d5092SThomas Hellström 	sg_free_table(obj->ttm.cached_io_st);
267213d5092SThomas Hellström 	kfree(obj->ttm.cached_io_st);
268213d5092SThomas Hellström 	obj->ttm.cached_io_st = NULL;
269213d5092SThomas Hellström }
270213d5092SThomas Hellström 
271*3c2b8f32SThomas Hellström static void
272*3c2b8f32SThomas Hellström i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
273*3c2b8f32SThomas Hellström {
274*3c2b8f32SThomas Hellström 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
275*3c2b8f32SThomas Hellström 
276*3c2b8f32SThomas Hellström 	if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
277*3c2b8f32SThomas Hellström 		obj->write_domain = I915_GEM_DOMAIN_WC;
278*3c2b8f32SThomas Hellström 		obj->read_domains = I915_GEM_DOMAIN_WC;
279*3c2b8f32SThomas Hellström 	} else {
280*3c2b8f32SThomas Hellström 		obj->write_domain = I915_GEM_DOMAIN_CPU;
281*3c2b8f32SThomas Hellström 		obj->read_domains = I915_GEM_DOMAIN_CPU;
282*3c2b8f32SThomas Hellström 	}
283*3c2b8f32SThomas Hellström }
284*3c2b8f32SThomas Hellström 
/*
 * i915_ttm_adjust_gem_after_move - Sync GEM mem flags and cache coherency
 * with the bo's (possibly new) backing store.
 * @obj: The object to update.
 */
static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;

	/* Exactly one of STRUCT_PAGE / IOMEM is set, based on CPU mapping. */
	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

	obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}
299*3c2b8f32SThomas Hellström 
/*
 * i915_ttm_purge - Discard an object's backing store.
 * @obj: The object to purge.
 *
 * Validating against an empty placement tells TTM to drop the backing
 * pages. On success, the cached io state is released and the object is
 * marked __I915_MADV_PURGED; already-purged objects are a no-op.
 */
static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return;

	/* TTM's purge interface. Note that we might be reentering. */
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (!ret) {
		/* No backing store left: clear domains and mem flags. */
		obj->write_domain = 0;
		obj->read_domains = 0;
		i915_ttm_adjust_gem_after_move(obj);
		i915_ttm_free_cached_io_st(obj);
		obj->mm.madv = __I915_MADV_PURGED;
	}
}
323213d5092SThomas Hellström 
/*
 * i915_ttm_swap_notify - TTM callback before swap-out: drop bindings and,
 * for objects no longer needed (madv != WILLNEED), purge instead.
 */
static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_st);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}
334213d5092SThomas Hellström 
/*
 * i915_ttm_delete_mem_notify - TTM callback on final release: tear down
 * all GEM-side state for the buffer object.
 */
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		/* This releases all gem object bindings to the backend. */
		i915_ttm_free_cached_io_st(obj);
		__i915_gem_free_object(obj);
	}
}
345213d5092SThomas Hellström 
/*
 * i915_ttm_region - Map a TTM memory type back to its intel memory region.
 * @bdev: The TTM device, embedded in drm_i915_private.
 * @ttm_mem_type: I915_PL_SYSTEM, or I915_PL_LMEM0 plus the LMEM instance.
 *
 * Return: The matching intel_memory_region (callers BUG on a miss).
 */
static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}
361213d5092SThomas Hellström 
/*
 * i915_ttm_tt_get_st - Get a dma-mapped sg-table for a ttm_tt page vector.
 * @ttm: The page vector.
 *
 * Builds the table on first use and caches it on the i915_ttm_tt; the
 * mapping stays live until unpopulate (see struct i915_ttm_tt).
 *
 * Return: The sg-table, or an ERR_PTR on allocation / mapping failure.
 */
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct scatterlist *sg;
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_st)
		return i915_tt->cached_st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	/* Coalesce pages into segments bounded by the DMA segment size. */
	sg = __sg_alloc_table_from_pages
		(st, ttm->pages, ttm->num_pages, 0,
		 (unsigned long)ttm->num_pages << PAGE_SHIFT,
		 i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_CAST(sg);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}

	i915_tt->cached_st = st;
	return st;
}
395213d5092SThomas Hellström 
/*
 * i915_ttm_resource_get_st - Get an sg-table describing @res.
 * @obj: The GEM object owning the resource.
 * @res: The TTM resource to describe.
 *
 * Return: An sg-table for system-memory resources (via the ttm_tt) or
 * for iomem resources (via the region), or an ERR_PTR on failure.
 */
static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (!gpu_binds_iomem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!cpu_maps_iomem(res));
	return intel_region_ttm_resource_to_st(obj->mm.region, res);
}
412213d5092SThomas Hellström 
/*
 * i915_ttm_accel_move - Use the migrate context to clear or copy into the
 * destination resource.
 * @bo: The buffer object being moved.
 * @dst_mem: The destination resource.
 * @dst_st: sg-table describing the destination backing store.
 *
 * When there is no populated source page vector the destination is
 * GPU-cleared (if needed); otherwise the current backing store is
 * GPU-copied. Both paths wait synchronously for the request.
 *
 * Return: 0 on success, negative error code otherwise. -EINVAL signals
 * that acceleration is unavailable and the caller should fall back to a
 * memcpy move.
 */
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
			       struct ttm_resource *dst_mem,
			       struct sg_table *dst_st)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct sg_table *src_st;
	struct i915_request *rq;
	struct ttm_tt *ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!i915->gt.migrate.context)
		return -EINVAL;

	dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
	if (!ttm || !ttm_tt_is_populated(ttm)) {
		/* No source pages: clear instead of copy. */
		if (bo->type == ttm_bo_type_kernel)
			return -EINVAL;

		/* Pages not flagged for zeroing: nothing to do. */
		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return 0;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
						  dst_st->sgl, dst_level,
						  gpu_binds_iomem(dst_mem),
						  0, &rq);

		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	} else {
		/* Source is either the ttm_tt pages or the cached io table. */
		src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
			obj->ttm.cached_io_st;

		src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_copy(i915->gt.migrate.context,
						 NULL, src_st->sgl, src_level,
						 gpu_binds_iomem(bo->resource),
						 dst_st->sgl, dst_level,
						 gpu_binds_iomem(dst_mem),
						 &rq);
		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	}

	return ret;
}
47150331a7bSRamalingam C 
/*
 * i915_ttm_move - TTM move callback: relocate a bo to @dst_mem.
 * @bo: The buffer object to move.
 * @evict: Whether this is an eviction (unused here).
 * @ctx: TTM operation context (interruptibility, gpu waits).
 * @dst_mem: The destination resource; becomes bo->resource on success.
 * @hop: Intermediate-hop placement (unused here).
 *
 * Tries an accelerated (blitter) move first and falls back to a CPU
 * memcpy move. Objects marked DONTNEED are purged instead of moved.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct intel_memory_region *dst_reg, *src_reg;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	struct sg_table *dst_st;
	int ret;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	/* DONTNEED objects are purged; the destination is not needed. */
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (bo->ttm && (dst_man->use_tt ||
			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
		if (ret)
			return ret;
	}

	dst_st = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_st))
		return PTR_ERR(dst_st);

	/* Accelerated move first; any failure falls back to memcpy. */
	ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
	if (ret) {
		/* If we start mapping GGTT, we can no longer use man::use_tt here. */
		dst_iter = !cpu_maps_iomem(dst_mem) ?
			ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
						 dst_st, dst_reg->region.start);

		src_iter = !cpu_maps_iomem(bo->resource) ?
			ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
						 obj->ttm.cached_io_st,
						 src_reg->region.start);

		ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
	}
	/* Below dst_mem becomes bo->resource. */
	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_st(obj);

	/* Cache the new io sg-table for the mmap fault path. */
	if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_st = dst_st;
		obj->ttm.get_io_page.sg_pos = dst_st->sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	}

	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}
550213d5092SThomas Hellström 
551cf3e3e86SMaarten Lankhorst static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
552cf3e3e86SMaarten Lankhorst {
553*3c2b8f32SThomas Hellström 	if (!cpu_maps_iomem(mem))
554cf3e3e86SMaarten Lankhorst 		return 0;
555cf3e3e86SMaarten Lankhorst 
556cf3e3e86SMaarten Lankhorst 	mem->bus.caching = ttm_write_combined;
557cf3e3e86SMaarten Lankhorst 	mem->bus.is_iomem = true;
558cf3e3e86SMaarten Lankhorst 
559cf3e3e86SMaarten Lankhorst 	return 0;
560cf3e3e86SMaarten Lankhorst }
561cf3e3e86SMaarten Lankhorst 
/*
 * i915_ttm_io_mem_pfn - TTM callback: pfn backing @page_offset of an
 * iomem-mapped object, for the mmap fault path.
 */
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	/* Translate region-relative dma addresses to CPU iomap pfns. */
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
576cf3e3e86SMaarten Lankhorst 
/* TTM device callbacks wiring TTM into the i915 GEM backend. */
static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};
589213d5092SThomas Hellström 
/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Used when registering the TTM device, so that callers outside this
 * file don't need visibility of the (static) funcs table itself.
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
599213d5092SThomas Hellström 
/*
 * i915_ttm_get_pages - Allocate backing store and publish it to GEM.
 * @obj: The GEM object.
 *
 * Validate the underlying TTM bo into one of the object's allowed
 * placements - first trying only the preferred placement without
 * eviction, then falling back to the full busy list with eviction -
 * then install the resulting page-vector or iomem sg_table as the
 * object's GEM pages.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct sg_table *st;
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;
	int real_num_busy;
	int ret;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	/*
	 * For now we support LMEM only with TTM.
	 * TODO: Remove with system support
	 */
	GEM_BUG_ON(requested.mem_type < I915_PL_LMEM0 ||
		   busy[0].mem_type < I915_PL_LMEM0);

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement.num_busy_placement);
	ret = ttm_bo_validate(bo, &placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/* TODO: Remove this when we support system as TTM. */
		real_num_busy = 1;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement.num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, &placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	i915_ttm_adjust_lru(obj);
	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		/*
		 * Freshly created (or swapped-in) system page vector:
		 * populate it now and resync the GEM domain/cache state.
		 */
		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	/* Object either has a page vector or is an iomem object */
	st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
	if (IS_ERR(st))
		return PTR_ERR(st);

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	/* ret is 0 here: all failure paths returned above. */
	return ret;
}
670213d5092SThomas Hellström 
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	i915_ttm_adjust_lru(obj);
}
684213d5092SThomas Hellström 
685213d5092SThomas Hellström static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
686213d5092SThomas Hellström {
687213d5092SThomas Hellström 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
688213d5092SThomas Hellström 
689213d5092SThomas Hellström 	/*
690213d5092SThomas Hellström 	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
691213d5092SThomas Hellström 	 * We're called through i915_ttm_delete_mem_notify().
692213d5092SThomas Hellström 	 */
693213d5092SThomas Hellström 	if (!kref_read(&bo->kref))
694213d5092SThomas Hellström 		return;
695213d5092SThomas Hellström 
696213d5092SThomas Hellström 	/*
697213d5092SThomas Hellström 	 * Put on the correct LRU list depending on the MADV status
698213d5092SThomas Hellström 	 */
699213d5092SThomas Hellström 	spin_lock(&bo->bdev->lru_lock);
700213d5092SThomas Hellström 	if (obj->mm.madv != I915_MADV_WILLNEED) {
701213d5092SThomas Hellström 		bo->priority = I915_TTM_PRIO_PURGE;
702213d5092SThomas Hellström 	} else if (!i915_gem_object_has_pages(obj)) {
703213d5092SThomas Hellström 		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
704213d5092SThomas Hellström 			bo->priority = I915_TTM_PRIO_HAS_PAGES;
705213d5092SThomas Hellström 	} else {
706213d5092SThomas Hellström 		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
707213d5092SThomas Hellström 			bo->priority = I915_TTM_PRIO_NO_PAGES;
708213d5092SThomas Hellström 	}
709213d5092SThomas Hellström 
710213d5092SThomas Hellström 	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
711213d5092SThomas Hellström 	spin_unlock(&bo->bdev->lru_lock);
712213d5092SThomas Hellström }
713213d5092SThomas Hellström 
714213d5092SThomas Hellström /*
715213d5092SThomas Hellström  * TTM-backed gem object destruction requires some clarification.
716213d5092SThomas Hellström  * Basically we have two possibilities here. We can either rely on the
717213d5092SThomas Hellström  * i915 delayed destruction and put the TTM object when the object
718213d5092SThomas Hellström  * is idle. This would be detected by TTM which would bypass the
719213d5092SThomas Hellström  * TTM delayed destroy handling. The other approach is to put the TTM
720213d5092SThomas Hellström  * object early and rely on the TTM destroyed handling, and then free
721213d5092SThomas Hellström  * the leftover parts of the GEM object once TTM's destroyed list handling is
722213d5092SThomas Hellström  * complete. For now, we rely on the latter for two reasons:
723213d5092SThomas Hellström  * a) TTM can evict an object even when it's on the delayed destroy list,
724213d5092SThomas Hellström  * which in theory allows for complete eviction.
725213d5092SThomas Hellström  * b) There is work going on in TTM to allow freeing an object even when
726213d5092SThomas Hellström  * it's not idle, and using the TTM destroyed list handling could help us
727213d5092SThomas Hellström  * benefit from that.
728213d5092SThomas Hellström  */
729213d5092SThomas Hellström static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
730213d5092SThomas Hellström {
731213d5092SThomas Hellström 	if (obj->ttm.created) {
732213d5092SThomas Hellström 		ttm_bo_put(i915_gem_to_ttm(obj));
733213d5092SThomas Hellström 	} else {
734213d5092SThomas Hellström 		__i915_gem_free_object(obj);
735213d5092SThomas Hellström 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
736213d5092SThomas Hellström 	}
737213d5092SThomas Hellström }
738213d5092SThomas Hellström 
739cf3e3e86SMaarten Lankhorst static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
740cf3e3e86SMaarten Lankhorst {
741cf3e3e86SMaarten Lankhorst 	struct vm_area_struct *area = vmf->vma;
742cf3e3e86SMaarten Lankhorst 	struct drm_i915_gem_object *obj =
743cf3e3e86SMaarten Lankhorst 		i915_ttm_to_gem(area->vm_private_data);
744cf3e3e86SMaarten Lankhorst 
745cf3e3e86SMaarten Lankhorst 	/* Sanity check that we allow writing into this object */
746cf3e3e86SMaarten Lankhorst 	if (unlikely(i915_gem_object_is_readonly(obj) &&
747cf3e3e86SMaarten Lankhorst 		     area->vm_flags & VM_WRITE))
748cf3e3e86SMaarten Lankhorst 		return VM_FAULT_SIGBUS;
749cf3e3e86SMaarten Lankhorst 
750cf3e3e86SMaarten Lankhorst 	return ttm_bo_vm_fault(vmf);
751cf3e3e86SMaarten Lankhorst }
752cf3e3e86SMaarten Lankhorst 
753cf3e3e86SMaarten Lankhorst static int
754cf3e3e86SMaarten Lankhorst vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
755cf3e3e86SMaarten Lankhorst 	      void *buf, int len, int write)
756cf3e3e86SMaarten Lankhorst {
757cf3e3e86SMaarten Lankhorst 	struct drm_i915_gem_object *obj =
758cf3e3e86SMaarten Lankhorst 		i915_ttm_to_gem(area->vm_private_data);
759cf3e3e86SMaarten Lankhorst 
760cf3e3e86SMaarten Lankhorst 	if (i915_gem_object_is_readonly(obj) && write)
761cf3e3e86SMaarten Lankhorst 		return -EACCES;
762cf3e3e86SMaarten Lankhorst 
763cf3e3e86SMaarten Lankhorst 	return ttm_bo_vm_access(area, addr, buf, len, write);
764cf3e3e86SMaarten Lankhorst }
765cf3e3e86SMaarten Lankhorst 
766cf3e3e86SMaarten Lankhorst static void ttm_vm_open(struct vm_area_struct *vma)
767cf3e3e86SMaarten Lankhorst {
768cf3e3e86SMaarten Lankhorst 	struct drm_i915_gem_object *obj =
769cf3e3e86SMaarten Lankhorst 		i915_ttm_to_gem(vma->vm_private_data);
770cf3e3e86SMaarten Lankhorst 
771cf3e3e86SMaarten Lankhorst 	GEM_BUG_ON(!obj);
772cf3e3e86SMaarten Lankhorst 	i915_gem_object_get(obj);
773cf3e3e86SMaarten Lankhorst }
774cf3e3e86SMaarten Lankhorst 
775cf3e3e86SMaarten Lankhorst static void ttm_vm_close(struct vm_area_struct *vma)
776cf3e3e86SMaarten Lankhorst {
777cf3e3e86SMaarten Lankhorst 	struct drm_i915_gem_object *obj =
778cf3e3e86SMaarten Lankhorst 		i915_ttm_to_gem(vma->vm_private_data);
779cf3e3e86SMaarten Lankhorst 
780cf3e3e86SMaarten Lankhorst 	GEM_BUG_ON(!obj);
781cf3e3e86SMaarten Lankhorst 	i915_gem_object_put(obj);
782cf3e3e86SMaarten Lankhorst }
783cf3e3e86SMaarten Lankhorst 
/* vm_operations used for userspace mmaps of TTM-backed objects. */
static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};
790cf3e3e86SMaarten Lankhorst 
/* Return the fake offset userspace passes to mmap() for this object. */
static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}
798cf3e3e86SMaarten Lankhorst 
/* GEM object ops for TTM-backed objects; the i915 side of the glue. */
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_purge,
	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.mmap_offset = i915_ttm_mmap_offset,
	.mmap_ops = &vm_ops_ttm,
};
810213d5092SThomas Hellström 
/*
 * i915_ttm_bo_destroy - TTM bo destructor; releases the i915 side.
 * @bo: The TTM buffer object being destroyed.
 *
 * Called by TTM once the bo is idle and off the destroyed list.
 */
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	i915_gem_object_release_memory_region(obj);
	mutex_destroy(&obj->ttm.get_io_page.lock);
	/*
	 * Only free the GEM parts if init fully succeeded; otherwise the
	 * caller of __i915_gem_ttm_object_init() still owns the object.
	 */
	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
820213d5092SThomas Hellström 
821213d5092SThomas Hellström /**
822213d5092SThomas Hellström  * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
823213d5092SThomas Hellström  * @mem: The initial memory region for the object.
824213d5092SThomas Hellström  * @obj: The gem object.
825213d5092SThomas Hellström  * @size: Object size in bytes.
826213d5092SThomas Hellström  * @flags: gem object flags.
827213d5092SThomas Hellström  *
828213d5092SThomas Hellström  * Return: 0 on success, negative error code on failure.
829213d5092SThomas Hellström  */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);
	i915_gem_object_make_unshrinkable(obj);
	/* Lookup state for io_mem_pfn(): radix tree of cached sg positions. */
	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);
	/* Only userspace-visible objects get an mmapable bo. */
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
				   bo_type, &i915_sys_placement,
				   mem->min_page_size >> PAGE_SHIFT,
				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
	if (ret)
		return i915_ttm_err_to_gem(ret);

	obj->ttm.created = true;
	/* Sync GEM domain/cache state with the freshly placed bo ... */
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_adjust_gem_after_move(obj);
	/* ... then drop the reservation taken by ttm_bo_init_reserved(). */
	i915_gem_object_unlock(obj);

	return 0;
}
876