// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM regions
 * to TTM memory types and resource managers.
 */
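
/*
 * Rough flow, as an illustrative sketch only (error handling elided, the
 * region pointer "mem" is assumed to come from the caller):
 *
 *	int mem_type = intel_region_to_ttm_type(mem);
 *	struct ttm_resource_manager *man;
 *
 *	intel_region_ttm_init(mem);
 *	man = ttm_manager_type(&mem->i915->bdev, mem_type);
 *
 * Resources handed out by the manager are then converted to sg-tables
 * with intel_region_ttm_resource_to_rsgt() for use by the GEM backends.
 */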

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement that.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
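
/*
 * Worked examples of the mapping above (illustrative only):
 *
 *	INTEL_MEMORY_SYSTEM          -> TTM_PL_SYSTEM
 *	INTEL_MEMORY_LOCAL, inst. 0  -> TTM_PL_PRIV
 *	INTEL_MEMORY_LOCAL, inst. 1  -> TTM_PL_PRIV + 1
 */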

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      mem->io_size,
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}
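
/*
 * A hypothetical setup/teardown pairing, shown as a sketch only ("i915"
 * and "mem" are assumed to be owned by the surrounding driver code):
 *
 *	ret = intel_region_ttm_device_init(i915);
 *	if (ret)
 *		return ret;
 *	ret = intel_region_ttm_init(mem);
 *	...
 *	intel_region_ttm_fini(mem);
 *	intel_region_ttm_device_fini(i915);
 */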

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region.
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		msleep(20);
		flush_delayed_work(&mem->i915->bdev.wq);
	}

	/*
	 * If we leaked objects, don't free the region: doing so would
	 * risk a use-after-free.
	 */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A refcounted sg_table (struct i915_refct_sgt) on success, an error
 * pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start);
	}
}
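
/*
 * A minimal conversion sketch (illustrative only; "mem" and "res" come from
 * the surrounding backend code and error handling is mostly elided):
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	...
 *	i915_refct_sgt_put(rsgt);
 */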

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @size: The requested size in bytes.
 * @flags: Allocation flags.
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (mem->io_size && mem->io_size < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			place.lpfn = mem->io_size >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL; /* Rather blow up than risk a use-after-free */
	return ret ? ERR_PTR(ret) : res;
}

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	mock_bo.base.size = res->num_pages << PAGE_SHIFT;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}
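
/*
 * A usage sketch of the selftest-only allocator together with the free
 * helper above (assumes CONFIG_DRM_I915_SELFTEST and a valid "mem"; not a
 * definitive example):
 *
 *	struct ttm_resource *res;
 *
 *	res = intel_region_ttm_resource_alloc(mem, SZ_64K, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	intel_region_ttm_resource_free(mem, res);
 */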