// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */

/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table. Basically it provides the mapping from i915 GEM
 * regions to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
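
/*
 * Example: typical pairing of the two calls above during driver load and
 * unload. A sketch only; the surrounding probe and remove code is
 * hypothetical and real error unwinding is more involved.
 *
 *	ret = intel_region_ttm_device_init(dev_priv);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_region_ttm_device_fini(dev_priv);
 */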

/**
 * intel_region_to_ttm_type - Map an i915 memory region to a TTM memory type
 * @mem: The memory region.
 *
 * We use the driver-private TTM memory types for now, reserving
 * TTM_PL_VRAM for stolen memory and TTM_PL_TT for GGTT use should we
 * decide to implement those.
 *
 * Return: The TTM memory type for the region.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
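
/*
 * For illustration, the resulting mapping (the instance numbers are
 * examples only):
 *
 *	INTEL_MEMORY_SYSTEM                  -> TTM_PL_SYSTEM
 *	INTEL_MEMORY_LOCAL, mem->instance 0  -> TTM_PL_PRIV
 *	INTEL_MEMORY_LOCAL, mem->instance 1  -> TTM_PL_PRIV + 1
 */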

static struct ttm_resource *
intel_region_ttm_node_reserve(struct intel_memory_region *mem,
			      resource_size_t offset,
			      resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	/*
	 * Having to use a mock_bo is unfortunate but stems from some
	 * drivers having private managers that insist on knowing what the
	 * allocated memory is intended for, using it to send private
	 * data to the manager. More recently the bo has also been used to
	 * send alignment info to the manager. Assume that, apart from the
	 * latter, none of the managers we use will ever access the buffer
	 * object members, hoping we can pass the alignment info in the
	 * struct ttm_place in the future.
	 */

	place.fpfn = offset >> PAGE_SHIFT;
	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	mock_bo.base.size = size;
	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;

	return ret ? ERR_PTR(ret) : res;
}

/**
 * intel_region_ttm_node_free - Free a node allocated from a resource manager
 * @mem: The region the node was allocated from.
 * @res: The opaque node representing an allocation.
 */
void intel_region_ttm_node_free(struct intel_memory_region *mem,
				struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;

	man->func->free(man, res);
}

static const struct intel_memory_region_private_ops priv_ops = {
	.reserve = intel_region_ttm_node_reserve,
	.free = intel_region_ttm_node_free,
};
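
/*
 * The region code reaches these through mem->priv_ops. A hypothetical
 * caller reserving a range and releasing it again (a sketch only; the
 * actual call sites live in the region code):
 *
 *	struct ttm_resource *res;
 *
 *	res = mem->priv_ops->reserve(mem, offset, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	mem->priv_ops->free(mem, res);
 */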

/**
 * intel_region_ttm_init - Initialize a TTM region.
 * @mem: The memory region.
 *
 * Sets up a standalone TTM range manager for the region and hooks it up
 * to the region's private ops for reserving and freeing nodes.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = ttm_range_man_init(bdev, mem_type, false,
				 resource_size(&mem->region) >> PAGE_SHIFT);
	if (ret)
		return ret;

	mem->chunk_size = PAGE_SIZE;
	mem->max_order =
		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
	mem->is_range_manager = true;
	mem->priv_ops = &priv_ops;
	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
void intel_region_ttm_fini(struct intel_memory_region *mem)
{
	int ret;

	ret = ttm_range_man_fini(&mem->i915->bdev,
				 intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;
}
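
/*
 * Example: pairing the two calls above when bringing a region up and
 * down. A sketch only; the surrounding region create/destroy code is
 * hypothetical.
 *
 *	ret = intel_region_ttm_init(mem);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_region_ttm_fini(mem);
 */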

/**
 * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
 * to an sg_table.
 * @mem: The memory region.
 * @res: The resource manager node obtained from the TTM resource manager.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced sg_table on success, an error pointer on failure.
 */
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
					     struct ttm_resource *res)
{
	struct ttm_range_mgr_node *range_node =
		container_of(res, typeof(*range_node), base);

	GEM_WARN_ON(!mem->is_range_manager);
	return i915_sg_from_mm_node(&range_node->mm_nodes[0],
				    mem->region.start);
}
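
/*
 * Example: a backend translating its allocation to an sg_table. A sketch
 * only; "obj" stands in for a hypothetical backend object owning the
 * table.
 *
 *	struct sg_table *st;
 *
 *	st = intel_region_ttm_node_to_st(mem, res);
 *	if (IS_ERR(st))
 *		return PTR_ERR(st);
 *	obj->pages = st;
 */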

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_node_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @size: The requested size in bytes.
 * @flags: Allocation flags.
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_node_to_st(), and can be freed using
 * intel_region_ttm_node_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
			    resource_size_t size,
			    unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	/*
	 * We ignore the flags for now, since we're using the range
	 * manager and contiguous and min page size would be fulfilled
	 * by default if size is min page size aligned.
	 */
	mock_bo.base.size = size;

	if (mem->is_range_manager) {
		if (size >= SZ_1G)
			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
		else if (size >= SZ_2M)
			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
		else if (size >= SZ_64K)
			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
	}

	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;
	return ret ? ERR_PTR(ret) : res;
}
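
/*
 * Example: selftest-style allocation round trip through the helpers in
 * this file. A sketch only.
 *
 *	struct ttm_resource *res;
 *	struct sg_table *st;
 *
 *	res = intel_region_ttm_node_alloc(mem, SZ_2M, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	st = intel_region_ttm_node_to_st(mem, res);
 *	if (IS_ERR(st)) {
 *		intel_region_ttm_node_free(mem, res);
 *		return PTR_ERR(st);
 *	}
 *	...
 *	sg_free_table(st);
 *	kfree(st);
 *	intel_region_ttm_node_free(mem, res);
 */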
#endif