// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/drm_buddy.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_gem.h"
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager; /* TTM-facing manager state */
	struct drm_buddy mm; /* underlying buddy allocator */
	struct list_head reserved; /* blocks set aside via i915_ttm_buddy_man_reserve() */
	struct mutex lock; /* protects @mm and @reserved */
	u64 default_page_size; /* default minimum allocation size in bytes */
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

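	/*
	 * A zero lpfn means the placement has no upper bound, so fall back
	 * to the full size of the managed region (in pages).
	 */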
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

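	/* Translate the TTM placement hints into drm_buddy allocation flags. */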
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = (u64)bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);

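	/*
	 * drm_buddy hands out power-of-two blocks, so a contiguous request is
	 * satisfied with a single block: round the size up, force the minimum
	 * block size to cover the whole request and widen lpfn if the rounded
	 * size no longer fits below it. Any excess is trimmed back after the
	 * allocation succeeds.
	 */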
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		unsigned long pages;

		size = roundup_pow_of_two(size);
		min_page_size = size;

		pages = size >> ilog2(mm->chunk_size);
		if (pages > lpfn)
			lpfn = pages;
	}

	if (size > mm->size) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	mutex_lock(&bman->lock);
	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	mutex_unlock(&bman->lock);
	if (unlikely(err))
		goto err_free_blocks;

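	/*
	 * A contiguous request was rounded up to a power of two above, so
	 * trim the tail and keep only the originally requested size.
	 */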
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;

		mutex_lock(&bman->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     &bman_res->blocks);
		mutex_unlock(&bman->lock);
	}

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}

static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}

static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.debug = i915_ttm_buddy_man_debug,
};

/**
 * i915_ttm_buddy_man_init - Set up a buddy-allocator-based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations.
 * This must be at least as large as @chunk_size and can be overridden per-BO
 * via the BO page_alignment, which may be larger or smaller than the default
 * (but never smaller than @chunk_size).
 * @chunk_size: The minimum page size in bytes for our allocations, i.e. the
 * order-zero block size
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property that allocated blocks have natural
 * power-of-two alignment. As long as the real starting address is a suitably
 * large power of two, or naturally starts from zero, this should be fine. The
 * &i915_ttm_buddy_man_reserve interface can also be used to preserve alignment
 * if, say, there is some unusable range at the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if @size is not aligned to @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
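
/*
 * Illustrative usage sketch (not taken from the driver): a caller that owns a
 * struct ttm_device would typically pair init/fini like below, optionally
 * reserving an unusable head of the region. TTM_PL_VRAM and the SZ_* sizes
 * here are placeholder assumptions for the example only.
 *
 *	err = i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM, false,
 *				      SZ_4G, SZ_64K, PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 *	err = i915_ttm_buddy_man_reserve(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					 0, SZ_1M);
 *	...
 *	i915_ttm_buddy_man_fini(bdev, TTM_PL_VRAM);
 */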

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that anything reserved with &i915_ttm_buddy_man_reserve is also freed
 * here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

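	/*
	 * Stop new allocations, then evict anything still resident before
	 * tearing down the allocator state.
	 */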
	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved);
	drm_buddy_fini(mm);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes from the start of the region, which is assumed
 * to begin at zero
 * @size: The size in bytes
 *
 * Note that the starting address of the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	unsigned long flags = 0;
	int ret;

	flags |= DRM_BUDDY_RANGE_ALLOCATION;

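	/*
	 * Mark [start, start + size) as used with a range-restricted
	 * allocation; the blocks stay on the reserved list until
	 * i915_ttm_buddy_man_fini() releases them.
	 */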
	mutex_lock(&bman->lock);
	ret = drm_buddy_alloc_blocks(mm, start,
				     start + size,
				     size, mm->chunk_size,
				     &bman->reserved,
				     flags);
	mutex_unlock(&bman->lock);

	return ret;
}