// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

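/*
 * Translation table from our INTEL_REGION_* ids to (class, instance)
 * pairs; intel_memory_regions_hw_probe() walks this table to decide
 * which backend to instantiate for each supported region.
 */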
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

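/*
 * Find the memory region matching the given (class, instance) pair, or
 * NULL if the device has no such region.
 */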
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

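/* Return the first region of the given type, irrespective of instance. */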
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

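/*
 * Hand a list of blocks back to the buddy allocator and return the
 * total size freed. Caller must hold mem->mm_lock.
 */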
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

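/* Free a list of blocks and credit their size back to mem->avail. */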
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

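/* Free a single block by pushing it through the list-based free path. */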
void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

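/*
 * Allocate @size bytes as a list of buddy blocks. The loop greedily
 * tries the largest order that still fits the remaining size, stepping
 * down on failure until it hits min_order. I915_ALLOC_MIN_PAGE_SIZE
 * clamps min_order to the region's minimum page size, while
 * I915_ALLOC_CONTIGUOUS rounds the request up to a power of two so the
 * whole allocation comes back as a single block.
 */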
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > mem->mm.size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add_tail(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}

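/*
 * Convenience wrapper returning a single block; meant for requests
 * (typically I915_ALLOC_CONTIGUOUS) that are satisfied by exactly one
 * block.
 */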
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

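/* Set up the buddy allocator over the region's full address range. */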
int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

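/* Return any reserved ranges and tear down the buddy state. */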
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_free_list(&mem->mm, &mem->reserved);
	i915_buddy_fini(&mem->mm);
}

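/*
 * Carve a range out of the region (e.g. space already claimed by the
 * BIOS or firmware) so the allocator never hands it out.
 */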
int intel_memory_region_reserve(struct intel_memory_region *mem,
				u64 offset, u64 size)
{
	int ret;

	mutex_lock(&mem->mm_lock);
	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
	mutex_unlock(&mem->mm_lock);

	return ret;
}

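/*
 * Allocate and initialise a new region, invoking the backend's
 * ops->init() hook. Returns an ERR_PTR on failure; the region starts
 * with a single reference, dropped via intel_memory_region_put().
 */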
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}

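/* printf-style helper for filling in mem->name. */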
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

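/* kref release callback: let the backend clean up, then free the region. */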
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

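/* Reference counting: a region lives until its last reference is put. */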
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

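/*
 * Probe which regions the device supports and instantiate a backend
 * for each: shmem for system memory plus the stolen local/system
 * setups; region types without a backend here are simply skipped. Any
 * failure unwinds the regions created so far.
 */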
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		mem->type = type;
		mem->instance = instance;

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

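/* Drop the driver's reference on every region, e.g. at driver teardown. */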
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif