// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

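/*
 * Fill one page of the iomap with a test pattern and read back a few sample
 * bytes (the first byte, the last byte and a random offset) to confirm the
 * page is backed by real memory. Returns 0 on success, -EINVAL on mismatch.
 */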
static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = prandom_u32_max(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io_start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

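/*
 * Map a single page of the region at @offset and exercise it with a set of
 * test patterns and their complements via __iopagetest(). Returns 0 on
 * success, -EFAULT if the mapping fails, or the first pattern mismatch.
 */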
static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io_start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

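/* Pick a random page-aligned offset below @last to use as a spot check. */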
static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return (resource_size_t)prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

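/*
 * Sanity check the io mapping of the region by writing and reading back
 * test patterns: either every page (when @test_all is set) or just the
 * first, last and a randomly chosen page.
 */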
static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (mem->io_size < PAGE_SIZE)
		return 0;

	last = mem->io_size - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

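/**
 * intel_memory_region_lookup - Find a region by class and instance
 * @i915: The i915 device private.
 * @class: The memory type (class) to look for.
 * @instance: The instance of that type.
 *
 * Return: The matching region, or NULL if none is registered.
 */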
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

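/**
 * intel_memory_region_by_type - Find the first region of a given type
 * @i915: The i915 device private.
 * @mem_type: The memory type to look for.
 *
 * Return: The first matching region, or NULL if none is registered.
 */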
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

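/**
 * intel_memory_region_debug - Print usage information for a region
 * @mr: The region to describe.
 * @printer: The drm printer to emit to.
 *
 * Prints the TTM resource manager state if one is attached, otherwise the
 * total and available sizes of the region.
 */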
void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa, available:%pa bytes\n",
			   &mr->total, &mr->avail);
}

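/*
 * Optionally verify the io mapping of a freshly created region. The full
 * sweep of every page is only performed when the memtest module parameter
 * is set; otherwise, with CONFIG_DRM_I915_DEBUG_GEM, only a quick spot
 * check of a few pages is run. Regions without an io window are skipped.
 */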
static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io_start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

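/**
 * intel_memory_region_create - Create and initialise a memory region
 * @i915: The i915 device private.
 * @start: Start of the region's address range.
 * @size: Size of the region.
 * @min_page_size: Minimum page (chunk) size for allocations from the region.
 * @io_start: Start of the CPU-accessible io range, if any.
 * @io_size: Size of the CPU-accessible io range.
 * @type: The intel_memory_type of the region.
 * @instance: The instance number of the region within its type.
 * @ops: The region ops, including optional init and release hooks.
 *
 * Allocates and initialises the region, calls the init hook if provided and
 * optionally runs an io memtest on the result.
 *
 * Return: A pointer to the new region, or an ERR_PTR() on failure.
 */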
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->io_size = io_size;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}

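/**
 * intel_memory_region_set_name - Set the printable name of a region
 * @mem: The region to name.
 * @fmt: printf-style format string for the name.
 * @...: Arguments for the format string.
 *
 * The formatted name is truncated to fit the region's name buffer.
 */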
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

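/**
 * intel_memory_region_destroy - Release and free a memory region
 * @mem: The region to destroy.
 *
 * Calls the region's release hook, if any, and frees the region unless the
 * release hook reports an error.
 */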
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

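/**
 * intel_memory_regions_hw_probe - Probe and set up all supported regions
 * @i915: The i915 device private.
 *
 * Walks the static region map and sets up each region advertised by the
 * hardware, tearing down any regions already created on failure.
 *
 * Return: 0 on success, negative error code on failure.
 */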
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

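/**
 * intel_memory_regions_driver_release - Tear down all memory regions
 * @i915: The i915 device private.
 *
 * Destroys every region previously set up by intel_memory_regions_hw_probe()
 * and clears the corresponding slots in i915->mm.regions.
 */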
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif