/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"

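/*
 * Write a single dword through the CPU (cacheable) domain: prepare the
 * object for a CPU write, kmap the backing page and flush the target
 * cacheline around the store only when the caching mode requires it.
 */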
static int cpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*cpu = v;

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	kunmap_atomic(map);
	i915_gem_object_finish_access(obj);

	return 0;
}

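/*
 * Read a single dword back through the CPU domain, invalidating any
 * stale cacheline first when the caching mode requires it.
 */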
static int cpu_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*v = *cpu;

	kunmap_atomic(map);
	i915_gem_object_finish_access(obj);

	return 0;
}

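/*
 * Write a dword through the GTT: move the object to the GTT domain,
 * pin it into the mappable aperture and store through an I/O mapping.
 * The device must be held awake across the aperture access.
 */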
static int gtt_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

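/* Read a dword back through a GTT (aperture) I/O mapping. */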
static int gtt_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

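/* Write a dword through a write-combining CPU mapping of the object. */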
static int wc_set(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;
	i915_gem_object_unpin_map(obj);

	return 0;
}

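/* Read a dword back through a write-combining CPU mapping. */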
static int wc_get(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 *v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(obj);

	return 0;
}

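/*
 * Write a dword from the GPU itself: emit MI_STORE_DWORD_IMM on the
 * render engine's kernel context, using the command layout appropriate
 * to the hardware generation, and track the write against the vma.
 */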
static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		i915_vma_unpin(vma);
		return PTR_ERR(cs);
	}

	if (INTEL_GEN(i915) >= 8) {
		/* gen8+: 64b address, bit 22 selects the global GTT */
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
		/* gen4-7: 32b address with an explicit GGTT flag */
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		/* gen2/3: pad the shorter command out to the 4 dwords reserved */
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	i915_vma_unpin(vma);

	i915_request_add(rq);

	return err;
}

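/*
 * Not every access mode works everywhere: each mode carries a validity
 * predicate, e.g. the GPU path needs a render engine that can emit
 * MI_STORE_DWORD_IMM, and no mode is usable once the GT is wedged.
 */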
static bool always_valid(struct drm_i915_private *i915)
{
	return true;
}

static bool needs_fence_registers(struct drm_i915_private *i915)
{
	return !intel_gt_is_wedged(&i915->gt);
}

static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
	if (intel_gt_is_wedged(&i915->gt))
		return false;

	if (!HAS_ENGINE(i915, RCS0))
		return false;

	return intel_engine_can_store_dword(i915->engine[RCS0]);
}

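/*
 * The table of access modes: each entry names a setter/getter pair and
 * the predicate deciding whether the mode is usable on this device.
 * The GPU mode has no getter, so it only ever acts as a writer.
 */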
static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
	int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
	bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
	{ "wc", wc_set, wc_get, always_valid },
	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
	{ },
};

static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE / 64;
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	struct drm_i915_gem_object *obj;
	unsigned long count, n;
	u32 *offsets, *values;
	int err = 0;

	/* We repeatedly write, overwrite and read from a sequence of
	 * cachelines to try to detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API, which should prevent incoherency.
	 */

	offsets = kmalloc_array(ncachelines, 2 * sizeof(u32), GFP_KERNEL);
	if (!offsets)
		return -ENOMEM;

	/* One dword per 64B cacheline, rotating through the dwords within */
	for (count = 0; count < ncachelines; count++)
		offsets[count] = count * 64 + 4 * (count % 16);

	values = offsets + ncachelines;

	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(i915))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(i915))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(i915))
					continue;

				for_each_prime_number_from(count, 1, ncachelines) {
					obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(obj)) {
						err = PTR_ERR(obj);
						goto free;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
					for (n = 0; n < count; n++)
						values[n] = prandom_u32_state(&prng);

					for (n = 0; n < count; n++) {
						err = over->set(obj, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, over->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						err = write->set(obj, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, write->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(obj, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, read->name, err);
							goto put_object;
						}

						if (found != values[n]) {
							pr_err("Value[%lu/%lu] mismatch (overwrite with %s), wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
							       n, count, over->name,
							       write->name, values[n],
							       read->name, found,
							       ~values[n], offsets[n]);
							err = -EINVAL;
							goto put_object;
						}
					}

					i915_gem_object_put(obj);
				}
			}
		}
	}
free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(obj);
	goto free;
}

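/* Register the coherency check with the live selftest harness. */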
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_coherency),
	};

	return i915_subtests(tests, i915);
}