/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"

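/* The object under test and the engine used for any GPU access to it. */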
struct context {
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
};

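/*
 * Write @v into the object at byte @offset through the CPU cache domain:
 * kmap the backing page and clflush around the store as instructed by
 * i915_gem_object_prepare_write().
 */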
static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*cpu = v;

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}

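/*
 * Read back a u32 at byte @offset through the CPU cache domain,
 * flushing any stale cacheline first if the read requires it.
 */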
static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*v = *cpu;

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}

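/*
 * Write @v through the mappable GGTT aperture: move the object to the
 * GTT domain, pin it into the aperture and perform an uncached
 * iowrite32, holding a GT wakeref for the duration of the access.
 */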
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

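/*
 * Read back a u32 at byte @offset through the mappable GGTT aperture.
 */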
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

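/*
 * Write @v through a write-combining CPU mapping of the object's pages,
 * flushing the WC buffers for the dirtied range afterwards.
 */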
static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;

	__i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

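/*
 * Read back a u32 at byte @offset through a write-combining CPU mapping.
 */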
static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

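/*
 * Write @v from the GPU itself: submit a kernel request on ctx->engine
 * that emits the MI_STORE_DWORD_IMM variant appropriate for the
 * graphics version, targeting the dword at @offset via the object's
 * GGTT address.
 */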
static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	if (err)
		goto out_unlock;

	rq = intel_engine_create_kernel_request(ctx->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}

	if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

out_rq:
	i915_request_add(rq);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	i915_gem_object_unlock(ctx->obj);

	return err;
}

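/*
 * Validity checks used to skip coherency modes that the device, or its
 * current state, cannot exercise.
 */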
static bool always_valid(struct context *ctx)
{
	return true;
}

static bool needs_fence_registers(struct context *ctx)
{
	struct intel_gt *gt = ctx->engine->gt;

	if (intel_gt_is_wedged(gt))
		return false;

	return gt->ggtt->num_fences;
}

static bool needs_mi_store_dword(struct context *ctx)
{
	if (intel_gt_is_wedged(ctx->engine->gt))
		return false;

	return intel_engine_can_store_dword(ctx->engine);
}

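/*
 * The coherency paths under test; "gpu" has no getter and is only ever
 * exercised as a writer. The empty entry terminates the table.
 */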
static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct context *ctx, unsigned long offset, u32 v);
	int (*get)(struct context *ctx, unsigned long offset, u32 *v);
	bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
	{ "wc", wc_set, wc_get, always_valid },
	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
	{ },
};

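/*
 * Pick one uabi engine at random, so that repeated runs spread the
 * test across all engines.
 */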
static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for_each_uabi_engine(engine, i915)
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	for_each_uabi_engine(engine, i915)
		if (count-- == 0)
			return engine;

	return NULL;
}

static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE/64;
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	unsigned long count, n;
	u32 *offsets, *values;
	I915_RND_STATE(prng);
	struct context ctx;
	int err = 0;

	/*
	 * We repeatedly write, overwrite and read from a sequence of
	 * cachelines in order to try and detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API which should prevent incoherency.
	 */

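	/*
	 * One staggered dword slot per cacheline; values[] reuses the
	 * second half of the same allocation.
	 */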
	offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
	if (!offsets)
		return -ENOMEM;
	for (count = 0; count < ncachelines; count++)
		offsets[count] = count * 64 + 4 * (count % 16);

	values = offsets + ncachelines;

	ctx.engine = random_engine(i915, &prng);
	if (!ctx.engine) {
		err = -ENODEV;
		goto out_free;
	}
	pr_info("%s: using %s\n", __func__, ctx.engine->name);
	intel_engine_pm_get(ctx.engine);

	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(&ctx))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(&ctx))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(&ctx))
					continue;

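				/*
				 * Repeat for every prime count of cachelines
				 * up to a full page; primes avoid any
				 * periodic relationship with the
				 * power-of-two page layout.
				 */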
				for_each_prime_number_from(count, 1, ncachelines) {
					ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(ctx.obj)) {
						err = PTR_ERR(ctx.obj);
						goto out_pm;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
					for (n = 0; n < count; n++)
						values[n] = prandom_u32_state(&prng);

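					/* Seed each slot with a stale value (the bitwise inverse). */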
					for (n = 0; n < count; n++) {
						err = over->set(&ctx, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, over->name, err);
							goto put_object;
						}
					}

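					/* Then store the expected values along the write path. */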
					for (n = 0; n < count; n++) {
						err = write->set(&ctx, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, write->name, err);
							goto put_object;
						}
					}

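					/* Finally, read everything back and check for coherency. */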
					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(&ctx, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, read->name, err);
							goto put_object;
						}

						if (found != values[n]) {
							pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
							       n, count, over->name,
							       write->name, values[n],
							       read->name, found,
							       ~values[n], offsets[n]);
							err = -EINVAL;
							goto put_object;
						}
					}

					i915_gem_object_put(ctx.obj);
				}
			}
		}
	}
out_pm:
	intel_engine_pm_put(ctx.engine);
out_free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(ctx.obj);
	goto out_pm;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_coherency),
	};

	return i915_live_subtests(tests, i915);
}