// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "intel_engine_pm.h"
#include "selftests/igt_flush_test.h"

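/*
 * Build a tell-tale batch ("wally"): a single page that, when executed,
 * writes STACK_MAGIC into its own backing store at byte offset 4000 and then
 * terminates with MI_BATCH_BUFFER_END. The test installs this in place of the
 * engine's normal inter-context wa batch so the marker reveals exactly when
 * the wa_bb was executed. A placeholder context is attached as vma->private
 * (the "dummy residuals" context) to mirror the real wa_ctx setup.
 */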
static struct i915_vma *create_wally(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(engine->i915, 4096);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	err = i915_vma_sync(vma);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_gem_object_put(obj);
		return ERR_CAST(cs);
	}

	/* Store STACK_MAGIC at byte offset 4000 within the batch's own page */
	if (INTEL_GEN(engine->i915) >= 6) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4;
		*cs++ = 0;
	} else if (INTEL_GEN(engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
	}
	*cs++ = vma->node.start + 4000;
	*cs++ = STACK_MAGIC;

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj); /* make the WC writes visible before execution */
	i915_gem_object_unpin_map(obj);

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		vma = ERR_CAST(vma->private);
		i915_gem_object_put(obj);
	}

	return vma;
}

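/* Submit an empty request on @ce and wait up to HZ / 5 for it to complete. */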
static int context_sync(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

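/* Create a fresh user context, run one synchronous request on it, then drop it. */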
static int new_context_sync(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = context_sync(ce);
	intel_context_put(ce);

	return err;
}

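/*
 * Alternate between the kernel context and freshly created user contexts,
 * clearing the marker before each submission. The wa_bb must not run for the
 * kernel context itself, but must run when switching from the kernel context
 * to a user context and again between two different user contexts.
 */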
static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
{
	int pass;
	int err;

	for (pass = 0; pass < 2; pass++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(engine->kernel_context);
		if (err || READ_ONCE(*result)) {
			if (!err) {
				pr_err("pass[%d] wa_bb emitted for the kernel context\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}

		WRITE_ONCE(*result, 0);
		err = new_context_sync(engine);
		if (READ_ONCE(*result) != STACK_MAGIC) {
			if (!err) {
				pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}

		WRITE_ONCE(*result, 0);
		err = new_context_sync(engine);
		if (READ_ONCE(*result) != STACK_MAGIC) {
			if (!err) {
				pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
				       pass);
				err = -EINVAL;
			}
			return err;
		}
	}

	return 0;
}

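/*
 * Two back-to-back requests on the same user context: no inter-context
 * switch occurs, so the wa_bb must not run and the marker must stay zero.
 */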
static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
	struct intel_context *ce;
	int err, i;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < 2; i++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(ce);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	if (READ_ONCE(*result)) {
		pr_err("wa_bb emitted between the same user context\n");
		return -EINVAL;
	}

	return 0;
}

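/*
 * As above, but with a kernel context request interposed between each pair of
 * user requests. Passing through the kernel context must still not cause the
 * wa_bb to run for the same user context, so the marker must remain zero.
 */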
static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
	struct intel_context *ce;
	int err, i;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < 2; i++) {
		WRITE_ONCE(*result, 0);
		err = context_sync(ce);
		if (err)
			break;

		err = context_sync(engine->kernel_context);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	if (READ_ONCE(*result)) {
		pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
		return -EINVAL;
	}

	return 0;
}

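/*
 * Install the tell-tale batch as engine->wa_ctx.vma, map its backing store so
 * we can read the marker at byte offset 4000 (result + 1000 u32s), and then
 * run the individual checks. The batch and its dummy context are released on
 * exit and the caller restores the original wa_ctx.
 */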
static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
{
	struct i915_vma *bb;
	u32 *result;
	int err;

	bb = create_wally(engine);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
	if (IS_ERR(result)) {
		intel_context_put(bb->private);
		i915_vma_unpin_and_release(&bb, 0);
		return PTR_ERR(result);
	}
	result += 1000; /* u32 index matching the byte offset 4000 written by the batch */

	engine->wa_ctx.vma = bb;

	err = mixed_contexts_sync(engine, result);
	if (err)
		goto out;

	err = double_context_sync_00(engine, result);
	if (err)
		goto out;

	err = kernel_context_sync_00(engine, result);
	if (err)
		goto out;

out:
	intel_context_put(engine->wa_ctx.vma->private);
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
	return err;
}

static int live_ctx_switch_wa(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Exercise the inter-context wa batch.
	 *
	 * Between each user context we run a wa batch, and since it may
	 * have implications for user visible state, we have to check that
	 * we do actually execute it.
	 *
	 * The trick we use is to replace the normal wa batch with a custom
	 * one that writes a marker into its own backing store, and we can
	 * then look for that marker to confirm that the batch was run when
	 * we expect it, and equally important that it was not run when we
	 * don't!
	 */

	for_each_engine(engine, gt, id) {
		struct i915_vma *saved_wa;
		int err;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (IS_GEN_RANGE(gt->i915, 4, 5))
			continue; /* MI_STORE_DWORD is privileged! */

		saved_wa = fetch_and_zero(&engine->wa_ctx.vma);

		intel_engine_pm_get(engine);
		err = __live_ctx_switch_wa(engine);
		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915))
			err = -EIO;

		engine->wa_ctx.vma = saved_wa;
		if (err)
			return err;
	}

	return 0;
}

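/*
 * Live selftest entry point. The inter-context wa batch exercised here
 * belongs to the legacy ring submission backend, so skip everything on
 * platforms that use execlists instead.
 */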
int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_ctx_switch_wa),
	};

	if (HAS_EXECLISTS(i915))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}