/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

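/*
 * Submit the request and wait briefly for it to complete, retiring the
 * request (and everything before it on its timeline) on success. We take
 * an extra reference first as the request may otherwise be freed by
 * retirement as soon as it is submitted.
 */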
static int request_sync(struct i915_request *rq)
{
	long timeout;
	int err = 0;

	i915_request_get(rq);

	i915_request_add(rq);
	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0) {
		err = timeout;
	} else {
		mutex_lock(&rq->timeline->mutex);
		i915_request_retire_upto(rq);
		mutex_unlock(&rq->timeline->mutex);
	}

	i915_request_put(rq);

	return err;
}

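/*
 * Wait for (and retire) every request submitted along the context's
 * timeline, returning the first error encountered while waiting.
 */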
static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->timeline;
	int err = 0;

	mutex_lock(&tl->mutex);
	do {
		struct i915_request *rq;
		long timeout;

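		/*
		 * The pointer to the last request is RCU protected;
		 * sample it and secure a full reference under the RCU
		 * read lock before waiting on it.
		 */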
		rcu_read_lock();
		rq = rcu_dereference(tl->last_request.request);
		if (rq)
			rq = i915_request_get_rcu(rq);
		rcu_read_unlock();
		if (!rq)
			break;

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(rq);

		i915_request_put(rq);
	} while (!err);
	mutex_unlock(&tl->mutex);

	return err;
}

static int __live_active_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *ce;
	int pass;
	int err;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(fixme, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

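	/*
	 * Three passes: after the first, each new request reuses a
	 * context whose active tracker is still non-idle, waiting on
	 * the idle-barrier from the previous pass.
	 */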
	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err;
		}

		err = request_sync(rq);
		if (err)
			goto err;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto err;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto err;
		}
	}

	/* Now make sure our idle-barriers are flushed */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!\n");
		err = -EINVAL;
	}

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake after idle-barriers\n",
				  engine->name);
		GEM_TRACE_DUMP();

		err = -EINVAL;
		goto err;
	}

err:
	intel_context_put(ce);
	return err;
}

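/* Run the active-context check on every engine, using a mock client. */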
static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);

	fixme = live_context(gt->i915, file);
	if (IS_ERR(fixme)) {
		err = PTR_ERR(fixme);
		goto unlock;
	}

	for_each_engine(engine, gt->i915, id) {
		err = __live_active_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
		if (err)
			break;
	}

unlock:
	mutex_unlock(&gt->i915->drm.struct_mutex);
	mock_file_free(gt->i915, file);
	return err;
}

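/*
 * Submit a request on ce that also updates the remote context (via
 * intel_context_prepare_remote_request()), keeping the remote pinned
 * for the duration, then wait for the request to complete.
 */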
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}

static int __live_remote_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *local, *remote;
	int pass;
	int err;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 */

	remote = intel_context_create(fixme, engine);
	if (IS_ERR(remote))
		return PTR_ERR(remote);

	local = intel_context_create(fixme, engine);
	if (IS_ERR(local)) {
		err = PTR_ERR(local);
		goto err_remote;
	}

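	/*
	 * Mix remote updates from both an ordinary context and the
	 * kernel context (the source of the idle-barriers), checking
	 * that the foreign fences do not consume the idle-barrier.
	 */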
	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}

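/* Run the remote-update check on every engine, using a mock client. */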
static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);

	fixme = live_context(gt->i915, file);
	if (IS_ERR(fixme)) {
		err = PTR_ERR(fixme);
		goto unlock;
	}

	for_each_engine(engine, gt->i915, id) {
		err = __live_remote_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
		if (err)
			break;
	}

unlock:
	mutex_unlock(&gt->i915->drm.struct_mutex);
	mock_file_free(gt->i915, file);
	return err;
}

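/* Live selftest entry point; skipped if the GPU is already wedged. */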
int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}