1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include <linux/sort.h>
8 
9 #include "i915_selftest.h"
10 #include "intel_gpu_commands.h"
11 #include "intel_gt_clock_utils.h"
12 #include "selftest_engine.h"
13 #include "selftest_engine_heartbeat.h"
14 #include "selftests/igt_atomic.h"
15 #include "selftests/igt_flush_test.h"
16 #include "selftests/igt_spinner.h"
17 
18 #define COUNT 5
19 
/*
 * Three-way comparison of two u64 samples for sort().
 *
 * Note: do NOT compute "*a - *b" here. The 64-bit difference is narrowed
 * to int by the return, losing the high bits; values that differ only in
 * the upper 32 bits compare as equal, and the sign of the result can be
 * wrong, corrupting the sort order used by trifilter().
 */
static int cmp_u64(const void *A, const void *B)
{
	const u64 *a = A, *b = B;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}
26 
27 static u64 trifilter(u64 *a)
28 {
29 	sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
30 	return (a[1] + 2 * a[2] + a[3]) >> 2;
31 }
32 
33 static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
34 {
35 	*cs++ = MI_SEMAPHORE_WAIT |
36 		MI_SEMAPHORE_GLOBAL_GTT |
37 		MI_SEMAPHORE_POLL |
38 		op;
39 	*cs++ = value;
40 	*cs++ = offset;
41 	*cs++ = 0;
42 
43 	return cs;
44 }
45 
46 static u32 *emit_store(u32 *cs, u32 offset, u32 value)
47 {
48 	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
49 	*cs++ = offset;
50 	*cs++ = 0;
51 	*cs++ = value;
52 
53 	return cs;
54 }
55 
56 static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
57 {
58 	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
59 	*cs++ = i915_mmio_reg_offset(reg);
60 	*cs++ = offset;
61 	*cs++ = 0;
62 
63 	return cs;
64 }
65 
/*
 * Update a semaphore dword in the status page that the GPU is polling.
 * The write barrier orders the store before anything that follows, so the
 * new value is visible to the hardware's MI_SEMAPHORE_WAIT poll promptly.
 */
static void write_semaphore(u32 *x, u32 value)
{
	WRITE_ONCE(*x, value);
	wmb();
}
71 
/*
 * Sample RING_TIMESTAMP and CTX_TIMESTAMP deltas across a CPU-timed ~100us
 * window, handshaking with the GPU through semaphores in the engine's
 * status page.
 *
 * Status page dwords used (sema[] is dword index 1000, i.e. byte 4000):
 *   sema[0] @ +4000: RING_TIMESTAMP at window start
 *   sema[1] @ +4004: CTX_TIMESTAMP at window start
 *   sema[2] @ +4008: CPU<->GPU handshake semaphore
 *   sema[3] @ +4012: CTX_TIMESTAMP at window end
 *   sema[4] @ +4016: RING_TIMESTAMP at window end
 *
 * On success returns 0 and fills *dt with the CPU-measured elapsed time
 * (local_clock() delta), *d_ring / *d_ctx with the raw timestamp-counter
 * deltas. Returns a negative errno on failure.
 */
static int __measure_timestamps(struct intel_context *ce,
				u64 *dt, u64 *d_ring, u64 *d_ctx)
{
	struct intel_engine_cs *engine = ce->engine;
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
	u32 offset = i915_ggtt_offset(engine->status_page.vma);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* 7 commands x 4 dwords each */
	cs = intel_ring_begin(rq, 28);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	/* Signal & wait for start */
	cs = emit_store(cs, offset + 4008, 1);
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);

	/* Opening samples, taken as soon as the CPU releases the semaphore */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);

	/* Busy wait */
	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);

	/* Closing samples, taken when the CPU signals the end of the window */
	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);

	intel_ring_advance(rq, cs);
	i915_request_get(rq);
	i915_request_add(rq);
	intel_engine_flush_submission(engine);

	/* Wait for the request to start executing, that then waits for us */
	while (READ_ONCE(sema[2]) == 0)
		cpu_relax();

	/* Run the request for a 100us, sampling timestamps before/after */
	preempt_disable();
	*dt = local_clock();
	write_semaphore(&sema[2], 0);
	udelay(100);
	*dt = local_clock() - *dt;
	write_semaphore(&sema[2], 1);
	preempt_enable();

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		return -ETIME;
	}
	i915_request_put(rq);

	pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
		 engine->name, sema[1], sema[3], sema[0], sema[4]);

	/* u32 subtraction: deltas remain correct even if a counter wrapped */
	*d_ctx = sema[3] - sema[1];
	*d_ring = sema[4] - sema[0];
	return 0;
}
135 
/*
 * Take COUNT timestamp measurements on one engine and verify:
 *  (a) RING_TIMESTAMP, converted to ns, tracks CPU walltime, and
 *  (b) CTX_TIMESTAMP advances in lockstep with RING_TIMESTAMP,
 * each within roughly +/-33% (the 3:4 ratio bounds below).
 */
static int __live_engine_timestamps(struct intel_engine_cs *engine)
{
	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
	struct intel_context *ce;
	int i, err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (i = 0; i < COUNT; i++) {
		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	/* Outlier-rejecting average of each sample set */
	dt = trifilter(st);
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
		engine->name, dt,
		intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
		intel_gt_clock_interval_to_ns(engine->gt, d_ring));

	/* Check GPU ring ticks (converted to ns) against CPU walltime */
	d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
	if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
		pr_err("%s Mismatch between ring timestamp and walltime!\n",
		       engine->name);
		return -EINVAL;
	}

	/* Re-derive the raw tick deltas; d_ring was converted to ns above */
	d_ring = trifilter(s_ring);
	d_ctx = trifilter(s_ctx);

	/*
	 * Cross-check the two counters by scaling each delta by the other's
	 * presumed tick frequency; if both measure the same interval the two
	 * products match. On Icelake the context timestamp appears to tick
	 * at a fixed 12.5MHz (80ns) rather than the CS clock, hence the
	 * special-cased multiplier (see the inline question mark).
	 */
	d_ctx *= engine->gt->clock_frequency;
	if (IS_ICELAKE(engine->i915))
		d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
	else
		d_ring *= engine->gt->clock_frequency;

	if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
		pr_err("%s Mismatch between ring and context timestamps!\n",
		       engine->name);
		return -EINVAL;
	}

	return 0;
}
188 
189 static int live_engine_timestamps(void *arg)
190 {
191 	struct intel_gt *gt = arg;
192 	struct intel_engine_cs *engine;
193 	enum intel_engine_id id;
194 
195 	/*
196 	 * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
197 	 * the same CS clock.
198 	 */
199 
200 	if (INTEL_GEN(gt->i915) < 8)
201 		return 0;
202 
203 	for_each_engine(engine, gt, id) {
204 		int err;
205 
206 		st_engine_heartbeat_disable(engine);
207 		err = __live_engine_timestamps(engine);
208 		st_engine_heartbeat_enable(engine);
209 		if (err)
210 			return err;
211 	}
212 
213 	return 0;
214 }
215 
/*
 * Check that if an engine supports busy-stats, they tell the truth:
 * an idle engine must report (almost) no busy time over a 100us window,
 * and an engine monopolised by a spinner must report (almost) the whole
 * window as busy.
 */
static int live_engine_busy_stats(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		ktime_t de, dt;
		ktime_t t[2];

		if (!intel_engine_supports_stats(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Start each engine from a fully parked (idle) GT */
		if (intel_gt_pm_wait_for_idle(gt)) {
			err = -EBUSY;
			break;
		}

		st_engine_heartbeat_disable(engine);

		/*
		 * Idle check: sample busy time around a 100us sleep with
		 * preemption off so the window is not inflated by the CPU
		 * being scheduled away. Allow up to 10ns of sampling slop.
		 */
		ENGINE_TRACE(engine, "measuring idle time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (de < 0 || de > 10) {
			pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

		/* 100% busy */
		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto end;
		}
		i915_request_add(rq);

		/* Spinner never started: the GT is unusable, wedge it */
		if (!igt_wait_for_spinner(&spin, rq)) {
			intel_gt_set_wedged(engine->gt);
			err = -ETIME;
			goto end;
		}

		/*
		 * Busy check: same 100us window while the spinner owns the
		 * engine; require the reported busyness to be within 5% of
		 * the elapsed time, in either direction.
		 */
		ENGINE_TRACE(engine, "measuring busy time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
		if (100 * de < 95 * dt || 95 * de > 100 * dt) {
			pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

		/* Per-engine cleanup, reached on both success and failure */
end:
		st_engine_heartbeat_enable(engine);
		igt_spinner_end(&spin);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}
312 
/*
 * Check we can call intel_engine_pm_put from any context, including the
 * atomic contexts enumerated by igt_atomic_phases. No failures are
 * reported directly, but if we mess up lockdep should tell us.
 */
static int live_engine_pm(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Start from a fully idle GT so each iteration sees a parked engine */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		const typeof(*igt_atomic_phases) *p;

		for (p = igt_atomic_phases; p->name; p++) {
			/*
			 * Acquisition is always synchronous, except if we
			 * know that the engine is already awake, in which
			 * case we should use intel_engine_pm_get_if_awake()
			 * to atomically grab the wakeref.
			 *
			 * In practice,
			 *    intel_engine_pm_get();
			 *    intel_engine_pm_put();
			 * occurs in one thread, while simultaneously
			 *    intel_engine_pm_get_if_awake();
			 *    intel_engine_pm_put();
			 * occurs from atomic context in another.
			 */
			GEM_BUG_ON(intel_engine_pm_is_awake(engine));
			intel_engine_pm_get(engine);

			/* Enter the atomic phase holding one wakeref */
			p->critical_section_begin();
			if (!intel_engine_pm_get_if_awake(engine))
				pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
				       engine->name, p->name);
			else
				intel_engine_pm_put_async(engine);
			/* Release the original wakeref; async as we may be atomic */
			intel_engine_pm_put_async(engine);
			p->critical_section_end();

			/* Complete the deferred (async) put before checking */
			intel_engine_pm_flush(engine);

			if (intel_engine_pm_is_awake(engine)) {
				pr_err("%s is still awake after flushing pm\n",
				       engine->name);
				return -EINVAL;
			}

			/* gt wakeref is async (deferred to workqueue) */
			if (intel_gt_pm_wait_for_idle(gt)) {
				pr_err("GT failed to idle\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
378 
/*
 * Entry point for the engine power-management live selftests; runs each
 * subtest in table order against the given GT.
 */
int live_engine_pm_selftests(struct intel_gt *gt)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_engine_timestamps),
		SUBTEST(live_engine_busy_stats),
		SUBTEST(live_engine_pm),
	};

	return intel_gt_live_subtests(tests, gt);
}
389