1f0c02c1bSTvrtko Ursulin /*
2f0c02c1bSTvrtko Ursulin  * SPDX-License-Identifier: MIT
3f0c02c1bSTvrtko Ursulin  *
4f0c02c1bSTvrtko Ursulin  * Copyright © 2017-2018 Intel Corporation
5f0c02c1bSTvrtko Ursulin  */
6f0c02c1bSTvrtko Ursulin 
7f0c02c1bSTvrtko Ursulin #include <linux/prime_numbers.h>
8f0c02c1bSTvrtko Ursulin 
9bb5e4397SChris Wilson #include "intel_context.h"
10bb5e4397SChris Wilson #include "intel_engine_heartbeat.h"
117e805762SChris Wilson #include "intel_engine_pm.h"
12cb823ed9SChris Wilson #include "intel_gt.h"
1366101975SChris Wilson #include "intel_gt_requests.h"
142871ea85SChris Wilson #include "intel_ring.h"
151b90e4a4SChris Wilson #include "selftest_engine_heartbeat.h"
16f0c02c1bSTvrtko Ursulin 
17f0c02c1bSTvrtko Ursulin #include "../selftests/i915_random.h"
18f0c02c1bSTvrtko Ursulin #include "../i915_selftest.h"
19f0c02c1bSTvrtko Ursulin 
20f0c02c1bSTvrtko Ursulin #include "../selftests/igt_flush_test.h"
21f0c02c1bSTvrtko Ursulin #include "../selftests/mock_gem_device.h"
22f0c02c1bSTvrtko Ursulin #include "selftests/mock_timeline.h"
23f0c02c1bSTvrtko Ursulin 
24f0c02c1bSTvrtko Ursulin static struct page *hwsp_page(struct intel_timeline *tl)
25f0c02c1bSTvrtko Ursulin {
26f0c02c1bSTvrtko Ursulin 	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
27f0c02c1bSTvrtko Ursulin 
28f0c02c1bSTvrtko Ursulin 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
29f0c02c1bSTvrtko Ursulin 	return sg_page(obj->mm.pages->sgl);
30f0c02c1bSTvrtko Ursulin }
31f0c02c1bSTvrtko Ursulin 
32f0c02c1bSTvrtko Ursulin static unsigned long hwsp_cacheline(struct intel_timeline *tl)
33f0c02c1bSTvrtko Ursulin {
34f0c02c1bSTvrtko Ursulin 	unsigned long address = (unsigned long)page_address(hwsp_page(tl));
35f0c02c1bSTvrtko Ursulin 
36f0c02c1bSTvrtko Ursulin 	return (address + tl->hwsp_offset) / CACHELINE_BYTES;
37f0c02c1bSTvrtko Ursulin }
38f0c02c1bSTvrtko Ursulin 
39f0c02c1bSTvrtko Ursulin #define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
40f0c02c1bSTvrtko Ursulin 
41f0c02c1bSTvrtko Ursulin struct mock_hwsp_freelist {
425f65d5a6SChris Wilson 	struct intel_gt *gt;
43f0c02c1bSTvrtko Ursulin 	struct radix_tree_root cachelines;
44f0c02c1bSTvrtko Ursulin 	struct intel_timeline **history;
45f0c02c1bSTvrtko Ursulin 	unsigned long count, max;
46f0c02c1bSTvrtko Ursulin 	struct rnd_state prng;
47f0c02c1bSTvrtko Ursulin };
48f0c02c1bSTvrtko Ursulin 
49f0c02c1bSTvrtko Ursulin enum {
50f0c02c1bSTvrtko Ursulin 	SHUFFLE = BIT(0),
51f0c02c1bSTvrtko Ursulin };
52f0c02c1bSTvrtko Ursulin 
53f0c02c1bSTvrtko Ursulin static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
54f0c02c1bSTvrtko Ursulin 			       unsigned int idx,
55f0c02c1bSTvrtko Ursulin 			       struct intel_timeline *tl)
56f0c02c1bSTvrtko Ursulin {
57f0c02c1bSTvrtko Ursulin 	tl = xchg(&state->history[idx], tl);
58f0c02c1bSTvrtko Ursulin 	if (tl) {
59f0c02c1bSTvrtko Ursulin 		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
60f0c02c1bSTvrtko Ursulin 		intel_timeline_put(tl);
61f0c02c1bSTvrtko Ursulin 	}
62f0c02c1bSTvrtko Ursulin }
63f0c02c1bSTvrtko Ursulin 
/*
 * Create @count timelines, verifying that no two share a HWSP cacheline,
 * then retire a random number of the most recently recorded ones.
 * Returns 0 on success or a negative errno.
 */
static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
				unsigned int count,
				unsigned int flags)
{
	struct intel_timeline *tl;
	unsigned int idx;

	while (count--) {
		unsigned long cacheline;
		int err;

		tl = intel_timeline_create(state->gt);
		if (IS_ERR(tl))
			return PTR_ERR(tl);

		/* Two timelines on the same cacheline means the allocator lied */
		cacheline = hwsp_cacheline(tl);
		err = radix_tree_insert(&state->cachelines, cacheline, tl);
		if (err) {
			if (err == -EEXIST) {
				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
				       cacheline);
			}
			intel_timeline_put(tl);
			return err;
		}

		/* Record in the ring, retiring whatever held the slot before */
		idx = state->count++ % state->max;
		__mock_hwsp_record(state, idx, tl);
	}

	if (flags & SHUFFLE)
		i915_prandom_shuffle(state->history,
				     sizeof(*state->history),
				     min(state->count, state->max),
				     &state->prng);

	/* Retire a random number of the timelines currently held */
	count = i915_prandom_u32_max_state(min(state->count, state->max),
					   &state->prng);
	while (count--) {
		idx = --state->count % state->max;
		__mock_hwsp_record(state, idx, NULL);
	}

	return 0;
}
109f0c02c1bSTvrtko Ursulin 
/*
 * Selftest entry: allocate batches of timelines (prime-sized batches, up
 * to twice a page's worth of cachelines) and check their HWSP cachelines
 * never alias, for both linear and shuffled free patterns.
 */
static int mock_hwsp_freelist(void *arg)
{
	struct mock_hwsp_freelist state;
	struct drm_i915_private *i915;
	const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "linear", 0 },
		{ "shuffled", SHUFFLE },
		{ },
	}, *p;
	unsigned int na;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
	state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);

	state.gt = &i915->gt;

	/*
	 * Create a bunch of timelines and check that their HWSP do not overlap.
	 * Free some, and try again.
	 */

	state.max = PAGE_SIZE / sizeof(*state.history);
	state.count = 0;
	state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
	if (!state.history) {
		err = -ENOMEM;
		goto err_put;
	}

	for (p = phases; p->name; p++) {
		pr_debug("%s(%s)\n", __func__, p->name);
		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
			err = __mock_hwsp_timeline(&state, na, p->flags);
			if (err)
				goto out;
		}
	}

out:
	/* Flush any timelines still held in the history ring */
	for (na = 0; na < state.max; na++)
		__mock_hwsp_record(&state, na, NULL);
	kfree(state.history);
err_put:
	drm_dev_put(&i915->drm);
	return err;
}
164f0c02c1bSTvrtko Ursulin 
/* One scripted step of the igt_sync test vector. */
struct __igt_sync {
	const char *name;
	u32 seqno;
	bool expected;	/* expected result of __intel_timeline_sync_is_later() */
	bool set;	/* record the seqno in the timeline afterwards? */
};
171f0c02c1bSTvrtko Ursulin 
172f0c02c1bSTvrtko Ursulin static int __igt_sync(struct intel_timeline *tl,
173f0c02c1bSTvrtko Ursulin 		      u64 ctx,
174f0c02c1bSTvrtko Ursulin 		      const struct __igt_sync *p,
175f0c02c1bSTvrtko Ursulin 		      const char *name)
176f0c02c1bSTvrtko Ursulin {
177f0c02c1bSTvrtko Ursulin 	int ret;
178f0c02c1bSTvrtko Ursulin 
179f0c02c1bSTvrtko Ursulin 	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
180f0c02c1bSTvrtko Ursulin 		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
181f0c02c1bSTvrtko Ursulin 		       name, p->name, ctx, p->seqno, yesno(p->expected));
182f0c02c1bSTvrtko Ursulin 		return -EINVAL;
183f0c02c1bSTvrtko Ursulin 	}
184f0c02c1bSTvrtko Ursulin 
185f0c02c1bSTvrtko Ursulin 	if (p->set) {
186f0c02c1bSTvrtko Ursulin 		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
187f0c02c1bSTvrtko Ursulin 		if (ret)
188f0c02c1bSTvrtko Ursulin 			return ret;
189f0c02c1bSTvrtko Ursulin 	}
190f0c02c1bSTvrtko Ursulin 
191f0c02c1bSTvrtko Ursulin 	return 0;
192f0c02c1bSTvrtko Ursulin }
193f0c02c1bSTvrtko Ursulin 
/*
 * Exercise the timeline sync map with a scripted seqno sequence,
 * including wrap-around at UINT_MAX, over context ids clustered around
 * every power of two (to probe the map's internal branching).
 */
static int igt_sync(void *arg)
{
	const struct __igt_sync pass[] = {
		{ "unset", 0, false, false },
		{ "new", 0, false, true },
		{ "0a", 0, true, true },
		{ "1a", 1, false, true },
		{ "1b", 1, true, true },
		{ "0b", 0, true, false },
		{ "2a", 2, false, true },
		{ "4", 4, false, true },
		{ "INT_MAX", INT_MAX, false, true },
		{ "INT_MAX-1", INT_MAX-1, true, false },
		{ "INT_MAX+1", (u32)INT_MAX+1, false, true },
		{ "INT_MAX", INT_MAX, true, false },
		{ "UINT_MAX", UINT_MAX, false, true },
		{ "wrap", 0, false, true },
		{ "unwrap", UINT_MAX, true, false },
		{},
	}, *p;
	struct intel_timeline tl;
	int order, offset;
	int ret = -ENODEV;

	/* First pass: for each step, sweep every context id */
	mock_timeline_init(&tl, 0);
	for (p = pass; p->name; p++) {
		for (order = 1; order < 64; order++) {
			for (offset = -1; offset <= (order > 1); offset++) {
				u64 ctx = BIT_ULL(order) + offset;

				ret = __igt_sync(&tl, ctx, p, "1");
				if (ret)
					goto out;
			}
		}
	}
	mock_timeline_fini(&tl);

	/* Second pass: for each context id, run the whole script */
	mock_timeline_init(&tl, 0);
	for (order = 1; order < 64; order++) {
		for (offset = -1; offset <= (order > 1); offset++) {
			u64 ctx = BIT_ULL(order) + offset;

			for (p = pass; p->name; p++) {
				ret = __igt_sync(&tl, ctx, p, "2");
				if (ret)
					goto out;
			}
		}
	}

out:
	mock_timeline_fini(&tl);
	return ret;
}
249f0c02c1bSTvrtko Ursulin 
/* Pick a uniformly random engine index in [0, I915_NUM_ENGINES). */
static unsigned int random_engine(struct rnd_state *rnd)
{
	return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
254f0c02c1bSTvrtko Ursulin 
/*
 * Benchmark the timeline sync map: measure insert and lookup rates for
 * random, in-order and cyclic context-id patterns, reporting ns/op with
 * the measured prng overhead subtracted where relevant.  Always returns
 * 0 unless a lookup of a just-inserted id fails.
 */
static int bench_sync(void *arg)
{
	struct rnd_state prng;
	struct intel_timeline tl;
	unsigned long end_time, count;
	u64 prng32_1M;
	ktime_t kt;
	int order, last_order;

	mock_timeline_init(&tl, 0);

	/* Lookups from cache are very fast and so the random number generation
	 * and the loop itself becomes a significant factor in the per-iteration
	 * timings. We try to compensate the results by measuring the overhead
	 * of the prng and subtract it from the reported results.
	 */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 x;

		/* Make sure the compiler doesn't optimise away the prng call */
		WRITE_ONCE(x, prandom_u32_state(&prng));

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_debug("%s: %lu random evaluations, %lluns/prng\n",
		 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	/* ns per prng call, scaled by 2^20 for fixed-point arithmetic */
	prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);

	/* Benchmark (only) setting random context ids */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u64 id = i915_prandom_u64_state(&prng);

		__intel_timeline_sync_set(&tl, id, 0);
		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	/* Two u32 prng draws per u64 id, hence the factor of 2 */
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		u64 id = i915_prandom_u64_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
			mock_timeline_fini(&tl);
			pr_err("Lookup of %llu failed\n", id);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark setting the first N (in order) contexts */
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		__intel_timeline_sync_set(&tl, count++, 0);
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
			pr_err("Lookup of %lu failed\n", end_time);
			mock_timeline_fini(&tl);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark searching for a random context id and maybe changing it */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 id = random_engine(&prng);
		u32 seqno = prandom_u32_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
			__intel_timeline_sync_set(&tl, id, seqno);

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	mock_timeline_fini(&tl);
	cond_resched();

	/* Benchmark searching for a known context id and changing the seqno */
	for (last_order = 1, order = 1; order < 32;
	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
		unsigned int mask = BIT(order) - 1;

		mock_timeline_init(&tl, 0);

		count = 0;
		kt = ktime_get();
		end_time = jiffies + HZ/10;
		do {
			/* Without assuming too many details of the underlying
			 * implementation, try to identify its phase-changes
			 * (if any)!
			 */
			u64 id = (u64)(count & mask) << order;

			__intel_timeline_sync_is_later(&tl, id, 0);
			__intel_timeline_sync_set(&tl, id, 0);

			count++;
		} while (!time_after(jiffies, end_time));
		kt = ktime_sub(ktime_get(), kt);
		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
			__func__, count, order,
			(long long)div64_ul(ktime_to_ns(kt), count));
		mock_timeline_fini(&tl);
		cond_resched();
	}

	return 0;
}
410f0c02c1bSTvrtko Ursulin 
/* Entry point for the mock (no hardware required) timeline selftests. */
int intel_timeline_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_hwsp_freelist),
		SUBTEST(igt_sync),
		SUBTEST(bench_sync),
	};

	return i915_subtests(tests, NULL);
}
421f0c02c1bSTvrtko Ursulin 
422f0c02c1bSTvrtko Ursulin static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
423f0c02c1bSTvrtko Ursulin {
424f0c02c1bSTvrtko Ursulin 	u32 *cs;
425f0c02c1bSTvrtko Ursulin 
426f0c02c1bSTvrtko Ursulin 	cs = intel_ring_begin(rq, 4);
427f0c02c1bSTvrtko Ursulin 	if (IS_ERR(cs))
428f0c02c1bSTvrtko Ursulin 		return PTR_ERR(cs);
429f0c02c1bSTvrtko Ursulin 
4305a833995SChris Wilson 	if (INTEL_GEN(rq->engine->i915) >= 8) {
431f0c02c1bSTvrtko Ursulin 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
432f0c02c1bSTvrtko Ursulin 		*cs++ = addr;
433f0c02c1bSTvrtko Ursulin 		*cs++ = 0;
434f0c02c1bSTvrtko Ursulin 		*cs++ = value;
4355a833995SChris Wilson 	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
436f0c02c1bSTvrtko Ursulin 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
437f0c02c1bSTvrtko Ursulin 		*cs++ = 0;
438f0c02c1bSTvrtko Ursulin 		*cs++ = addr;
439f0c02c1bSTvrtko Ursulin 		*cs++ = value;
440f0c02c1bSTvrtko Ursulin 	} else {
441f0c02c1bSTvrtko Ursulin 		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
442f0c02c1bSTvrtko Ursulin 		*cs++ = addr;
443f0c02c1bSTvrtko Ursulin 		*cs++ = value;
444f0c02c1bSTvrtko Ursulin 		*cs++ = MI_NOOP;
445f0c02c1bSTvrtko Ursulin 	}
446f0c02c1bSTvrtko Ursulin 
447f0c02c1bSTvrtko Ursulin 	intel_ring_advance(rq, cs);
448f0c02c1bSTvrtko Ursulin 
449f0c02c1bSTvrtko Ursulin 	return 0;
450f0c02c1bSTvrtko Ursulin }
451f0c02c1bSTvrtko Ursulin 
/*
 * Submit a kernel request on @engine that writes @value into @tl's HWSP
 * slot via the GGTT.  On success returns the request with a reference
 * held for the caller; on failure returns an ERR_PTR.
 */
static struct i915_request *
tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
	struct i915_request *rq;
	int err;

	err = intel_timeline_pin(tl, NULL);
	if (err) {
		rq = ERR_PTR(err);
		goto out;
	}

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		goto out_unpin;

	/* Take our reference before add; the request may retire immediately */
	i915_request_get(rq);

	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		rq = ERR_PTR(err);
	}

out_unpin:
	intel_timeline_unpin(tl);
out:
	if (IS_ERR(rq))
		pr_err("Failed to write to timeline!\n");
	return rq;
}
484f0c02c1bSTvrtko Ursulin 
485f0c02c1bSTvrtko Ursulin static struct intel_timeline *
4865f65d5a6SChris Wilson checked_intel_timeline_create(struct intel_gt *gt)
487f0c02c1bSTvrtko Ursulin {
488f0c02c1bSTvrtko Ursulin 	struct intel_timeline *tl;
489f0c02c1bSTvrtko Ursulin 
490d1bf5dd8SChris Wilson 	tl = intel_timeline_create(gt);
491f0c02c1bSTvrtko Ursulin 	if (IS_ERR(tl))
492f0c02c1bSTvrtko Ursulin 		return tl;
493f0c02c1bSTvrtko Ursulin 
494e310b435SChris Wilson 	if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
495f0c02c1bSTvrtko Ursulin 		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
496f0c02c1bSTvrtko Ursulin 		       *tl->hwsp_seqno, tl->seqno);
497f0c02c1bSTvrtko Ursulin 		intel_timeline_put(tl);
498f0c02c1bSTvrtko Ursulin 		return ERR_PTR(-EINVAL);
499f0c02c1bSTvrtko Ursulin 	}
500f0c02c1bSTvrtko Ursulin 
501f0c02c1bSTvrtko Ursulin 	return tl;
502f0c02c1bSTvrtko Ursulin }
503f0c02c1bSTvrtko Ursulin 
/*
 * Per-engine HWSP independence test: create NUM_TIMELINES timelines on
 * each capable engine, write a unique value into each timeline's
 * breadcrumb slot from the GPU, then verify every slot holds its value.
 */
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for_each_engine(engine, gt, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Hold the engine awake across the whole batch of writes */
		intel_engine_pm_get(engine);

		for (n = 0; n < NUM_TIMELINES; n++) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				break;
			}

			/* The written value doubles as the expected index */
			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				break;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}

		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	/* Verify (unless already failing) and release every timeline */
	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
			GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
				      n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
			GEM_TRACE_DUMP();
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}
577f0c02c1bSTvrtko Ursulin 
/*
 * As live_hwsp_engine, but alternate engines between consecutive
 * timelines (engine loop innermost) so adjacent breadcrumb writes come
 * from different engines.
 */
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
	struct intel_gt *gt = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots with adjacent
	 * engines.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	count = 0;
	for (n = 0; n < NUM_TIMELINES; n++) {
		for_each_engine(engine, gt, id) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			if (!intel_engine_can_store_dword(engine))
				continue;

			tl = checked_intel_timeline_create(gt);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			/* Wake the engine only for the duration of one write */
			intel_engine_pm_get(engine);
			rq = tl_write(tl, engine, count);
			intel_engine_pm_put(engine);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			timelines[count++] = tl;
			i915_request_put(rq);
		}
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	/* Verify (unless already failing) and release every timeline */
	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
			GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
				      n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
			GEM_TRACE_DUMP();
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	kvfree(timelines);
	return err;
#undef NUM_TIMELINES
}
649f0c02c1bSTvrtko Ursulin 
static int live_hwsp_wrap(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct intel_timeline *tl;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Across a seqno wrap, we need to keep the old cacheline alive for
	 * foreign GPU references.
	 *
	 * Prime tl->seqno to just before the u32 wrap, allocate two seqno
	 * slots from within a single request, emit a GGTT store for each,
	 * and then verify both values were written to their (distinct)
	 * HWSP locations.
	 */

	tl = intel_timeline_create(gt);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	/* Needs a private HWSP cacheline we can watch across the wrap */
	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
		goto out_free;

	err = intel_timeline_pin(tl, NULL);
	if (err)
		goto out_free;

	for_each_engine(engine, gt, id) {
		const u32 *hwsp_seqno[2];
		struct i915_request *rq;
		u32 seqno[2];

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		/* A couple of steps short of the u32 seqno wrap */
		tl->seqno = -4u;

		/*
		 * NOTE(review): nested annotation presumably because rq's
		 * own timeline mutex is held until i915_request_add() —
		 * confirm against i915_request_create().
		 */
		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
			 seqno[0], tl->hwsp_offset);

		/* Ask the GPU to write seqno[0] into the current HWSP slot */
		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[0] = tl->hwsp_seqno;

		/* Second allocation crosses the wrap point set up above */
		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
		mutex_unlock(&tl->mutex);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
			 seqno[1], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[1] = tl->hwsp_seqno;

		/* With wrap should come a new hwsp */
		GEM_BUG_ON(seqno[1] >= seqno[0]);
		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);

		i915_request_add(rq);

		/*
		 * NOTE(review): rq is waited on after i915_request_add()
		 * without an extra reference being taken — confirm the
		 * submission reference keeps it alive for this wait.
		 */
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Wait for timeline writes timed out!\n");
			err = -EIO;
			goto out;
		}

		/* Both stores must have landed, old cacheline included */
		if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
		    READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
			       *hwsp_seqno[0], *hwsp_seqno[1],
			       seqno[0], seqno[1]);
			err = -EINVAL;
			goto out;
		}

		intel_gt_retire_requests(gt); /* recycle HWSP */
	}

out:
	if (igt_flush_test(gt->i915))
		err = -EIO;

	intel_timeline_unpin(tl);
out_free:
	intel_timeline_put(tl);
	return err;
}
757f0c02c1bSTvrtko Ursulin 
758bb5e4397SChris Wilson static int live_hwsp_rollover_kernel(void *arg)
759bb5e4397SChris Wilson {
760bb5e4397SChris Wilson 	struct intel_gt *gt = arg;
761bb5e4397SChris Wilson 	struct intel_engine_cs *engine;
762bb5e4397SChris Wilson 	enum intel_engine_id id;
763bb5e4397SChris Wilson 	int err = 0;
764bb5e4397SChris Wilson 
765bb5e4397SChris Wilson 	/*
766bb5e4397SChris Wilson 	 * Run the host for long enough, and even the kernel context will
767bb5e4397SChris Wilson 	 * see a seqno rollover.
768bb5e4397SChris Wilson 	 */
769bb5e4397SChris Wilson 
770bb5e4397SChris Wilson 	for_each_engine(engine, gt, id) {
771bb5e4397SChris Wilson 		struct intel_context *ce = engine->kernel_context;
772bb5e4397SChris Wilson 		struct intel_timeline *tl = ce->timeline;
773bb5e4397SChris Wilson 		struct i915_request *rq[3] = {};
774bb5e4397SChris Wilson 		int i;
775bb5e4397SChris Wilson 
7761b90e4a4SChris Wilson 		st_engine_heartbeat_disable(engine);
777bb5e4397SChris Wilson 		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
778bb5e4397SChris Wilson 			err = -EIO;
779bb5e4397SChris Wilson 			goto out;
780bb5e4397SChris Wilson 		}
781bb5e4397SChris Wilson 
782bb5e4397SChris Wilson 		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
783bb5e4397SChris Wilson 		tl->seqno = 0;
784bb5e4397SChris Wilson 		timeline_rollback(tl);
785bb5e4397SChris Wilson 		timeline_rollback(tl);
786bb5e4397SChris Wilson 		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
787bb5e4397SChris Wilson 
788bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
789bb5e4397SChris Wilson 			struct i915_request *this;
790bb5e4397SChris Wilson 
791bb5e4397SChris Wilson 			this = i915_request_create(ce);
792bb5e4397SChris Wilson 			if (IS_ERR(this)) {
793bb5e4397SChris Wilson 				err = PTR_ERR(this);
794bb5e4397SChris Wilson 				goto out;
795bb5e4397SChris Wilson 			}
796bb5e4397SChris Wilson 
797bb5e4397SChris Wilson 			pr_debug("%s: create fence.seqnp:%d\n",
798bb5e4397SChris Wilson 				 engine->name,
799bb5e4397SChris Wilson 				 lower_32_bits(this->fence.seqno));
800bb5e4397SChris Wilson 
801bb5e4397SChris Wilson 			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
802bb5e4397SChris Wilson 
803bb5e4397SChris Wilson 			rq[i] = i915_request_get(this);
804bb5e4397SChris Wilson 			i915_request_add(this);
805bb5e4397SChris Wilson 		}
806bb5e4397SChris Wilson 
807bb5e4397SChris Wilson 		/* We expected a wrap! */
808bb5e4397SChris Wilson 		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
809bb5e4397SChris Wilson 
810bb5e4397SChris Wilson 		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
811bb5e4397SChris Wilson 			pr_err("Wait for timeline wrap timed out!\n");
812bb5e4397SChris Wilson 			err = -EIO;
813bb5e4397SChris Wilson 			goto out;
814bb5e4397SChris Wilson 		}
815bb5e4397SChris Wilson 
816bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
817bb5e4397SChris Wilson 			if (!i915_request_completed(rq[i])) {
818bb5e4397SChris Wilson 				pr_err("Pre-wrap request not completed!\n");
819bb5e4397SChris Wilson 				err = -EINVAL;
820bb5e4397SChris Wilson 				goto out;
821bb5e4397SChris Wilson 			}
822bb5e4397SChris Wilson 		}
823bb5e4397SChris Wilson 
824bb5e4397SChris Wilson out:
825bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++)
826bb5e4397SChris Wilson 			i915_request_put(rq[i]);
8271b90e4a4SChris Wilson 		st_engine_heartbeat_enable(engine);
828bb5e4397SChris Wilson 		if (err)
829bb5e4397SChris Wilson 			break;
830bb5e4397SChris Wilson 	}
831bb5e4397SChris Wilson 
832bb5e4397SChris Wilson 	if (igt_flush_test(gt->i915))
833bb5e4397SChris Wilson 		err = -EIO;
834bb5e4397SChris Wilson 
835bb5e4397SChris Wilson 	return err;
836bb5e4397SChris Wilson }
837bb5e4397SChris Wilson 
838bb5e4397SChris Wilson static int live_hwsp_rollover_user(void *arg)
839bb5e4397SChris Wilson {
840bb5e4397SChris Wilson 	struct intel_gt *gt = arg;
841bb5e4397SChris Wilson 	struct intel_engine_cs *engine;
842bb5e4397SChris Wilson 	enum intel_engine_id id;
843bb5e4397SChris Wilson 	int err = 0;
844bb5e4397SChris Wilson 
845bb5e4397SChris Wilson 	/*
846bb5e4397SChris Wilson 	 * Simulate a long running user context, and force the seqno wrap
847bb5e4397SChris Wilson 	 * on the user's timeline.
848bb5e4397SChris Wilson 	 */
849bb5e4397SChris Wilson 
850bb5e4397SChris Wilson 	for_each_engine(engine, gt, id) {
851bb5e4397SChris Wilson 		struct i915_request *rq[3] = {};
852bb5e4397SChris Wilson 		struct intel_timeline *tl;
853bb5e4397SChris Wilson 		struct intel_context *ce;
854bb5e4397SChris Wilson 		int i;
855bb5e4397SChris Wilson 
856bb5e4397SChris Wilson 		ce = intel_context_create(engine);
857bb5e4397SChris Wilson 		if (IS_ERR(ce))
858bb5e4397SChris Wilson 			return PTR_ERR(ce);
859bb5e4397SChris Wilson 
860bb5e4397SChris Wilson 		err = intel_context_alloc_state(ce);
861bb5e4397SChris Wilson 		if (err)
862bb5e4397SChris Wilson 			goto out;
863bb5e4397SChris Wilson 
864bb5e4397SChris Wilson 		tl = ce->timeline;
865bb5e4397SChris Wilson 		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
866bb5e4397SChris Wilson 			goto out;
867bb5e4397SChris Wilson 
868bb5e4397SChris Wilson 		timeline_rollback(tl);
869bb5e4397SChris Wilson 		timeline_rollback(tl);
870bb5e4397SChris Wilson 		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
871bb5e4397SChris Wilson 
872bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
873bb5e4397SChris Wilson 			struct i915_request *this;
874bb5e4397SChris Wilson 
875bb5e4397SChris Wilson 			this = intel_context_create_request(ce);
876bb5e4397SChris Wilson 			if (IS_ERR(this)) {
877bb5e4397SChris Wilson 				err = PTR_ERR(this);
878bb5e4397SChris Wilson 				goto out;
879bb5e4397SChris Wilson 			}
880bb5e4397SChris Wilson 
881bb5e4397SChris Wilson 			pr_debug("%s: create fence.seqnp:%d\n",
882bb5e4397SChris Wilson 				 engine->name,
883bb5e4397SChris Wilson 				 lower_32_bits(this->fence.seqno));
884bb5e4397SChris Wilson 
885bb5e4397SChris Wilson 			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
886bb5e4397SChris Wilson 
887bb5e4397SChris Wilson 			rq[i] = i915_request_get(this);
888bb5e4397SChris Wilson 			i915_request_add(this);
889bb5e4397SChris Wilson 		}
890bb5e4397SChris Wilson 
891bb5e4397SChris Wilson 		/* We expected a wrap! */
892bb5e4397SChris Wilson 		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
893bb5e4397SChris Wilson 
894bb5e4397SChris Wilson 		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
895bb5e4397SChris Wilson 			pr_err("Wait for timeline wrap timed out!\n");
896bb5e4397SChris Wilson 			err = -EIO;
897bb5e4397SChris Wilson 			goto out;
898bb5e4397SChris Wilson 		}
899bb5e4397SChris Wilson 
900bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++) {
901bb5e4397SChris Wilson 			if (!i915_request_completed(rq[i])) {
902bb5e4397SChris Wilson 				pr_err("Pre-wrap request not completed!\n");
903bb5e4397SChris Wilson 				err = -EINVAL;
904bb5e4397SChris Wilson 				goto out;
905bb5e4397SChris Wilson 			}
906bb5e4397SChris Wilson 		}
907bb5e4397SChris Wilson 
908bb5e4397SChris Wilson out:
909bb5e4397SChris Wilson 		for (i = 0; i < ARRAY_SIZE(rq); i++)
910bb5e4397SChris Wilson 			i915_request_put(rq[i]);
911bb5e4397SChris Wilson 		intel_context_put(ce);
912bb5e4397SChris Wilson 		if (err)
913bb5e4397SChris Wilson 			break;
914bb5e4397SChris Wilson 	}
915bb5e4397SChris Wilson 
916bb5e4397SChris Wilson 	if (igt_flush_test(gt->i915))
917bb5e4397SChris Wilson 		err = -EIO;
918bb5e4397SChris Wilson 
919bb5e4397SChris Wilson 	return err;
920bb5e4397SChris Wilson }
921bb5e4397SChris Wilson 
922f0c02c1bSTvrtko Ursulin static int live_hwsp_recycle(void *arg)
923f0c02c1bSTvrtko Ursulin {
9245f65d5a6SChris Wilson 	struct intel_gt *gt = arg;
925f0c02c1bSTvrtko Ursulin 	struct intel_engine_cs *engine;
926f0c02c1bSTvrtko Ursulin 	enum intel_engine_id id;
927f0c02c1bSTvrtko Ursulin 	unsigned long count;
928f0c02c1bSTvrtko Ursulin 	int err = 0;
929f0c02c1bSTvrtko Ursulin 
930f0c02c1bSTvrtko Ursulin 	/*
931f0c02c1bSTvrtko Ursulin 	 * Check seqno writes into one timeline at a time. We expect to
932f0c02c1bSTvrtko Ursulin 	 * recycle the breadcrumb slot between iterations and neither
933f0c02c1bSTvrtko Ursulin 	 * want to confuse ourselves or the GPU.
934f0c02c1bSTvrtko Ursulin 	 */
935f0c02c1bSTvrtko Ursulin 
936f0c02c1bSTvrtko Ursulin 	count = 0;
9375d904e3cSTvrtko Ursulin 	for_each_engine(engine, gt, id) {
938f0c02c1bSTvrtko Ursulin 		IGT_TIMEOUT(end_time);
939f0c02c1bSTvrtko Ursulin 
940f0c02c1bSTvrtko Ursulin 		if (!intel_engine_can_store_dword(engine))
941f0c02c1bSTvrtko Ursulin 			continue;
942f0c02c1bSTvrtko Ursulin 
9437e805762SChris Wilson 		intel_engine_pm_get(engine);
9447e805762SChris Wilson 
945f0c02c1bSTvrtko Ursulin 		do {
946f0c02c1bSTvrtko Ursulin 			struct intel_timeline *tl;
947f0c02c1bSTvrtko Ursulin 			struct i915_request *rq;
948f0c02c1bSTvrtko Ursulin 
9495f65d5a6SChris Wilson 			tl = checked_intel_timeline_create(gt);
950f0c02c1bSTvrtko Ursulin 			if (IS_ERR(tl)) {
951f0c02c1bSTvrtko Ursulin 				err = PTR_ERR(tl);
9527e805762SChris Wilson 				break;
953f0c02c1bSTvrtko Ursulin 			}
954f0c02c1bSTvrtko Ursulin 
955f0c02c1bSTvrtko Ursulin 			rq = tl_write(tl, engine, count);
956f0c02c1bSTvrtko Ursulin 			if (IS_ERR(rq)) {
957f0c02c1bSTvrtko Ursulin 				intel_timeline_put(tl);
958f0c02c1bSTvrtko Ursulin 				err = PTR_ERR(rq);
9597e805762SChris Wilson 				break;
960f0c02c1bSTvrtko Ursulin 			}
961f0c02c1bSTvrtko Ursulin 
962f0c02c1bSTvrtko Ursulin 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
963f0c02c1bSTvrtko Ursulin 				pr_err("Wait for timeline writes timed out!\n");
9647e805762SChris Wilson 				i915_request_put(rq);
965f0c02c1bSTvrtko Ursulin 				intel_timeline_put(tl);
966f0c02c1bSTvrtko Ursulin 				err = -EIO;
9677e805762SChris Wilson 				break;
968f0c02c1bSTvrtko Ursulin 			}
969f0c02c1bSTvrtko Ursulin 
970e310b435SChris Wilson 			if (READ_ONCE(*tl->hwsp_seqno) != count) {
971e310b435SChris Wilson 				GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n",
972e310b435SChris Wilson 					      count, tl->fence_context,
973e310b435SChris Wilson 					      tl->hwsp_offset, *tl->hwsp_seqno);
974d45171acSChris Wilson 				GEM_TRACE_DUMP();
975f0c02c1bSTvrtko Ursulin 				err = -EINVAL;
976f0c02c1bSTvrtko Ursulin 			}
977f0c02c1bSTvrtko Ursulin 
9787e805762SChris Wilson 			i915_request_put(rq);
979f0c02c1bSTvrtko Ursulin 			intel_timeline_put(tl);
980f0c02c1bSTvrtko Ursulin 			count++;
981f0c02c1bSTvrtko Ursulin 
982f0c02c1bSTvrtko Ursulin 			if (err)
9837e805762SChris Wilson 				break;
984f0c02c1bSTvrtko Ursulin 		} while (!__igt_timeout(end_time, NULL));
985f0c02c1bSTvrtko Ursulin 
9867e805762SChris Wilson 		intel_engine_pm_put(engine);
9877e805762SChris Wilson 		if (err)
9887e805762SChris Wilson 			break;
9897e805762SChris Wilson 	}
990f0c02c1bSTvrtko Ursulin 
991f0c02c1bSTvrtko Ursulin 	return err;
992f0c02c1bSTvrtko Ursulin }
993f0c02c1bSTvrtko Ursulin 
/*
 * Entry point for the live (hardware-backed) timeline selftests.
 * Runs each subtest in table order against the GT, skipping entirely
 * if the GT is already wedged. Returns 0 or the first subtest error.
 */
int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_hwsp_recycle),
		SUBTEST(live_hwsp_engine),
		SUBTEST(live_hwsp_alternate),
		SUBTEST(live_hwsp_wrap),
		SUBTEST(live_hwsp_rollover_kernel),
		SUBTEST(live_hwsp_rollover_user),
	};

	/* A wedged GT cannot execute requests; nothing to test */
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
1010