xref: /openbmc/linux/drivers/gpu/drm/i915/gt/selftest_lrc.c (revision 4984dd069f2995f239f075199ee8c0d9f020bcd9)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #include <linux/prime_numbers.h>
8 
9 #include "gt/intel_reset.h"
10 #include "i915_selftest.h"
11 #include "selftests/i915_random.h"
12 #include "selftests/igt_flush_test.h"
13 #include "selftests/igt_gem_utils.h"
14 #include "selftests/igt_live_test.h"
15 #include "selftests/igt_spinner.h"
16 #include "selftests/lib_sw_fence.h"
17 #include "selftests/mock_context.h"
18 
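/*
 * Smoke check for execlists submission: on every engine, submit a spinning
 * request, confirm it starts executing, then terminate it and flush. If even
 * this fails, there is no point running the more elaborate preemption tests.
 */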
19 static int live_sanitycheck(void *arg)
20 {
21 	struct drm_i915_private *i915 = arg;
22 	struct intel_engine_cs *engine;
23 	struct i915_gem_context *ctx;
24 	enum intel_engine_id id;
25 	struct igt_spinner spin;
26 	intel_wakeref_t wakeref;
27 	int err = -ENOMEM;
28 
29 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
30 		return 0;
31 
32 	mutex_lock(&i915->drm.struct_mutex);
33 	wakeref = intel_runtime_pm_get(i915);
34 
35 	if (igt_spinner_init(&spin, i915))
36 		goto err_unlock;
37 
38 	ctx = kernel_context(i915);
39 	if (!ctx)
40 		goto err_spin;
41 
42 	for_each_engine(engine, i915, id) {
43 		struct i915_request *rq;
44 
45 		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
46 		if (IS_ERR(rq)) {
47 			err = PTR_ERR(rq);
48 			goto err_ctx;
49 		}
50 
51 		i915_request_add(rq);
52 		if (!igt_wait_for_spinner(&spin, rq)) {
53 			GEM_TRACE("spinner failed to start\n");
54 			GEM_TRACE_DUMP();
55 			i915_gem_set_wedged(i915);
56 			err = -EIO;
57 			goto err_ctx;
58 		}
59 
60 		igt_spinner_end(&spin);
61 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
62 			err = -EIO;
63 			goto err_ctx;
64 		}
65 	}
66 
67 	err = 0;
68 err_ctx:
69 	kernel_context_close(ctx);
70 err_spin:
71 	igt_spinner_fini(&spin);
72 err_unlock:
73 	igt_flush_test(i915, I915_WAIT_LOCKED);
74 	intel_runtime_pm_put(i915, wakeref);
75 	mutex_unlock(&i915->drm.struct_mutex);
76 	return err;
77 }
78 
79 static int live_busywait_preempt(void *arg)
80 {
81 	struct drm_i915_private *i915 = arg;
82 	struct i915_gem_context *ctx_hi, *ctx_lo;
83 	struct intel_engine_cs *engine;
84 	struct drm_i915_gem_object *obj;
85 	struct i915_vma *vma;
86 	enum intel_engine_id id;
87 	intel_wakeref_t wakeref;
88 	int err = -ENOMEM;
89 	u32 *map;
90 
91 	/*
92 	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
93 	 * preempt the busywaits used to synchronise between rings.
94 	 */
95 
96 	mutex_lock(&i915->drm.struct_mutex);
97 	wakeref = intel_runtime_pm_get(i915);
98 
99 	ctx_hi = kernel_context(i915);
100 	if (!ctx_hi)
101 		goto err_unlock;
102 	ctx_hi->sched.priority =
103 		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
104 
105 	ctx_lo = kernel_context(i915);
106 	if (!ctx_lo)
107 		goto err_ctx_hi;
108 	ctx_lo->sched.priority =
109 		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
110 
111 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
112 	if (IS_ERR(obj)) {
113 		err = PTR_ERR(obj);
114 		goto err_ctx_lo;
115 	}
116 
117 	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
118 	if (IS_ERR(map)) {
119 		err = PTR_ERR(map);
120 		goto err_obj;
121 	}
122 
123 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
124 	if (IS_ERR(vma)) {
125 		err = PTR_ERR(vma);
126 		goto err_map;
127 	}
128 
129 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
130 	if (err)
131 		goto err_map;
132 
133 	for_each_engine(engine, i915, id) {
134 		struct i915_request *lo, *hi;
135 		struct igt_live_test t;
136 		u32 *cs;
137 
138 		if (!intel_engine_can_store_dword(engine))
139 			continue;
140 
141 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
142 			err = -EIO;
143 			goto err_vma;
144 		}
145 
146 		/*
147 		 * We create two requests. The low priority request
148 		 * busywaits on a semaphore (inside the ringbuffer where
149 		 * it should be preemptible) and the high priority request
150 		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
151 		 * allowing the first request to complete. If preemption
152 		 * fails, we hang instead.
153 		 */
154 
155 		lo = igt_request_alloc(ctx_lo, engine);
156 		if (IS_ERR(lo)) {
157 			err = PTR_ERR(lo);
158 			goto err_vma;
159 		}
160 
161 		cs = intel_ring_begin(lo, 8);
162 		if (IS_ERR(cs)) {
163 			err = PTR_ERR(cs);
164 			i915_request_add(lo);
165 			goto err_vma;
166 		}
167 
168 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
169 		*cs++ = i915_ggtt_offset(vma);
170 		*cs++ = 0;
171 		*cs++ = 1;
172 
173 		/* XXX Do we need a flush + invalidate here? */
174 
175 		*cs++ = MI_SEMAPHORE_WAIT |
176 			MI_SEMAPHORE_GLOBAL_GTT |
177 			MI_SEMAPHORE_POLL |
178 			MI_SEMAPHORE_SAD_EQ_SDD;
179 		*cs++ = 0;
180 		*cs++ = i915_ggtt_offset(vma);
181 		*cs++ = 0;
182 
183 		intel_ring_advance(lo, cs);
184 		i915_request_add(lo);
185 
186 		if (wait_for(READ_ONCE(*map), 10)) {
187 			err = -ETIMEDOUT;
188 			goto err_vma;
189 		}
190 
191 		/* Low priority request should be busywaiting now */
192 		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
193 			pr_err("%s: Busywaiting request did not busywait!\n",
194 			       engine->name);
195 			err = -EIO;
196 			goto err_vma;
197 		}
198 
199 		hi = igt_request_alloc(ctx_hi, engine);
200 		if (IS_ERR(hi)) {
201 			err = PTR_ERR(hi);
202 			goto err_vma;
203 		}
204 
205 		cs = intel_ring_begin(hi, 4);
206 		if (IS_ERR(cs)) {
207 			err = PTR_ERR(cs);
208 			i915_request_add(hi);
209 			goto err_vma;
210 		}
211 
212 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
213 		*cs++ = i915_ggtt_offset(vma);
214 		*cs++ = 0;
215 		*cs++ = 0;
216 
217 		intel_ring_advance(hi, cs);
218 		i915_request_add(hi);
219 
220 		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
221 			struct drm_printer p = drm_info_printer(i915->drm.dev);
222 
223 			pr_err("%s: Failed to preempt semaphore busywait!\n",
224 			       engine->name);
225 
226 			intel_engine_dump(engine, &p, "%s\n", engine->name);
227 			GEM_TRACE_DUMP();
228 
229 			i915_gem_set_wedged(i915);
230 			err = -EIO;
231 			goto err_vma;
232 		}
233 		GEM_BUG_ON(READ_ONCE(*map));
234 
235 		if (igt_live_test_end(&t)) {
236 			err = -EIO;
237 			goto err_vma;
238 		}
239 	}
240 
241 	err = 0;
242 err_vma:
243 	i915_vma_unpin(vma);
244 err_map:
245 	i915_gem_object_unpin_map(obj);
246 err_obj:
247 	i915_gem_object_put(obj);
248 err_ctx_lo:
249 	kernel_context_close(ctx_lo);
250 err_ctx_hi:
251 	kernel_context_close(ctx_hi);
252 err_unlock:
253 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
254 		err = -EIO;
255 	intel_runtime_pm_put(i915, wakeref);
256 	mutex_unlock(&i915->drm.struct_mutex);
257 	return err;
258 }
259 
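/*
 * Basic preemption: submit a spinner from a minimum-priority context and
 * then a spinner from a maximum-priority context on the same engine. The
 * high priority spinner must start executing while the low priority one is
 * still spinning, i.e. it must preempt rather than queue behind it.
 */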
260 static int live_preempt(void *arg)
261 {
262 	struct drm_i915_private *i915 = arg;
263 	struct i915_gem_context *ctx_hi, *ctx_lo;
264 	struct igt_spinner spin_hi, spin_lo;
265 	struct intel_engine_cs *engine;
266 	enum intel_engine_id id;
267 	intel_wakeref_t wakeref;
268 	int err = -ENOMEM;
269 
270 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
271 		return 0;
272 
273 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
274 		pr_err("Logical preemption supported, but not exposed\n");
275 
276 	mutex_lock(&i915->drm.struct_mutex);
277 	wakeref = intel_runtime_pm_get(i915);
278 
279 	if (igt_spinner_init(&spin_hi, i915))
280 		goto err_unlock;
281 
282 	if (igt_spinner_init(&spin_lo, i915))
283 		goto err_spin_hi;
284 
285 	ctx_hi = kernel_context(i915);
286 	if (!ctx_hi)
287 		goto err_spin_lo;
288 	ctx_hi->sched.priority =
289 		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
290 
291 	ctx_lo = kernel_context(i915);
292 	if (!ctx_lo)
293 		goto err_ctx_hi;
294 	ctx_lo->sched.priority =
295 		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
296 
297 	for_each_engine(engine, i915, id) {
298 		struct igt_live_test t;
299 		struct i915_request *rq;
300 
301 		if (!intel_engine_has_preemption(engine))
302 			continue;
303 
304 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
305 			err = -EIO;
306 			goto err_ctx_lo;
307 		}
308 
309 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
310 						MI_ARB_CHECK);
311 		if (IS_ERR(rq)) {
312 			err = PTR_ERR(rq);
313 			goto err_ctx_lo;
314 		}
315 
316 		i915_request_add(rq);
317 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
318 			GEM_TRACE("lo spinner failed to start\n");
319 			GEM_TRACE_DUMP();
320 			i915_gem_set_wedged(i915);
321 			err = -EIO;
322 			goto err_ctx_lo;
323 		}
324 
325 		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
326 						MI_ARB_CHECK);
327 		if (IS_ERR(rq)) {
328 			igt_spinner_end(&spin_lo);
329 			err = PTR_ERR(rq);
330 			goto err_ctx_lo;
331 		}
332 
333 		i915_request_add(rq);
334 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
335 			GEM_TRACE("hi spinner failed to start\n");
336 			GEM_TRACE_DUMP();
337 			i915_gem_set_wedged(i915);
338 			err = -EIO;
339 			goto err_ctx_lo;
340 		}
341 
342 		igt_spinner_end(&spin_hi);
343 		igt_spinner_end(&spin_lo);
344 
345 		if (igt_live_test_end(&t)) {
346 			err = -EIO;
347 			goto err_ctx_lo;
348 		}
349 	}
350 
351 	err = 0;
352 err_ctx_lo:
353 	kernel_context_close(ctx_lo);
354 err_ctx_hi:
355 	kernel_context_close(ctx_hi);
356 err_spin_lo:
357 	igt_spinner_fini(&spin_lo);
358 err_spin_hi:
359 	igt_spinner_fini(&spin_hi);
360 err_unlock:
361 	igt_flush_test(i915, I915_WAIT_LOCKED);
362 	intel_runtime_pm_put(i915, wakeref);
363 	mutex_unlock(&i915->drm.struct_mutex);
364 	return err;
365 }
366 
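/*
 * Late preemption: both contexts start at default priority, so the second
 * spinner initially queues behind the first. Only after bumping the second
 * request to maximum priority via engine->schedule() do we expect it to
 * preempt the still-spinning first context.
 */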
367 static int live_late_preempt(void *arg)
368 {
369 	struct drm_i915_private *i915 = arg;
370 	struct i915_gem_context *ctx_hi, *ctx_lo;
371 	struct igt_spinner spin_hi, spin_lo;
372 	struct intel_engine_cs *engine;
373 	struct i915_sched_attr attr = {};
374 	enum intel_engine_id id;
375 	intel_wakeref_t wakeref;
376 	int err = -ENOMEM;
377 
378 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
379 		return 0;
380 
381 	mutex_lock(&i915->drm.struct_mutex);
382 	wakeref = intel_runtime_pm_get(i915);
383 
384 	if (igt_spinner_init(&spin_hi, i915))
385 		goto err_unlock;
386 
387 	if (igt_spinner_init(&spin_lo, i915))
388 		goto err_spin_hi;
389 
390 	ctx_hi = kernel_context(i915);
391 	if (!ctx_hi)
392 		goto err_spin_lo;
393 
394 	ctx_lo = kernel_context(i915);
395 	if (!ctx_lo)
396 		goto err_ctx_hi;
397 
398 	for_each_engine(engine, i915, id) {
399 		struct igt_live_test t;
400 		struct i915_request *rq;
401 
402 		if (!intel_engine_has_preemption(engine))
403 			continue;
404 
405 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
406 			err = -EIO;
407 			goto err_ctx_lo;
408 		}
409 
410 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
411 						MI_ARB_CHECK);
412 		if (IS_ERR(rq)) {
413 			err = PTR_ERR(rq);
414 			goto err_ctx_lo;
415 		}
416 
417 		i915_request_add(rq);
418 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
419 			pr_err("First context failed to start\n");
420 			goto err_wedged;
421 		}
422 
423 		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
424 						MI_NOOP);
425 		if (IS_ERR(rq)) {
426 			igt_spinner_end(&spin_lo);
427 			err = PTR_ERR(rq);
428 			goto err_ctx_lo;
429 		}
430 
431 		i915_request_add(rq);
432 		if (igt_wait_for_spinner(&spin_hi, rq)) {
433 			pr_err("Second context overtook first?\n");
434 			goto err_wedged;
435 		}
436 
437 		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
438 		engine->schedule(rq, &attr);
439 
440 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
441 			pr_err("High priority context failed to preempt the low priority context\n");
442 			GEM_TRACE_DUMP();
443 			goto err_wedged;
444 		}
445 
446 		igt_spinner_end(&spin_hi);
447 		igt_spinner_end(&spin_lo);
448 
449 		if (igt_live_test_end(&t)) {
450 			err = -EIO;
451 			goto err_ctx_lo;
452 		}
453 	}
454 
455 	err = 0;
456 err_ctx_lo:
457 	kernel_context_close(ctx_lo);
458 err_ctx_hi:
459 	kernel_context_close(ctx_hi);
460 err_spin_lo:
461 	igt_spinner_fini(&spin_lo);
462 err_spin_hi:
463 	igt_spinner_fini(&spin_hi);
464 err_unlock:
465 	igt_flush_test(i915, I915_WAIT_LOCKED);
466 	intel_runtime_pm_put(i915, wakeref);
467 	mutex_unlock(&i915->drm.struct_mutex);
468 	return err;
469 
470 err_wedged:
471 	igt_spinner_end(&spin_hi);
472 	igt_spinner_end(&spin_lo);
473 	i915_gem_set_wedged(i915);
474 	err = -EIO;
475 	goto err_ctx_lo;
476 }
477 
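/*
 * A preempt_client bundles a kernel context with its own spinner so that
 * the tests below can juggle several independent sources of spinning
 * requests.
 */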
478 struct preempt_client {
479 	struct igt_spinner spin;
480 	struct i915_gem_context *ctx;
481 };
482 
483 static int preempt_client_init(struct drm_i915_private *i915,
484 			       struct preempt_client *c)
485 {
486 	c->ctx = kernel_context(i915);
487 	if (!c->ctx)
488 		return -ENOMEM;
489 
490 	if (igt_spinner_init(&c->spin, i915))
491 		goto err_ctx;
492 
493 	return 0;
494 
495 err_ctx:
496 	kernel_context_close(c->ctx);
497 	return -ENOMEM;
498 }
499 
500 static void preempt_client_fini(struct preempt_client *c)
501 {
502 	igt_spinner_fini(&c->spin);
503 	kernel_context_close(c->ctx);
504 }
505 
506 static int live_suppress_self_preempt(void *arg)
507 {
508 	struct drm_i915_private *i915 = arg;
509 	struct intel_engine_cs *engine;
510 	struct i915_sched_attr attr = {
511 		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
512 	};
513 	struct preempt_client a, b;
514 	enum intel_engine_id id;
515 	intel_wakeref_t wakeref;
516 	int err = -ENOMEM;
517 
518 	/*
519 	 * Verify that if a preemption request does not cause a change in
520 	 * the current execution order, the preempt-to-idle injection is
521 	 * skipped and that we do not accidentally apply it after the CS
522 	 * completion event.
523 	 */
524 
525 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
526 		return 0;
527 
528 	if (USES_GUC_SUBMISSION(i915))
529 		return 0; /* presume black box */
530 
531 	mutex_lock(&i915->drm.struct_mutex);
532 	wakeref = intel_runtime_pm_get(i915);
533 
534 	if (preempt_client_init(i915, &a))
535 		goto err_unlock;
536 	if (preempt_client_init(i915, &b))
537 		goto err_client_a;
538 
539 	for_each_engine(engine, i915, id) {
540 		struct i915_request *rq_a, *rq_b;
541 		int depth;
542 
543 		if (!intel_engine_has_preemption(engine))
544 			continue;
545 
546 		engine->execlists.preempt_hang.count = 0;
547 
548 		rq_a = igt_spinner_create_request(&a.spin,
549 						  a.ctx, engine,
550 						  MI_NOOP);
551 		if (IS_ERR(rq_a)) {
552 			err = PTR_ERR(rq_a);
553 			goto err_client_b;
554 		}
555 
556 		i915_request_add(rq_a);
557 		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
558 			pr_err("First client failed to start\n");
559 			goto err_wedged;
560 		}
561 
562 		for (depth = 0; depth < 8; depth++) {
563 			rq_b = igt_spinner_create_request(&b.spin,
564 							  b.ctx, engine,
565 							  MI_NOOP);
566 			if (IS_ERR(rq_b)) {
567 				err = PTR_ERR(rq_b);
568 				goto err_client_b;
569 			}
570 			i915_request_add(rq_b);
571 
572 			GEM_BUG_ON(i915_request_completed(rq_a));
573 			engine->schedule(rq_a, &attr);
574 			igt_spinner_end(&a.spin);
575 
576 			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
577 				pr_err("Second client failed to start\n");
578 				goto err_wedged;
579 			}
580 
581 			swap(a, b);
582 			rq_a = rq_b;
583 		}
584 		igt_spinner_end(&a.spin);
585 
586 		if (engine->execlists.preempt_hang.count) {
587 			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
588 			       engine->execlists.preempt_hang.count,
589 			       depth);
590 			err = -EINVAL;
591 			goto err_client_b;
592 		}
593 
594 		if (igt_flush_test(i915, I915_WAIT_LOCKED))
595 			goto err_wedged;
596 	}
597 
598 	err = 0;
599 err_client_b:
600 	preempt_client_fini(&b);
601 err_client_a:
602 	preempt_client_fini(&a);
603 err_unlock:
604 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
605 		err = -EIO;
606 	intel_runtime_pm_put(i915, wakeref);
607 	mutex_unlock(&i915->drm.struct_mutex);
608 	return err;
609 
610 err_wedged:
611 	igt_spinner_end(&b.spin);
612 	igt_spinner_end(&a.spin);
613 	i915_gem_set_wedged(i915);
614 	err = -EIO;
615 	goto err_client_b;
616 }
617 
618 static int __i915_sw_fence_call
619 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
620 {
621 	return NOTIFY_DONE;
622 }
623 
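/*
 * Fabricate a bare, permanently incomplete request that never reaches the
 * hardware. It is used purely as a dependency placeholder (e.g. to suppress
 * the NEWCLIENT priority boost) and is completed by hand in
 * dummy_request_free() once the test no longer needs it.
 */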
624 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
625 {
626 	struct i915_request *rq;
627 
628 	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
629 	if (!rq)
630 		return NULL;
631 
632 	INIT_LIST_HEAD(&rq->active_list);
633 	rq->engine = engine;
634 
635 	i915_sched_node_init(&rq->sched);
636 
637 	/* mark this request as permanently incomplete */
638 	rq->fence.seqno = 1;
639 	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
640 	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
641 	GEM_BUG_ON(i915_request_completed(rq));
642 
643 	i915_sw_fence_init(&rq->submit, dummy_notify);
644 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
645 
646 	return rq;
647 }
648 
649 static void dummy_request_free(struct i915_request *dummy)
650 {
651 	/* We have to fake the CS interrupt to kick the next request */
652 	i915_sw_fence_commit(&dummy->submit);
653 
654 	i915_request_mark_complete(dummy);
655 	dma_fence_signal(&dummy->fence);
656 
657 	i915_sched_node_fini(&dummy->sched);
658 	i915_sw_fence_fini(&dummy->submit);
659 
660 	dma_fence_free(&dummy->fence);
661 }
662 
663 static int live_suppress_wait_preempt(void *arg)
664 {
665 	struct drm_i915_private *i915 = arg;
666 	struct preempt_client client[4];
667 	struct intel_engine_cs *engine;
668 	enum intel_engine_id id;
669 	intel_wakeref_t wakeref;
670 	int err = -ENOMEM;
671 	int i;
672 
673 	/*
674 	 * Waiters are given a little priority nudge, but not enough
675 	 * to actually cause any preemption. Double check that we do
676 	 * not needlessly generate preempt-to-idle cycles.
677 	 */
678 
679 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
680 		return 0;
681 
682 	mutex_lock(&i915->drm.struct_mutex);
683 	wakeref = intel_runtime_pm_get(i915);
684 
685 	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
686 		goto err_unlock;
687 	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
688 		goto err_client_0;
689 	if (preempt_client_init(i915, &client[2])) /* head of queue */
690 		goto err_client_1;
691 	if (preempt_client_init(i915, &client[3])) /* bystander */
692 		goto err_client_2;
693 
694 	for_each_engine(engine, i915, id) {
695 		int depth;
696 
697 		if (!intel_engine_has_preemption(engine))
698 			continue;
699 
700 		if (!engine->emit_init_breadcrumb)
701 			continue;
702 
703 		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
704 			struct i915_request *rq[ARRAY_SIZE(client)];
705 			struct i915_request *dummy;
706 
707 			engine->execlists.preempt_hang.count = 0;
708 
709 			dummy = dummy_request(engine);
710 			if (!dummy)
711 				goto err_client_3;
712 
713 			for (i = 0; i < ARRAY_SIZE(client); i++) {
714 				rq[i] = igt_spinner_create_request(&client[i].spin,
715 								   client[i].ctx, engine,
716 								   MI_NOOP);
717 				if (IS_ERR(rq[i])) {
718 					err = PTR_ERR(rq[i]);
719 					goto err_wedged;
720 				}
721 
722 				/* Disable NEWCLIENT promotion */
723 				__i915_active_request_set(&rq[i]->timeline->last_request,
724 							  dummy);
725 				i915_request_add(rq[i]);
726 			}
727 
728 			dummy_request_free(dummy);
729 
730 			GEM_BUG_ON(i915_request_completed(rq[0]));
731 			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
732 				pr_err("%s: First client failed to start\n",
733 				       engine->name);
734 				goto err_wedged;
735 			}
736 			GEM_BUG_ON(!i915_request_started(rq[0]));
737 
738 			if (i915_request_wait(rq[depth],
739 					      I915_WAIT_LOCKED |
740 					      I915_WAIT_PRIORITY,
741 					      1) != -ETIME) {
742 				pr_err("%s: Waiter depth:%d completed!\n",
743 				       engine->name, depth);
744 				goto err_wedged;
745 			}
746 
747 			for (i = 0; i < ARRAY_SIZE(client); i++)
748 				igt_spinner_end(&client[i].spin);
749 
750 			if (igt_flush_test(i915, I915_WAIT_LOCKED))
751 				goto err_wedged;
752 
753 			if (engine->execlists.preempt_hang.count) {
754 				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
755 				       engine->name,
756 				       engine->execlists.preempt_hang.count,
757 				       depth);
758 				err = -EINVAL;
759 				goto err_client_3;
760 			}
761 		}
762 	}
763 
764 	err = 0;
765 err_client_3:
766 	preempt_client_fini(&client[3]);
767 err_client_2:
768 	preempt_client_fini(&client[2]);
769 err_client_1:
770 	preempt_client_fini(&client[1]);
771 err_client_0:
772 	preempt_client_fini(&client[0]);
773 err_unlock:
774 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
775 		err = -EIO;
776 	intel_runtime_pm_put(i915, wakeref);
777 	mutex_unlock(&i915->drm.struct_mutex);
778 	return err;
779 
780 err_wedged:
781 	for (i = 0; i < ARRAY_SIZE(client); i++)
782 		igt_spinner_end(&client[i].spin);
783 	i915_gem_set_wedged(i915);
784 	err = -EIO;
785 	goto err_client_3;
786 }
787 
788 static int live_chain_preempt(void *arg)
789 {
790 	struct drm_i915_private *i915 = arg;
791 	struct intel_engine_cs *engine;
792 	struct preempt_client hi, lo;
793 	enum intel_engine_id id;
794 	intel_wakeref_t wakeref;
795 	int err = -ENOMEM;
796 
797 	/*
798 	 * Build a chain AB...BA between two contexts (A, B) and request
799 	 * preemption of the last request. It should then complete before
800 	 * the previously submitted spinner in B.
801 	 */
802 
803 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
804 		return 0;
805 
806 	mutex_lock(&i915->drm.struct_mutex);
807 	wakeref = intel_runtime_pm_get(i915);
808 
809 	if (preempt_client_init(i915, &hi))
810 		goto err_unlock;
811 
812 	if (preempt_client_init(i915, &lo))
813 		goto err_client_hi;
814 
815 	for_each_engine(engine, i915, id) {
816 		struct i915_sched_attr attr = {
817 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
818 		};
819 		struct igt_live_test t;
820 		struct i915_request *rq;
821 		int ring_size, count, i;
822 
823 		if (!intel_engine_has_preemption(engine))
824 			continue;
825 
826 		rq = igt_spinner_create_request(&lo.spin,
827 						lo.ctx, engine,
828 						MI_ARB_CHECK);
829 		if (IS_ERR(rq))
830 			goto err_wedged;
831 		i915_request_add(rq);
832 
833 		ring_size = rq->wa_tail - rq->head;
834 		if (ring_size < 0)
835 			ring_size += rq->ring->size;
836 		ring_size = rq->ring->size / ring_size;
837 		pr_debug("%s(%s): Using maximum of %d requests\n",
838 			 __func__, engine->name, ring_size);
839 
840 		igt_spinner_end(&lo.spin);
841 		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
842 			pr_err("Timed out waiting to flush %s\n", engine->name);
843 			goto err_wedged;
844 		}
845 
846 		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
847 			err = -EIO;
848 			goto err_wedged;
849 		}
850 
851 		for_each_prime_number_from(count, 1, ring_size) {
852 			rq = igt_spinner_create_request(&hi.spin,
853 							hi.ctx, engine,
854 							MI_ARB_CHECK);
855 			if (IS_ERR(rq))
856 				goto err_wedged;
857 			i915_request_add(rq);
858 			if (!igt_wait_for_spinner(&hi.spin, rq))
859 				goto err_wedged;
860 
861 			rq = igt_spinner_create_request(&lo.spin,
862 							lo.ctx, engine,
863 							MI_ARB_CHECK);
864 			if (IS_ERR(rq))
865 				goto err_wedged;
866 			i915_request_add(rq);
867 
868 			for (i = 0; i < count; i++) {
869 				rq = igt_request_alloc(lo.ctx, engine);
870 				if (IS_ERR(rq))
871 					goto err_wedged;
872 				i915_request_add(rq);
873 			}
874 
875 			rq = igt_request_alloc(hi.ctx, engine);
876 			if (IS_ERR(rq))
877 				goto err_wedged;
878 			i915_request_add(rq);
879 			engine->schedule(rq, &attr);
880 
881 			igt_spinner_end(&hi.spin);
882 			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
883 				struct drm_printer p =
884 					drm_info_printer(i915->drm.dev);
885 
886 				pr_err("Failed to preempt over chain of %d\n",
887 				       count);
888 				intel_engine_dump(engine, &p,
889 						  "%s\n", engine->name);
890 				goto err_wedged;
891 			}
892 			igt_spinner_end(&lo.spin);
893 
894 			rq = igt_request_alloc(lo.ctx, engine);
895 			if (IS_ERR(rq))
896 				goto err_wedged;
897 			i915_request_add(rq);
898 			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
899 				struct drm_printer p =
900 					drm_info_printer(i915->drm.dev);
901 
902 				pr_err("Failed to flush low priority chain of %d requests\n",
903 				       count);
904 				intel_engine_dump(engine, &p,
905 						  "%s\n", engine->name);
906 				goto err_wedged;
907 			}
908 		}
909 
910 		if (igt_live_test_end(&t)) {
911 			err = -EIO;
912 			goto err_wedged;
913 		}
914 	}
915 
916 	err = 0;
917 err_client_lo:
918 	preempt_client_fini(&lo);
919 err_client_hi:
920 	preempt_client_fini(&hi);
921 err_unlock:
922 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
923 		err = -EIO;
924 	intel_runtime_pm_put(i915, wakeref);
925 	mutex_unlock(&i915->drm.struct_mutex);
926 	return err;
927 
928 err_wedged:
929 	igt_spinner_end(&hi.spin);
930 	igt_spinner_end(&lo.spin);
931 	i915_gem_set_wedged(i915);
932 	err = -EIO;
933 	goto err_client_lo;
934 }
935 
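/*
 * Inject a hang while the engine is processing the preempt-to-idle request
 * (preempt_hang.inject_hang), recover with a per-engine reset, and check
 * that the high priority spinner still runs afterwards. Requires engine
 * reset support.
 */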
936 static int live_preempt_hang(void *arg)
937 {
938 	struct drm_i915_private *i915 = arg;
939 	struct i915_gem_context *ctx_hi, *ctx_lo;
940 	struct igt_spinner spin_hi, spin_lo;
941 	struct intel_engine_cs *engine;
942 	enum intel_engine_id id;
943 	intel_wakeref_t wakeref;
944 	int err = -ENOMEM;
945 
946 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
947 		return 0;
948 
949 	if (!intel_has_reset_engine(i915))
950 		return 0;
951 
952 	mutex_lock(&i915->drm.struct_mutex);
953 	wakeref = intel_runtime_pm_get(i915);
954 
955 	if (igt_spinner_init(&spin_hi, i915))
956 		goto err_unlock;
957 
958 	if (igt_spinner_init(&spin_lo, i915))
959 		goto err_spin_hi;
960 
961 	ctx_hi = kernel_context(i915);
962 	if (!ctx_hi)
963 		goto err_spin_lo;
964 	ctx_hi->sched.priority =
965 		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
966 
967 	ctx_lo = kernel_context(i915);
968 	if (!ctx_lo)
969 		goto err_ctx_hi;
970 	ctx_lo->sched.priority =
971 		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
972 
973 	for_each_engine(engine, i915, id) {
974 		struct i915_request *rq;
975 
976 		if (!intel_engine_has_preemption(engine))
977 			continue;
978 
979 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
980 						MI_ARB_CHECK);
981 		if (IS_ERR(rq)) {
982 			err = PTR_ERR(rq);
983 			goto err_ctx_lo;
984 		}
985 
986 		i915_request_add(rq);
987 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
988 			GEM_TRACE("lo spinner failed to start\n");
989 			GEM_TRACE_DUMP();
990 			i915_gem_set_wedged(i915);
991 			err = -EIO;
992 			goto err_ctx_lo;
993 		}
994 
995 		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
996 						MI_ARB_CHECK);
997 		if (IS_ERR(rq)) {
998 			igt_spinner_end(&spin_lo);
999 			err = PTR_ERR(rq);
1000 			goto err_ctx_lo;
1001 		}
1002 
1003 		init_completion(&engine->execlists.preempt_hang.completion);
1004 		engine->execlists.preempt_hang.inject_hang = true;
1005 
1006 		i915_request_add(rq);
1007 
1008 		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1009 						 HZ / 10)) {
1010 			pr_err("Preemption did not occur within timeout!\n");
1011 			GEM_TRACE_DUMP();
1012 			i915_gem_set_wedged(i915);
1013 			err = -EIO;
1014 			goto err_ctx_lo;
1015 		}
1016 
1017 		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1018 		i915_reset_engine(engine, NULL);
1019 		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1020 
1021 		engine->execlists.preempt_hang.inject_hang = false;
1022 
1023 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
1024 			GEM_TRACE("hi spinner failed to start\n");
1025 			GEM_TRACE_DUMP();
1026 			i915_gem_set_wedged(i915);
1027 			err = -EIO;
1028 			goto err_ctx_lo;
1029 		}
1030 
1031 		igt_spinner_end(&spin_hi);
1032 		igt_spinner_end(&spin_lo);
1033 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1034 			err = -EIO;
1035 			goto err_ctx_lo;
1036 		}
1037 	}
1038 
1039 	err = 0;
1040 err_ctx_lo:
1041 	kernel_context_close(ctx_lo);
1042 err_ctx_hi:
1043 	kernel_context_close(ctx_hi);
1044 err_spin_lo:
1045 	igt_spinner_fini(&spin_lo);
1046 err_spin_hi:
1047 	igt_spinner_fini(&spin_hi);
1048 err_unlock:
1049 	igt_flush_test(i915, I915_WAIT_LOCKED);
1050 	intel_runtime_pm_put(i915, wakeref);
1051 	mutex_unlock(&i915->drm.struct_mutex);
1052 	return err;
1053 }
1054 
1055 static int random_range(struct rnd_state *rnd, int min, int max)
1056 {
1057 	return i915_prandom_u32_max_state(max - min, rnd) + min;
1058 }
1059 
1060 static int random_priority(struct rnd_state *rnd)
1061 {
1062 	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1063 }
1064 
1065 struct preempt_smoke {
1066 	struct drm_i915_private *i915;
1067 	struct i915_gem_context **contexts;
1068 	struct intel_engine_cs *engine;
1069 	struct drm_i915_gem_object *batch;
1070 	unsigned int ncontext;
1071 	struct rnd_state prng;
1072 	unsigned long count;
1073 };
1074 
1075 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1076 {
1077 	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1078 							  &smoke->prng)];
1079 }
1080 
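/*
 * Submit a single request for the smoke tests: adjust the context priority,
 * and optionally run the shared MI_ARB_CHECK batch so that the request is
 * preemptible while it is on the GPU.
 */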
1081 static int smoke_submit(struct preempt_smoke *smoke,
1082 			struct i915_gem_context *ctx, int prio,
1083 			struct drm_i915_gem_object *batch)
1084 {
1085 	struct i915_request *rq;
1086 	struct i915_vma *vma = NULL;
1087 	int err = 0;
1088 
1089 	if (batch) {
1090 		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
1091 		if (IS_ERR(vma))
1092 			return PTR_ERR(vma);
1093 
1094 		err = i915_vma_pin(vma, 0, 0, PIN_USER);
1095 		if (err)
1096 			return err;
1097 	}
1098 
1099 	ctx->sched.priority = prio;
1100 
1101 	rq = igt_request_alloc(ctx, smoke->engine);
1102 	if (IS_ERR(rq)) {
1103 		err = PTR_ERR(rq);
1104 		goto unpin;
1105 	}
1106 
1107 	if (vma) {
1108 		err = rq->engine->emit_bb_start(rq,
1109 						vma->node.start,
1110 						PAGE_SIZE, 0);
1111 		if (!err)
1112 			err = i915_vma_move_to_active(vma, rq, 0);
1113 	}
1114 
1115 	i915_request_add(rq);
1116 
1117 unpin:
1118 	if (vma)
1119 		i915_vma_unpin(vma);
1120 
1121 	return err;
1122 }
1123 
1124 static int smoke_crescendo_thread(void *arg)
1125 {
1126 	struct preempt_smoke *smoke = arg;
1127 	IGT_TIMEOUT(end_time);
1128 	unsigned long count;
1129 
1130 	count = 0;
1131 	do {
1132 		struct i915_gem_context *ctx = smoke_context(smoke);
1133 		int err;
1134 
1135 		mutex_lock(&smoke->i915->drm.struct_mutex);
1136 		err = smoke_submit(smoke,
1137 				   ctx, count % I915_PRIORITY_MAX,
1138 				   smoke->batch);
1139 		mutex_unlock(&smoke->i915->drm.struct_mutex);
1140 		if (err)
1141 			return err;
1142 
1143 		count++;
1144 	} while (!__igt_timeout(end_time, NULL));
1145 
1146 	smoke->count = count;
1147 	return 0;
1148 }
1149 
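/*
 * One kthread per engine submits requests with cycling priorities (the
 * "crescendo"), all drawing from the same pool of contexts, until the
 * timeout expires. The struct_mutex is dropped here and re-taken by each
 * thread around its submissions.
 */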
1150 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1151 #define BATCH BIT(0)
1152 {
1153 	struct task_struct *tsk[I915_NUM_ENGINES] = {};
1154 	struct preempt_smoke arg[I915_NUM_ENGINES];
1155 	struct intel_engine_cs *engine;
1156 	enum intel_engine_id id;
1157 	unsigned long count;
1158 	int err = 0;
1159 
1160 	mutex_unlock(&smoke->i915->drm.struct_mutex);
1161 
1162 	for_each_engine(engine, smoke->i915, id) {
1163 		arg[id] = *smoke;
1164 		arg[id].engine = engine;
1165 		if (!(flags & BATCH))
1166 			arg[id].batch = NULL;
1167 		arg[id].count = 0;
1168 
1169 		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1170 				      "igt/smoke:%d", id);
1171 		if (IS_ERR(tsk[id])) {
1172 			err = PTR_ERR(tsk[id]);
1173 			break;
1174 		}
1175 		get_task_struct(tsk[id]);
1176 	}
1177 
1178 	count = 0;
1179 	for_each_engine(engine, smoke->i915, id) {
1180 		int status;
1181 
1182 		if (IS_ERR_OR_NULL(tsk[id]))
1183 			continue;
1184 
1185 		status = kthread_stop(tsk[id]);
1186 		if (status && !err)
1187 			err = status;
1188 
1189 		count += arg[id].count;
1190 
1191 		put_task_struct(tsk[id]);
1192 	}
1193 
1194 	mutex_lock(&smoke->i915->drm.struct_mutex);
1195 
1196 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1197 		count, flags,
1198 		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1199 	return err;
1200 }
1201 
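/*
 * Single-threaded variant: walk all engines submitting requests with random
 * priorities (and optionally the preemptible batch) until the timeout
 * expires, exercising arbitrary preemption orderings.
 */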
1202 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1203 {
1204 	enum intel_engine_id id;
1205 	IGT_TIMEOUT(end_time);
1206 	unsigned long count;
1207 
1208 	count = 0;
1209 	do {
1210 		for_each_engine(smoke->engine, smoke->i915, id) {
1211 			struct i915_gem_context *ctx = smoke_context(smoke);
1212 			int err;
1213 
1214 			err = smoke_submit(smoke,
1215 					   ctx, random_priority(&smoke->prng),
1216 					   flags & BATCH ? smoke->batch : NULL);
1217 			if (err)
1218 				return err;
1219 
1220 			count++;
1221 		}
1222 	} while (!__igt_timeout(end_time, NULL));
1223 
1224 	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1225 		count, flags,
1226 		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1227 	return 0;
1228 }
1229 
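/*
 * Preemption stress test: build a batch full of MI_ARB_CHECK arbitration
 * points, create a pool of contexts, and then hammer the scheduler with the
 * crescendo and random submission patterns, both with and without the batch.
 */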
1230 static int live_preempt_smoke(void *arg)
1231 {
1232 	struct preempt_smoke smoke = {
1233 		.i915 = arg,
1234 		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1235 		.ncontext = 1024,
1236 	};
1237 	const unsigned int phase[] = { 0, BATCH };
1238 	intel_wakeref_t wakeref;
1239 	struct igt_live_test t;
1240 	int err = -ENOMEM;
1241 	u32 *cs;
1242 	int n;
1243 
1244 	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1245 		return 0;
1246 
1247 	smoke.contexts = kmalloc_array(smoke.ncontext,
1248 				       sizeof(*smoke.contexts),
1249 				       GFP_KERNEL);
1250 	if (!smoke.contexts)
1251 		return -ENOMEM;
1252 
1253 	mutex_lock(&smoke.i915->drm.struct_mutex);
1254 	wakeref = intel_runtime_pm_get(smoke.i915);
1255 
1256 	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1257 	if (IS_ERR(smoke.batch)) {
1258 		err = PTR_ERR(smoke.batch);
1259 		goto err_unlock;
1260 	}
1261 
1262 	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1263 	if (IS_ERR(cs)) {
1264 		err = PTR_ERR(cs);
1265 		goto err_batch;
1266 	}
1267 	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1268 		cs[n] = MI_ARB_CHECK;
1269 	cs[n] = MI_BATCH_BUFFER_END;
1270 	i915_gem_object_flush_map(smoke.batch);
1271 	i915_gem_object_unpin_map(smoke.batch);
1272 
1273 	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1274 		err = -EIO;
1275 		goto err_batch;
1276 	}
1277 
1278 	for (n = 0; n < smoke.ncontext; n++) {
1279 		smoke.contexts[n] = kernel_context(smoke.i915);
1280 		if (!smoke.contexts[n])
1281 			goto err_ctx;
1282 	}
1283 
1284 	for (n = 0; n < ARRAY_SIZE(phase); n++) {
1285 		err = smoke_crescendo(&smoke, phase[n]);
1286 		if (err)
1287 			goto err_ctx;
1288 
1289 		err = smoke_random(&smoke, phase[n]);
1290 		if (err)
1291 			goto err_ctx;
1292 	}
1293 
1294 err_ctx:
1295 	if (igt_live_test_end(&t))
1296 		err = -EIO;
1297 
1298 	for (n = 0; n < smoke.ncontext; n++) {
1299 		if (!smoke.contexts[n])
1300 			break;
1301 		kernel_context_close(smoke.contexts[n]);
1302 	}
1303 
1304 err_batch:
1305 	i915_gem_object_put(smoke.batch);
1306 err_unlock:
1307 	intel_runtime_pm_put(smoke.i915, wakeref);
1308 	mutex_unlock(&smoke.i915->drm.struct_mutex);
1309 	kfree(smoke.contexts);
1310 
1311 	return err;
1312 }
1313 
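/*
 * Submit batches of empty requests to nctx virtual engines built from the
 * given siblings and measure request latency as the batch size grows (CHAIN
 * submits each context's requests back to back, otherwise they are
 * interleaved). Primarily a sanity and throughput check for virtual engine
 * submission.
 */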
1314 static int nop_virtual_engine(struct drm_i915_private *i915,
1315 			      struct intel_engine_cs **siblings,
1316 			      unsigned int nsibling,
1317 			      unsigned int nctx,
1318 			      unsigned int flags)
1319 #define CHAIN BIT(0)
1320 {
1321 	IGT_TIMEOUT(end_time);
1322 	struct i915_request *request[16];
1323 	struct i915_gem_context *ctx[16];
1324 	struct intel_context *ve[16];
1325 	unsigned long n, prime, nc;
1326 	struct igt_live_test t;
1327 	ktime_t times[2] = {};
1328 	int err;
1329 
1330 	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
1331 
1332 	for (n = 0; n < nctx; n++) {
1333 		ctx[n] = kernel_context(i915);
1334 		if (!ctx[n]) {
1335 			err = -ENOMEM;
1336 			nctx = n;
1337 			goto out;
1338 		}
1339 
1340 		ve[n] = intel_execlists_create_virtual(ctx[n],
1341 						       siblings, nsibling);
1342 		if (IS_ERR(ve[n])) {
1343 			kernel_context_close(ctx[n]);
1344 			err = PTR_ERR(ve[n]);
1345 			nctx = n;
1346 			goto out;
1347 		}
1348 
1349 		err = intel_context_pin(ve[n]);
1350 		if (err) {
1351 			intel_context_put(ve[n]);
1352 			kernel_context_close(ctx[n]);
1353 			nctx = n;
1354 			goto out;
1355 		}
1356 	}
1357 
1358 	err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
1359 	if (err)
1360 		goto out;
1361 
1362 	for_each_prime_number_from(prime, 1, 8192) {
1363 		times[1] = ktime_get_raw();
1364 
1365 		if (flags & CHAIN) {
1366 			for (nc = 0; nc < nctx; nc++) {
1367 				for (n = 0; n < prime; n++) {
1368 					request[nc] =
1369 						i915_request_create(ve[nc]);
1370 					if (IS_ERR(request[nc])) {
1371 						err = PTR_ERR(request[nc]);
1372 						goto out;
1373 					}
1374 
1375 					i915_request_add(request[nc]);
1376 				}
1377 			}
1378 		} else {
1379 			for (n = 0; n < prime; n++) {
1380 				for (nc = 0; nc < nctx; nc++) {
1381 					request[nc] =
1382 						i915_request_create(ve[nc]);
1383 					if (IS_ERR(request[nc])) {
1384 						err = PTR_ERR(request[nc]);
1385 						goto out;
1386 					}
1387 
1388 					i915_request_add(request[nc]);
1389 				}
1390 			}
1391 		}
1392 
1393 		for (nc = 0; nc < nctx; nc++) {
1394 			if (i915_request_wait(request[nc],
1395 					      I915_WAIT_LOCKED,
1396 					      HZ / 10) < 0) {
1397 				pr_err("%s(%s): wait for %llx:%lld timed out\n",
1398 				       __func__, ve[0]->engine->name,
1399 				       request[nc]->fence.context,
1400 				       request[nc]->fence.seqno);
1401 
1402 				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1403 					  __func__, ve[0]->engine->name,
1404 					  request[nc]->fence.context,
1405 					  request[nc]->fence.seqno);
1406 				GEM_TRACE_DUMP();
1407 				i915_gem_set_wedged(i915);
1408 				break;
1409 			}
1410 		}
1411 
1412 		times[1] = ktime_sub(ktime_get_raw(), times[1]);
1413 		if (prime == 1)
1414 			times[0] = times[1];
1415 
1416 		if (__igt_timeout(end_time, NULL))
1417 			break;
1418 	}
1419 
1420 	err = igt_live_test_end(&t);
1421 	if (err)
1422 		goto out;
1423 
1424 	pr_info("Request x%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
1425 		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
1426 		prime, div64_u64(ktime_to_ns(times[1]), prime));
1427 
1428 out:
1429 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
1430 		err = -EIO;
1431 
1432 	for (nc = 0; nc < nctx; nc++) {
1433 		intel_context_unpin(ve[nc]);
1434 		intel_context_put(ve[nc]);
1435 		kernel_context_close(ctx[nc]);
1436 	}
1437 	return err;
1438 }
1439 
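/*
 * Exercise nop_virtual_engine(): first wrap each physical engine in a
 * single-sibling virtual engine, then build virtual engines over every
 * engine class with two or more instances, varying the number of contexts.
 */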
1440 static int live_virtual_engine(void *arg)
1441 {
1442 	struct drm_i915_private *i915 = arg;
1443 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1444 	struct intel_engine_cs *engine;
1445 	enum intel_engine_id id;
1446 	unsigned int class, inst;
1447 	int err = -ENODEV;
1448 
1449 	if (USES_GUC_SUBMISSION(i915))
1450 		return 0;
1451 
1452 	mutex_lock(&i915->drm.struct_mutex);
1453 
1454 	for_each_engine(engine, i915, id) {
1455 		err = nop_virtual_engine(i915, &engine, 1, 1, 0);
1456 		if (err) {
1457 			pr_err("Failed to wrap engine %s: err=%d\n",
1458 			       engine->name, err);
1459 			goto out_unlock;
1460 		}
1461 	}
1462 
1463 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1464 		int nsibling, n;
1465 
1466 		nsibling = 0;
1467 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1468 			if (!i915->engine_class[class][inst])
1469 				continue;
1470 
1471 			siblings[nsibling++] = i915->engine_class[class][inst];
1472 		}
1473 		if (nsibling < 2)
1474 			continue;
1475 
1476 		for (n = 1; n <= nsibling + 1; n++) {
1477 			err = nop_virtual_engine(i915, siblings, nsibling,
1478 						 n, 0);
1479 			if (err)
1480 				goto out_unlock;
1481 		}
1482 
1483 		err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
1484 		if (err)
1485 			goto out_unlock;
1486 	}
1487 
1488 out_unlock:
1489 	mutex_unlock(&i915->drm.struct_mutex);
1490 	return err;
1491 }
1492 
1493 static int mask_virtual_engine(struct drm_i915_private *i915,
1494 			       struct intel_engine_cs **siblings,
1495 			       unsigned int nsibling)
1496 {
1497 	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
1498 	struct i915_gem_context *ctx;
1499 	struct intel_context *ve;
1500 	struct igt_live_test t;
1501 	unsigned int n;
1502 	int err;
1503 
1504 	/*
1505 	 * Check that by setting the execution mask on a request, we can
1506 	 * restrict it to our desired engine within the virtual engine.
1507 	 */
1508 
1509 	ctx = kernel_context(i915);
1510 	if (!ctx)
1511 		return -ENOMEM;
1512 
1513 	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
1514 	if (IS_ERR(ve)) {
1515 		err = PTR_ERR(ve);
1516 		goto out_close;
1517 	}
1518 
1519 	err = intel_context_pin(ve);
1520 	if (err)
1521 		goto out_put;
1522 
1523 	err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
1524 	if (err)
1525 		goto out_unpin;
1526 
1527 	for (n = 0; n < nsibling; n++) {
1528 		request[n] = i915_request_create(ve);
1529 		if (IS_ERR(request[n])) {
1530 			err = PTR_ERR(request[n]);
1531 			nsibling = n;
1532 			goto out;
1533 		}
1534 
1535 		/* Reverse order as it's more likely to be unnatural */
1536 		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
1537 
1538 		i915_request_get(request[n]);
1539 		i915_request_add(request[n]);
1540 	}
1541 
1542 	for (n = 0; n < nsibling; n++) {
1543 		if (i915_request_wait(request[n], I915_WAIT_LOCKED, HZ / 10) < 0) {
1544 			pr_err("%s(%s): wait for %llx:%lld timed out\n",
1545 			       __func__, ve->engine->name,
1546 			       request[n]->fence.context,
1547 			       request[n]->fence.seqno);
1548 
1549 			GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1550 				  __func__, ve->engine->name,
1551 				  request[n]->fence.context,
1552 				  request[n]->fence.seqno);
1553 			GEM_TRACE_DUMP();
1554 			i915_gem_set_wedged(i915);
1555 			err = -EIO;
1556 			goto out;
1557 		}
1558 
1559 		if (request[n]->engine != siblings[nsibling - n - 1]) {
1560 			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
1561 			       request[n]->engine->name,
1562 			       siblings[nsibling - n - 1]->name);
1563 			err = -EINVAL;
1564 			goto out;
1565 		}
1566 	}
1567 
1568 	err = igt_live_test_end(&t);
1569 	if (err)
1570 		goto out;
1571 
1572 out:
1573 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
1574 		err = -EIO;
1575 
1576 	for (n = 0; n < nsibling; n++)
1577 		i915_request_put(request[n]);
1578 
1579 out_unpin:
1580 	intel_context_unpin(ve);
1581 out_put:
1582 	intel_context_put(ve);
1583 out_close:
1584 	kernel_context_close(ctx);
1585 	return err;
1586 }
1587 
1588 static int live_virtual_mask(void *arg)
1589 {
1590 	struct drm_i915_private *i915 = arg;
1591 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1592 	unsigned int class, inst;
1593 	int err = 0;
1594 
1595 	if (USES_GUC_SUBMISSION(i915))
1596 		return 0;
1597 
1598 	mutex_lock(&i915->drm.struct_mutex);
1599 
1600 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1601 		unsigned int nsibling;
1602 
1603 		nsibling = 0;
1604 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1605 			if (!i915->engine_class[class][inst])
1606 				break;
1607 
1608 			siblings[nsibling++] = i915->engine_class[class][inst];
1609 		}
1610 		if (nsibling < 2)
1611 			continue;
1612 
1613 		err = mask_virtual_engine(i915, siblings, nsibling);
1614 		if (err)
1615 			goto out_unlock;
1616 	}
1617 
1618 out_unlock:
1619 	mutex_unlock(&i915->drm.struct_mutex);
1620 	return err;
1621 }
1622 
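/*
 * For each possible master engine (outside the target class), submit a
 * master request plus one bonded request per sibling on a virtual engine,
 * coupled via i915_request_await_execution(). Each bonded request must end
 * up executing on the sibling it was bonded to. With BOND_SCHEDULE the
 * master's submission is held back by an on-stack fence until all the
 * bonded requests have been queued.
 */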
1623 static int bond_virtual_engine(struct drm_i915_private *i915,
1624 			       unsigned int class,
1625 			       struct intel_engine_cs **siblings,
1626 			       unsigned int nsibling,
1627 			       unsigned int flags)
1628 #define BOND_SCHEDULE BIT(0)
1629 {
1630 	struct intel_engine_cs *master;
1631 	struct i915_gem_context *ctx;
1632 	struct i915_request *rq[16];
1633 	enum intel_engine_id id;
1634 	unsigned long n;
1635 	int err;
1636 
1637 	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
1638 
1639 	ctx = kernel_context(i915);
1640 	if (!ctx)
1641 		return -ENOMEM;
1642 
1643 	err = 0;
1644 	rq[0] = ERR_PTR(-ENOMEM);
1645 	for_each_engine(master, i915, id) {
1646 		struct i915_sw_fence fence = {};
1647 
1648 		if (master->class == class)
1649 			continue;
1650 
1651 		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
1652 
1653 		rq[0] = igt_request_alloc(ctx, master);
1654 		if (IS_ERR(rq[0])) {
1655 			err = PTR_ERR(rq[0]);
1656 			goto out;
1657 		}
1658 		i915_request_get(rq[0]);
1659 
1660 		if (flags & BOND_SCHEDULE) {
1661 			onstack_fence_init(&fence);
1662 			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
1663 							       &fence,
1664 							       GFP_KERNEL);
1665 		}
1666 		i915_request_add(rq[0]);
1667 		if (err < 0)
1668 			goto out;
1669 
1670 		for (n = 0; n < nsibling; n++) {
1671 			struct intel_context *ve;
1672 
1673 			ve = intel_execlists_create_virtual(ctx,
1674 							    siblings,
1675 							    nsibling);
1676 			if (IS_ERR(ve)) {
1677 				err = PTR_ERR(ve);
1678 				onstack_fence_fini(&fence);
1679 				goto out;
1680 			}
1681 
1682 			err = intel_virtual_engine_attach_bond(ve->engine,
1683 							       master,
1684 							       siblings[n]);
1685 			if (err) {
1686 				intel_context_put(ve);
1687 				onstack_fence_fini(&fence);
1688 				goto out;
1689 			}
1690 
1691 			err = intel_context_pin(ve);
1692 			intel_context_put(ve);
1693 			if (err) {
1694 				onstack_fence_fini(&fence);
1695 				goto out;
1696 			}
1697 
1698 			rq[n + 1] = i915_request_create(ve);
1699 			intel_context_unpin(ve);
1700 			if (IS_ERR(rq[n + 1])) {
1701 				err = PTR_ERR(rq[n + 1]);
1702 				onstack_fence_fini(&fence);
1703 				goto out;
1704 			}
1705 			i915_request_get(rq[n + 1]);
1706 
1707 			err = i915_request_await_execution(rq[n + 1],
1708 							   &rq[0]->fence,
1709 							   ve->engine->bond_execute);
1710 			i915_request_add(rq[n + 1]);
1711 			if (err < 0) {
1712 				onstack_fence_fini(&fence);
1713 				goto out;
1714 			}
1715 		}
1716 		onstack_fence_fini(&fence);
1717 
1718 		if (i915_request_wait(rq[0],
1719 				      I915_WAIT_LOCKED,
1720 				      HZ / 10) < 0) {
1721 			pr_err("Master request did not execute (on %s)!\n",
1722 			       rq[0]->engine->name);
1723 			err = -EIO;
1724 			goto out;
1725 		}
1726 
1727 		for (n = 0; n < nsibling; n++) {
1728 			if (i915_request_wait(rq[n + 1],
1729 					      I915_WAIT_LOCKED,
1730 					      MAX_SCHEDULE_TIMEOUT) < 0) {
1731 				err = -EIO;
1732 				goto out;
1733 			}
1734 
1735 			if (rq[n + 1]->engine != siblings[n]) {
1736 				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
1737 				       siblings[n]->name,
1738 				       rq[n + 1]->engine->name,
1739 				       rq[0]->engine->name);
1740 				err = -EINVAL;
1741 				goto out;
1742 			}
1743 		}
1744 
1745 		for (n = 0; !IS_ERR(rq[n]); n++)
1746 			i915_request_put(rq[n]);
1747 		rq[0] = ERR_PTR(-ENOMEM);
1748 	}
1749 
1750 out:
1751 	for (n = 0; !IS_ERR(rq[n]); n++)
1752 		i915_request_put(rq[n]);
1753 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
1754 		err = -EIO;
1755 
1756 	kernel_context_close(ctx);
1757 	return err;
1758 }
1759 
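/*
 * Run bond_virtual_engine() for every engine class with at least two
 * instances, both with the master submitted immediately and with its
 * submission deferred (BOND_SCHEDULE).
 */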
1760 static int live_virtual_bond(void *arg)
1761 {
1762 	static const struct phase {
1763 		const char *name;
1764 		unsigned int flags;
1765 	} phases[] = {
1766 		{ "", 0 },
1767 		{ "schedule", BOND_SCHEDULE },
1768 		{ },
1769 	};
1770 	struct drm_i915_private *i915 = arg;
1771 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1772 	unsigned int class, inst;
1773 	int err = 0;
1774 
1775 	if (USES_GUC_SUBMISSION(i915))
1776 		return 0;
1777 
1778 	mutex_lock(&i915->drm.struct_mutex);
1779 
1780 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1781 		const struct phase *p;
1782 		int nsibling;
1783 
1784 		nsibling = 0;
1785 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1786 			if (!i915->engine_class[class][inst])
1787 				break;
1788 
1789 			GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
1790 			siblings[nsibling++] = i915->engine_class[class][inst];
1791 		}
1792 		if (nsibling < 2)
1793 			continue;
1794 
1795 		for (p = phases; p->name; p++) {
1796 			err = bond_virtual_engine(i915,
1797 						  class, siblings, nsibling,
1798 						  p->flags);
1799 			if (err) {
1800 				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
1801 				       __func__, p->name, class, nsibling, err);
1802 				goto out_unlock;
1803 			}
1804 		}
1805 	}
1806 
1807 out_unlock:
1808 	mutex_unlock(&i915->drm.struct_mutex);
1809 	return err;
1810 }
1811 
1812 int intel_execlists_live_selftests(struct drm_i915_private *i915)
1813 {
1814 	static const struct i915_subtest tests[] = {
1815 		SUBTEST(live_sanitycheck),
1816 		SUBTEST(live_busywait_preempt),
1817 		SUBTEST(live_preempt),
1818 		SUBTEST(live_late_preempt),
1819 		SUBTEST(live_suppress_self_preempt),
1820 		SUBTEST(live_suppress_wait_preempt),
1821 		SUBTEST(live_chain_preempt),
1822 		SUBTEST(live_preempt_hang),
1823 		SUBTEST(live_preempt_smoke),
1824 		SUBTEST(live_virtual_engine),
1825 		SUBTEST(live_virtual_mask),
1826 		SUBTEST(live_virtual_bond),
1827 	};
1828 
1829 	if (!HAS_EXECLISTS(i915))
1830 		return 0;
1831 
1832 	if (i915_terminally_wedged(i915))
1833 		return 0;
1834 
1835 	return i915_subtests(tests, i915);
1836 }
1837