1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kthread.h>
26 
27 #include "gem/i915_gem_context.h"
28 
29 #include "intel_gt.h"
30 #include "intel_engine_heartbeat.h"
31 #include "intel_engine_pm.h"
32 
33 #include "i915_selftest.h"
34 #include "selftests/i915_random.h"
35 #include "selftests/igt_flush_test.h"
36 #include "selftests/igt_reset.h"
37 #include "selftests/igt_atomic.h"
38 
39 #include "selftests/mock_drm.h"
40 
41 #include "gem/selftests/mock_context.h"
42 #include "gem/selftests/igt_gem_utils.h"
43 
44 #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
45 
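/*
 * struct hang - reusable fixture for submitting a "hanging" batch.
 *
 * The batch built by hang_create_request() spins forever, writing the
 * request's seqno into a per-context slot of the hws page so that we can
 * tell from the CPU when it has actually started executing on the GPU.
 */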
46 struct hang {
47 	struct intel_gt *gt;
48 	struct drm_i915_gem_object *hws;
49 	struct drm_i915_gem_object *obj;
50 	struct i915_gem_context *ctx;
51 	u32 *seqno;
52 	u32 *batch;
53 };
54 
55 static int hang_init(struct hang *h, struct intel_gt *gt)
56 {
57 	void *vaddr;
58 	int err;
59 
60 	memset(h, 0, sizeof(*h));
61 	h->gt = gt;
62 
63 	h->ctx = kernel_context(gt->i915);
64 	if (IS_ERR(h->ctx))
65 		return PTR_ERR(h->ctx);
66 
67 	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
68 
69 	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
70 	if (IS_ERR(h->hws)) {
71 		err = PTR_ERR(h->hws);
72 		goto err_ctx;
73 	}
74 
75 	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
76 	if (IS_ERR(h->obj)) {
77 		err = PTR_ERR(h->obj);
78 		goto err_hws;
79 	}
80 
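	/*
	 * The hws page is written by the GPU and polled by the CPU (see
	 * hws_seqno()), so ask for a cache-coherent, write-back mapping.
	 */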
81 	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
82 	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
83 	if (IS_ERR(vaddr)) {
84 		err = PTR_ERR(vaddr);
85 		goto err_obj;
86 	}
87 	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
88 
89 	vaddr = i915_gem_object_pin_map(h->obj,
90 					i915_coherent_map_type(gt->i915));
91 	if (IS_ERR(vaddr)) {
92 		err = PTR_ERR(vaddr);
93 		goto err_unpin_hws;
94 	}
95 	h->batch = vaddr;
96 
97 	return 0;
98 
99 err_unpin_hws:
100 	i915_gem_object_unpin_map(h->hws);
101 err_obj:
102 	i915_gem_object_put(h->obj);
103 err_hws:
104 	i915_gem_object_put(h->hws);
105 err_ctx:
106 	kernel_context_close(h->ctx);
107 	return err;
108 }
109 
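/*
 * Each request writes its seqno into a u32 slot of the hws page indexed
 * by its fence context, so that concurrent spinners do not trample each
 * other's breadcrumbs.
 */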
110 static u64 hws_address(const struct i915_vma *hws,
111 		       const struct i915_request *rq)
112 {
113 	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
114 }
115 
116 static int move_to_active(struct i915_vma *vma,
117 			  struct i915_request *rq,
118 			  unsigned int flags)
119 {
120 	int err;
121 
122 	i915_vma_lock(vma);
123 	err = i915_request_await_object(rq, vma->obj,
124 					flags & EXEC_OBJECT_WRITE);
125 	if (err == 0)
126 		err = i915_vma_move_to_active(vma, rq, flags);
127 	i915_vma_unlock(vma);
128 
129 	return err;
130 }
131 
132 static struct i915_request *
133 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
134 {
135 	struct intel_gt *gt = h->gt;
136 	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
137 	struct drm_i915_gem_object *obj;
138 	struct i915_request *rq = NULL;
139 	struct i915_vma *hws, *vma;
140 	unsigned int flags;
141 	void *vaddr;
142 	u32 *batch;
143 	int err;
144 
145 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
146 	if (IS_ERR(obj)) {
147 		i915_vm_put(vm);
148 		return ERR_CAST(obj);
149 	}
150 
151 	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
152 	if (IS_ERR(vaddr)) {
153 		i915_gem_object_put(obj);
154 		i915_vm_put(vm);
155 		return ERR_CAST(vaddr);
156 	}
157 
158 	i915_gem_object_unpin_map(h->obj);
159 	i915_gem_object_put(h->obj);
160 
161 	h->obj = obj;
162 	h->batch = vaddr;
163 
164 	vma = i915_vma_instance(h->obj, vm, NULL);
165 	if (IS_ERR(vma)) {
166 		i915_vm_put(vm);
167 		return ERR_CAST(vma);
168 	}
169 
170 	hws = i915_vma_instance(h->hws, vm, NULL);
171 	if (IS_ERR(hws)) {
172 		i915_vm_put(vm);
173 		return ERR_CAST(hws);
174 	}
175 
176 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
177 	if (err) {
178 		i915_vm_put(vm);
179 		return ERR_PTR(err);
180 	}
181 
182 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
183 	if (err)
184 		goto unpin_vma;
185 
186 	rq = igt_request_alloc(h->ctx, engine);
187 	if (IS_ERR(rq)) {
188 		err = PTR_ERR(rq);
189 		goto unpin_hws;
190 	}
191 
192 	err = move_to_active(vma, rq, 0);
193 	if (err)
194 		goto cancel_rq;
195 
196 	err = move_to_active(hws, rq, 0);
197 	if (err)
198 		goto cancel_rq;
199 
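	/*
	 * Build the hanging batch: store the request's seqno into its hws
	 * slot, then jump back to the start of the batch so that it spins
	 * forever (the MI_ARB_CHECKs keep the loop preemptible). The spin
	 * is only broken by a reset, or by rewriting the first dword of
	 * the batch to MI_BATCH_BUFFER_END (see hang_fini()).
	 */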
200 	batch = h->batch;
201 	if (INTEL_GEN(gt->i915) >= 8) {
202 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
203 		*batch++ = lower_32_bits(hws_address(hws, rq));
204 		*batch++ = upper_32_bits(hws_address(hws, rq));
205 		*batch++ = rq->fence.seqno;
206 		*batch++ = MI_ARB_CHECK;
207 
208 		memset(batch, 0, 1024);
209 		batch += 1024 / sizeof(*batch);
210 
211 		*batch++ = MI_ARB_CHECK;
212 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
213 		*batch++ = lower_32_bits(vma->node.start);
214 		*batch++ = upper_32_bits(vma->node.start);
215 	} else if (INTEL_GEN(gt->i915) >= 6) {
216 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
217 		*batch++ = 0;
218 		*batch++ = lower_32_bits(hws_address(hws, rq));
219 		*batch++ = rq->fence.seqno;
220 		*batch++ = MI_ARB_CHECK;
221 
222 		memset(batch, 0, 1024);
223 		batch += 1024 / sizeof(*batch);
224 
225 		*batch++ = MI_ARB_CHECK;
226 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
227 		*batch++ = lower_32_bits(vma->node.start);
228 	} else if (INTEL_GEN(gt->i915) >= 4) {
229 		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
230 		*batch++ = 0;
231 		*batch++ = lower_32_bits(hws_address(hws, rq));
232 		*batch++ = rq->fence.seqno;
233 		*batch++ = MI_ARB_CHECK;
234 
235 		memset(batch, 0, 1024);
236 		batch += 1024 / sizeof(*batch);
237 
238 		*batch++ = MI_ARB_CHECK;
239 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
240 		*batch++ = lower_32_bits(vma->node.start);
241 	} else {
242 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
243 		*batch++ = lower_32_bits(hws_address(hws, rq));
244 		*batch++ = rq->fence.seqno;
245 		*batch++ = MI_ARB_CHECK;
246 
247 		memset(batch, 0, 1024);
248 		batch += 1024 / sizeof(*batch);
249 
250 		*batch++ = MI_ARB_CHECK;
251 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
252 		*batch++ = lower_32_bits(vma->node.start);
253 	}
254 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
255 	intel_gt_chipset_flush(engine->gt);
256 
257 	if (rq->engine->emit_init_breadcrumb) {
258 		err = rq->engine->emit_init_breadcrumb(rq);
259 		if (err)
260 			goto cancel_rq;
261 	}
262 
263 	flags = 0;
264 	if (INTEL_GEN(gt->i915) <= 5)
265 		flags |= I915_DISPATCH_SECURE;
266 
267 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
268 
269 cancel_rq:
270 	if (err) {
271 		i915_request_set_error_once(rq, err);
272 		i915_request_add(rq);
273 	}
274 unpin_hws:
275 	i915_vma_unpin(hws);
276 unpin_vma:
277 	i915_vma_unpin(vma);
278 	i915_vm_put(vm);
279 	return err ? ERR_PTR(err) : rq;
280 }
281 
282 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
283 {
284 	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
285 }
286 
287 static void hang_fini(struct hang *h)
288 {
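	/* Terminate the spinner loop so any executing batch completes */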
289 	*h->batch = MI_BATCH_BUFFER_END;
290 	intel_gt_chipset_flush(h->gt);
291 
292 	i915_gem_object_unpin_map(h->obj);
293 	i915_gem_object_put(h->obj);
294 
295 	i915_gem_object_unpin_map(h->hws);
296 	i915_gem_object_put(h->hws);
297 
298 	kernel_context_close(h->ctx);
299 
300 	igt_flush_test(h->gt->i915);
301 }
302 
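/*
 * Returns true once the spinner has written its seqno to the hws page,
 * i.e. once the request has started executing on the GPU. Busy-wait
 * briefly for the common case, then fall back to a sleeping wait of up
 * to a second.
 */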
303 static bool wait_until_running(struct hang *h, struct i915_request *rq)
304 {
305 	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
306 					       rq->fence.seqno),
307 			     10) &&
308 		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
309 					    rq->fence.seqno),
310 			  1000));
311 }
312 
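/*
 * Background heartbeat requests would interfere with the carefully
 * controlled hangs below (and may trigger their own resets), so park
 * the heartbeat, under an explicit pm wakeref, around each test.
 */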
313 static void engine_heartbeat_disable(struct intel_engine_cs *engine)
314 {
315 	engine->props.heartbeat_interval_ms = 0;
316 
317 	intel_engine_pm_get(engine);
318 	intel_engine_park_heartbeat(engine);
319 }
320 
321 static void engine_heartbeat_enable(struct intel_engine_cs *engine)
322 {
323 	intel_engine_pm_put(engine);
324 
325 	engine->props.heartbeat_interval_ms =
326 		engine->defaults.heartbeat_interval_ms;
327 }
328 
329 static int igt_hang_sanitycheck(void *arg)
330 {
331 	struct intel_gt *gt = arg;
332 	struct i915_request *rq;
333 	struct intel_engine_cs *engine;
334 	enum intel_engine_id id;
335 	struct hang h;
336 	int err;
337 
338 	/* Basic check that we can execute our hanging batch */
339 
340 	err = hang_init(&h, gt);
341 	if (err)
342 		return err;
343 
344 	for_each_engine(engine, gt, id) {
345 		struct intel_wedge_me w;
346 		long timeout;
347 
348 		if (!intel_engine_can_store_dword(engine))
349 			continue;
350 
351 		rq = hang_create_request(&h, engine);
352 		if (IS_ERR(rq)) {
353 			err = PTR_ERR(rq);
354 			pr_err("Failed to create request for %s, err=%d\n",
355 			       engine->name, err);
356 			goto fini;
357 		}
358 
359 		i915_request_get(rq);
360 
361 		*h.batch = MI_BATCH_BUFFER_END;
362 		intel_gt_chipset_flush(engine->gt);
363 
364 		i915_request_add(rq);
365 
366 		timeout = 0;
367 		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
368 			timeout = i915_request_wait(rq, 0,
369 						    MAX_SCHEDULE_TIMEOUT);
370 		if (intel_gt_is_wedged(gt))
371 			timeout = -EIO;
372 
373 		i915_request_put(rq);
374 
375 		if (timeout < 0) {
376 			err = timeout;
377 			pr_err("Wait for request failed on %s, err=%d\n",
378 			       engine->name, err);
379 			goto fini;
380 		}
381 	}
382 
383 fini:
384 	hang_fini(&h);
385 	return err;
386 }
387 
388 static bool wait_for_idle(struct intel_engine_cs *engine)
389 {
390 	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
391 }
392 
393 static int igt_reset_nop(void *arg)
394 {
395 	struct intel_gt *gt = arg;
396 	struct i915_gpu_error *global = &gt->i915->gpu_error;
397 	struct intel_engine_cs *engine;
398 	unsigned int reset_count, count;
399 	enum intel_engine_id id;
400 	IGT_TIMEOUT(end_time);
401 	int err = 0;
402 
403 	/* Check that we can reset during non-user portions of requests */
404 
405 	reset_count = i915_reset_count(global);
406 	count = 0;
407 	do {
408 		for_each_engine(engine, gt, id) {
409 			struct intel_context *ce;
410 			int i;
411 
412 			ce = intel_context_create(engine);
413 			if (IS_ERR(ce)) {
414 				err = PTR_ERR(ce);
415 				break;
416 			}
417 
418 			for (i = 0; i < 16; i++) {
419 				struct i915_request *rq;
420 
421 				rq = intel_context_create_request(ce);
422 				if (IS_ERR(rq)) {
423 					err = PTR_ERR(rq);
424 					break;
425 				}
426 
427 				i915_request_add(rq);
428 			}
429 
430 			intel_context_put(ce);
431 		}
432 
433 		igt_global_reset_lock(gt);
434 		intel_gt_reset(gt, ALL_ENGINES, NULL);
435 		igt_global_reset_unlock(gt);
436 
437 		if (intel_gt_is_wedged(gt)) {
438 			err = -EIO;
439 			break;
440 		}
441 
442 		if (i915_reset_count(global) != reset_count + ++count) {
443 			pr_err("Full GPU reset not recorded!\n");
444 			err = -EINVAL;
445 			break;
446 		}
447 
448 		err = igt_flush_test(gt->i915);
449 		if (err)
450 			break;
451 	} while (time_before(jiffies, end_time));
452 	pr_info("%s: %d resets\n", __func__, count);
453 
454 	if (igt_flush_test(gt->i915))
455 		err = -EIO;
456 	return err;
457 }
458 
459 static int igt_reset_nop_engine(void *arg)
460 {
461 	struct intel_gt *gt = arg;
462 	struct i915_gpu_error *global = &gt->i915->gpu_error;
463 	struct intel_engine_cs *engine;
464 	enum intel_engine_id id;
465 
466 	/* Check that we can engine-reset during non-user portions */
467 
468 	if (!intel_has_reset_engine(gt))
469 		return 0;
470 
471 	for_each_engine(engine, gt, id) {
472 		unsigned int reset_count, reset_engine_count, count;
473 		struct intel_context *ce;
474 		IGT_TIMEOUT(end_time);
475 		int err;
476 
477 		ce = intel_context_create(engine);
478 		if (IS_ERR(ce))
479 			return PTR_ERR(ce);
480 
481 		reset_count = i915_reset_count(global);
482 		reset_engine_count = i915_reset_engine_count(global, engine);
483 		count = 0;
484 
485 		engine_heartbeat_disable(engine);
486 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
487 		do {
488 			int i;
489 
490 			if (!wait_for_idle(engine)) {
491 				pr_err("%s failed to idle before reset\n",
492 				       engine->name);
493 				err = -EIO;
494 				break;
495 			}
496 
497 			for (i = 0; i < 16; i++) {
498 				struct i915_request *rq;
499 
500 				rq = intel_context_create_request(ce);
501 				if (IS_ERR(rq)) {
502 					err = PTR_ERR(rq);
503 					break;
504 				}
505 
506 				i915_request_add(rq);
507 			}
508 			err = intel_engine_reset(engine, NULL);
509 			if (err) {
510 				pr_err("i915_reset_engine failed\n");
511 				break;
512 			}
513 
514 			if (i915_reset_count(global) != reset_count) {
515 				pr_err("Full GPU reset recorded! (engine reset expected)\n");
516 				err = -EINVAL;
517 				break;
518 			}
519 
520 			if (i915_reset_engine_count(global, engine) !=
521 			    reset_engine_count + ++count) {
522 				pr_err("%s engine reset not recorded!\n",
523 				       engine->name);
524 				err = -EINVAL;
525 				break;
526 			}
527 		} while (time_before(jiffies, end_time));
528 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
529 		engine_heartbeat_enable(engine);
530 
531 		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
532 
533 		intel_context_put(ce);
534 		if (igt_flush_test(gt->i915))
535 			err = -EIO;
536 		if (err)
537 			return err;
538 	}
539 
540 	return 0;
541 }
542 
543 static int __igt_reset_engine(struct intel_gt *gt, bool active)
544 {
545 	struct i915_gpu_error *global = &gt->i915->gpu_error;
546 	struct intel_engine_cs *engine;
547 	enum intel_engine_id id;
548 	struct hang h;
549 	int err = 0;
550 
	/*
	 * Check that we can issue an engine reset, either while the engine
	 * is idle (in which case the reset is a no-op) or while it is busy
	 * executing our hanging batch.
	 */
552 
553 	if (!intel_has_reset_engine(gt))
554 		return 0;
555 
556 	if (active) {
557 		err = hang_init(&h, gt);
558 		if (err)
559 			return err;
560 	}
561 
562 	for_each_engine(engine, gt, id) {
563 		unsigned int reset_count, reset_engine_count;
564 		IGT_TIMEOUT(end_time);
565 
566 		if (active && !intel_engine_can_store_dword(engine))
567 			continue;
568 
569 		if (!wait_for_idle(engine)) {
570 			pr_err("%s failed to idle before reset\n",
571 			       engine->name);
572 			err = -EIO;
573 			break;
574 		}
575 
576 		reset_count = i915_reset_count(global);
577 		reset_engine_count = i915_reset_engine_count(global, engine);
578 
579 		engine_heartbeat_disable(engine);
580 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
581 		do {
582 			if (active) {
583 				struct i915_request *rq;
584 
585 				rq = hang_create_request(&h, engine);
586 				if (IS_ERR(rq)) {
587 					err = PTR_ERR(rq);
588 					break;
589 				}
590 
591 				i915_request_get(rq);
592 				i915_request_add(rq);
593 
594 				if (!wait_until_running(&h, rq)) {
595 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
596 
597 					pr_err("%s: Failed to start request %llx, at %x\n",
598 					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
599 					intel_engine_dump(engine, &p,
600 							  "%s\n", engine->name);
601 
602 					i915_request_put(rq);
603 					err = -EIO;
604 					break;
605 				}
606 
607 				i915_request_put(rq);
608 			}
609 
610 			err = intel_engine_reset(engine, NULL);
611 			if (err) {
612 				pr_err("i915_reset_engine failed\n");
613 				break;
614 			}
615 
616 			if (i915_reset_count(global) != reset_count) {
617 				pr_err("Full GPU reset recorded! (engine reset expected)\n");
618 				err = -EINVAL;
619 				break;
620 			}
621 
622 			if (i915_reset_engine_count(global, engine) !=
623 			    ++reset_engine_count) {
624 				pr_err("%s engine reset not recorded!\n",
625 				       engine->name);
626 				err = -EINVAL;
627 				break;
628 			}
629 		} while (time_before(jiffies, end_time));
630 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
631 		engine_heartbeat_enable(engine);
632 
633 		if (err)
634 			break;
635 
636 		err = igt_flush_test(gt->i915);
637 		if (err)
638 			break;
639 	}
640 
641 	if (intel_gt_is_wedged(gt))
642 		err = -EIO;
643 
644 	if (active)
645 		hang_fini(&h);
646 
647 	return err;
648 }
649 
650 static int igt_reset_idle_engine(void *arg)
651 {
652 	return __igt_reset_engine(arg, false);
653 }
654 
655 static int igt_reset_active_engine(void *arg)
656 {
657 	return __igt_reset_engine(arg, true);
658 }
659 
660 struct active_engine {
661 	struct task_struct *task;
662 	struct intel_engine_cs *engine;
663 	unsigned long resets;
664 	unsigned int flags;
665 };
666 
667 #define TEST_ACTIVE	BIT(0)
668 #define TEST_OTHERS	BIT(1)
669 #define TEST_SELF	BIT(2)
670 #define TEST_PRIORITY	BIT(3)
671 
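/*
 * Wait for a background request to complete; if it has not done so
 * within 5s, dump the trace and wedge the GPU.
 */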
672 static int active_request_put(struct i915_request *rq)
673 {
674 	int err = 0;
675 
676 	if (!rq)
677 		return 0;
678 
679 	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
680 		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
681 			  rq->engine->name,
682 			  rq->fence.context,
683 			  rq->fence.seqno);
684 		GEM_TRACE_DUMP();
685 
686 		intel_gt_set_wedged(rq->engine->gt);
687 		err = -EIO;
688 	}
689 
690 	i915_request_put(rq);
691 
692 	return err;
693 }
694 
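/*
 * kthread body used by __igt_reset_engines(): keep the target engine
 * busy with a rolling window of requests spread across a few contexts,
 * optionally at randomised priorities, until kthread_stop() is called.
 */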
695 static int active_engine(void *data)
696 {
697 	I915_RND_STATE(prng);
698 	struct active_engine *arg = data;
699 	struct intel_engine_cs *engine = arg->engine;
700 	struct i915_request *rq[8] = {};
701 	struct intel_context *ce[ARRAY_SIZE(rq)];
702 	unsigned long count;
703 	int err = 0;
704 
705 	for (count = 0; count < ARRAY_SIZE(ce); count++) {
706 		ce[count] = intel_context_create(engine);
707 		if (IS_ERR(ce[count])) {
708 			err = PTR_ERR(ce[count]);
			/* release the contexts created so far, including ce[0] */
			while (count--)
				intel_context_put(ce[count]);
711 			return err;
712 		}
713 	}
714 
715 	count = 0;
716 	while (!kthread_should_stop()) {
717 		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
718 		struct i915_request *old = rq[idx];
719 		struct i915_request *new;
720 
721 		new = intel_context_create_request(ce[idx]);
722 		if (IS_ERR(new)) {
723 			err = PTR_ERR(new);
724 			break;
725 		}
726 
727 		rq[idx] = i915_request_get(new);
728 		i915_request_add(new);
729 
730 		if (engine->schedule && arg->flags & TEST_PRIORITY) {
731 			struct i915_sched_attr attr = {
732 				.priority =
733 					i915_prandom_u32_max_state(512, &prng),
734 			};
735 			engine->schedule(rq[idx], &attr);
736 		}
737 
738 		err = active_request_put(old);
739 		if (err)
740 			break;
741 
742 		cond_resched();
743 	}
744 
745 	for (count = 0; count < ARRAY_SIZE(rq); count++) {
746 		int err__ = active_request_put(rq[count]);
747 
748 		/* Keep the first error */
749 		if (!err)
750 			err = err__;
751 
752 		intel_context_put(ce[count]);
753 	}
754 
755 	return err;
756 }
757 
758 static int __igt_reset_engines(struct intel_gt *gt,
759 			       const char *test_name,
760 			       unsigned int flags)
761 {
762 	struct i915_gpu_error *global = &gt->i915->gpu_error;
763 	struct intel_engine_cs *engine, *other;
764 	enum intel_engine_id id, tmp;
765 	struct hang h;
766 	int err = 0;
767 
	/*
	 * Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */
771 
772 	if (!intel_has_reset_engine(gt))
773 		return 0;
774 
775 	if (flags & TEST_ACTIVE) {
776 		err = hang_init(&h, gt);
777 		if (err)
778 			return err;
779 
780 		if (flags & TEST_PRIORITY)
781 			h.ctx->sched.priority = 1024;
782 	}
783 
784 	for_each_engine(engine, gt, id) {
785 		struct active_engine threads[I915_NUM_ENGINES] = {};
786 		unsigned long device = i915_reset_count(global);
787 		unsigned long count = 0, reported;
788 		IGT_TIMEOUT(end_time);
789 
790 		if (flags & TEST_ACTIVE &&
791 		    !intel_engine_can_store_dword(engine))
792 			continue;
793 
794 		if (!wait_for_idle(engine)) {
795 			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
796 			       engine->name, test_name);
797 			err = -EIO;
798 			break;
799 		}
800 
801 		memset(threads, 0, sizeof(threads));
802 		for_each_engine(other, gt, tmp) {
803 			struct task_struct *tsk;
804 
805 			threads[tmp].resets =
806 				i915_reset_engine_count(global, other);
807 
808 			if (!(flags & TEST_OTHERS))
809 				continue;
810 
811 			if (other == engine && !(flags & TEST_SELF))
812 				continue;
813 
814 			threads[tmp].engine = other;
815 			threads[tmp].flags = flags;
816 
817 			tsk = kthread_run(active_engine, &threads[tmp],
818 					  "igt/%s", other->name);
819 			if (IS_ERR(tsk)) {
820 				err = PTR_ERR(tsk);
821 				goto unwind;
822 			}
823 
824 			threads[tmp].task = tsk;
825 			get_task_struct(tsk);
826 		}
827 
828 		yield(); /* start all threads before we begin */
829 
830 		engine_heartbeat_disable(engine);
831 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
832 		do {
833 			struct i915_request *rq = NULL;
834 
835 			if (flags & TEST_ACTIVE) {
836 				rq = hang_create_request(&h, engine);
837 				if (IS_ERR(rq)) {
838 					err = PTR_ERR(rq);
839 					break;
840 				}
841 
842 				i915_request_get(rq);
843 				i915_request_add(rq);
844 
845 				if (!wait_until_running(&h, rq)) {
846 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
847 
848 					pr_err("%s: Failed to start request %llx, at %x\n",
849 					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
850 					intel_engine_dump(engine, &p,
851 							  "%s\n", engine->name);
852 
853 					i915_request_put(rq);
854 					err = -EIO;
855 					break;
856 				}
857 			}
858 
859 			err = intel_engine_reset(engine, NULL);
860 			if (err) {
861 				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
862 				       engine->name, test_name, err);
863 				break;
864 			}
865 
866 			count++;
867 
868 			if (rq) {
869 				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
870 					struct drm_printer p =
871 						drm_info_printer(gt->i915->drm.dev);
872 
873 					pr_err("i915_reset_engine(%s:%s):"
874 					       " failed to complete request after reset\n",
875 					       engine->name, test_name);
876 					intel_engine_dump(engine, &p,
877 							  "%s\n", engine->name);
878 					i915_request_put(rq);
879 
880 					GEM_TRACE_DUMP();
881 					intel_gt_set_wedged(gt);
882 					err = -EIO;
883 					break;
884 				}
885 
886 				i915_request_put(rq);
887 			}
888 
889 			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
890 				struct drm_printer p =
891 					drm_info_printer(gt->i915->drm.dev);
892 
893 				pr_err("i915_reset_engine(%s:%s):"
894 				       " failed to idle after reset\n",
895 				       engine->name, test_name);
896 				intel_engine_dump(engine, &p,
897 						  "%s\n", engine->name);
898 
899 				err = -EIO;
900 				break;
901 			}
902 		} while (time_before(jiffies, end_time));
903 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
904 		engine_heartbeat_enable(engine);
905 
906 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
907 			engine->name, test_name, count);
908 
909 		reported = i915_reset_engine_count(global, engine);
910 		reported -= threads[engine->id].resets;
911 		if (reported != count) {
912 			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
913 			       engine->name, test_name, count, reported);
914 			if (!err)
915 				err = -EINVAL;
916 		}
917 
918 unwind:
919 		for_each_engine(other, gt, tmp) {
920 			int ret;
921 
922 			if (!threads[tmp].task)
923 				continue;
924 
925 			ret = kthread_stop(threads[tmp].task);
926 			if (ret) {
927 				pr_err("kthread for other engine %s failed, err=%d\n",
928 				       other->name, ret);
929 				if (!err)
930 					err = ret;
931 			}
932 			put_task_struct(threads[tmp].task);
933 
934 			if (other->uabi_class != engine->uabi_class &&
935 			    threads[tmp].resets !=
936 			    i915_reset_engine_count(global, other)) {
				pr_err("Innocent engine %s was reset (count=%lu)\n",
938 				       other->name,
939 				       i915_reset_engine_count(global, other) -
940 				       threads[tmp].resets);
941 				if (!err)
942 					err = -EINVAL;
943 			}
944 		}
945 
946 		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%lu)!\n",
948 			       i915_reset_count(global) - device);
949 			if (!err)
950 				err = -EINVAL;
951 		}
952 
953 		if (err)
954 			break;
955 
956 		err = igt_flush_test(gt->i915);
957 		if (err)
958 			break;
959 	}
960 
961 	if (intel_gt_is_wedged(gt))
962 		err = -EIO;
963 
964 	if (flags & TEST_ACTIVE)
965 		hang_fini(&h);
966 
967 	return err;
968 }
969 
970 static int igt_reset_engines(void *arg)
971 {
972 	static const struct {
973 		const char *name;
974 		unsigned int flags;
975 	} phases[] = {
976 		{ "idle", 0 },
977 		{ "active", TEST_ACTIVE },
978 		{ "others-idle", TEST_OTHERS },
979 		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
980 		{
981 			"others-priority",
982 			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
983 		},
984 		{
985 			"self-priority",
986 			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
987 		},
988 		{ }
989 	};
990 	struct intel_gt *gt = arg;
991 	typeof(*phases) *p;
992 	int err;
993 
994 	for (p = phases; p->name; p++) {
995 		if (p->flags & TEST_PRIORITY) {
996 			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
997 				continue;
998 		}
999 
1000 		err = __igt_reset_engines(arg, p->name, p->flags);
1001 		if (err)
1002 			return err;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
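/*
 * Pretend hangcheck fired: reset the requested engines directly and
 * return the global reset count sampled beforehand, so that callers can
 * check whether a full GPU reset was subsequently recorded.
 */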
1008 static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
1009 {
1010 	u32 count = i915_reset_count(&gt->i915->gpu_error);
1011 
1012 	intel_gt_reset(gt, mask, NULL);
1013 
1014 	return count;
1015 }
1016 
1017 static int igt_reset_wait(void *arg)
1018 {
1019 	struct intel_gt *gt = arg;
1020 	struct i915_gpu_error *global = &gt->i915->gpu_error;
1021 	struct intel_engine_cs *engine = gt->engine[RCS0];
1022 	struct i915_request *rq;
1023 	unsigned int reset_count;
1024 	struct hang h;
1025 	long timeout;
1026 	int err;
1027 
1028 	if (!engine || !intel_engine_can_store_dword(engine))
1029 		return 0;
1030 
1031 	/* Check that we detect a stuck waiter and issue a reset */
1032 
1033 	igt_global_reset_lock(gt);
1034 
1035 	err = hang_init(&h, gt);
1036 	if (err)
1037 		goto unlock;
1038 
1039 	rq = hang_create_request(&h, engine);
1040 	if (IS_ERR(rq)) {
1041 		err = PTR_ERR(rq);
1042 		goto fini;
1043 	}
1044 
1045 	i915_request_get(rq);
1046 	i915_request_add(rq);
1047 
1048 	if (!wait_until_running(&h, rq)) {
1049 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1050 
1051 		pr_err("%s: Failed to start request %llx, at %x\n",
1052 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
1053 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1054 
1055 		intel_gt_set_wedged(gt);
1056 
1057 		err = -EIO;
1058 		goto out_rq;
1059 	}
1060 
1061 	reset_count = fake_hangcheck(gt, ALL_ENGINES);
1062 
1063 	timeout = i915_request_wait(rq, 0, 10);
1064 	if (timeout < 0) {
1065 		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
1066 		       timeout);
1067 		err = timeout;
1068 		goto out_rq;
1069 	}
1070 
1071 	if (i915_reset_count(global) == reset_count) {
1072 		pr_err("No GPU reset recorded!\n");
1073 		err = -EINVAL;
1074 		goto out_rq;
1075 	}
1076 
1077 out_rq:
1078 	i915_request_put(rq);
1079 fini:
1080 	hang_fini(&h);
1081 unlock:
1082 	igt_global_reset_unlock(gt);
1083 
1084 	if (intel_gt_is_wedged(gt))
1085 		return -EIO;
1086 
1087 	return err;
1088 }
1089 
1090 struct evict_vma {
1091 	struct completion completion;
1092 	struct i915_vma *vma;
1093 };
1094 
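/*
 * Try to evict the target vma while it is still busy with the hanging
 * batch; the eviction has to wait for that request and should only be
 * released by the subsequent reset. The completion is signalled first
 * so the parent knows the thread is about to block.
 */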
1095 static int evict_vma(void *data)
1096 {
1097 	struct evict_vma *arg = data;
1098 	struct i915_address_space *vm = arg->vma->vm;
1099 	struct drm_mm_node evict = arg->vma->node;
1100 	int err;
1101 
1102 	complete(&arg->completion);
1103 
1104 	mutex_lock(&vm->mutex);
1105 	err = i915_gem_evict_for_node(vm, &evict, 0);
1106 	mutex_unlock(&vm->mutex);
1107 
1108 	return err;
1109 }
1110 
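/*
 * As evict_vma(), but exercise the fence register path instead: switch
 * the still-busy vma to Y-tiling and reacquire its fence, which has to
 * wait behind the hanging request before the fence can be updated.
 */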
1111 static int evict_fence(void *data)
1112 {
1113 	struct evict_vma *arg = data;
1114 	int err;
1115 
1116 	complete(&arg->completion);
1117 
1118 	/* Mark the fence register as dirty to force the mmio update. */
1119 	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
1120 	if (err) {
1121 		pr_err("Invalid Y-tiling settings; err:%d\n", err);
1122 		return err;
1123 	}
1124 
1125 	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
1126 	if (err) {
1127 		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
1128 		return err;
1129 	}
1130 
1131 	err = i915_vma_pin_fence(arg->vma);
1132 	i915_vma_unpin(arg->vma);
1133 	if (err) {
1134 		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
1135 		return err;
1136 	}
1137 
1138 	i915_vma_unpin_fence(arg->vma);
1139 
1140 	return 0;
1141 }
1142 
1143 static int __igt_reset_evict_vma(struct intel_gt *gt,
1144 				 struct i915_address_space *vm,
1145 				 int (*fn)(void *),
1146 				 unsigned int flags)
1147 {
1148 	struct intel_engine_cs *engine = gt->engine[RCS0];
1149 	struct drm_i915_gem_object *obj;
1150 	struct task_struct *tsk = NULL;
1151 	struct i915_request *rq;
1152 	struct evict_vma arg;
1153 	struct hang h;
1154 	unsigned int pin_flags;
1155 	int err;
1156 
1157 	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
1158 		return 0;
1159 
1160 	if (!engine || !intel_engine_can_store_dword(engine))
1161 		return 0;
1162 
1163 	/* Check that we can recover an unbind stuck on a hanging request */
1164 
1165 	err = hang_init(&h, gt);
1166 	if (err)
1167 		return err;
1168 
1169 	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
1170 	if (IS_ERR(obj)) {
1171 		err = PTR_ERR(obj);
1172 		goto fini;
1173 	}
1174 
1175 	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
1176 		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
1177 		if (err) {
1178 			pr_err("Invalid X-tiling settings; err:%d\n", err);
1179 			goto out_obj;
1180 		}
1181 	}
1182 
1183 	arg.vma = i915_vma_instance(obj, vm, NULL);
1184 	if (IS_ERR(arg.vma)) {
1185 		err = PTR_ERR(arg.vma);
1186 		goto out_obj;
1187 	}
1188 
1189 	rq = hang_create_request(&h, engine);
1190 	if (IS_ERR(rq)) {
1191 		err = PTR_ERR(rq);
1192 		goto out_obj;
1193 	}
1194 
1195 	pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
1196 
1197 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
1198 		pin_flags |= PIN_MAPPABLE;
1199 
1200 	err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
1201 	if (err) {
1202 		i915_request_add(rq);
1203 		goto out_obj;
1204 	}
1205 
1206 	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
1207 		err = i915_vma_pin_fence(arg.vma);
1208 		if (err) {
1209 			pr_err("Unable to pin X-tiled fence; err:%d\n", err);
1210 			i915_vma_unpin(arg.vma);
1211 			i915_request_add(rq);
1212 			goto out_obj;
1213 		}
1214 	}
1215 
1216 	i915_vma_lock(arg.vma);
1217 	err = i915_request_await_object(rq, arg.vma->obj,
1218 					flags & EXEC_OBJECT_WRITE);
1219 	if (err == 0)
1220 		err = i915_vma_move_to_active(arg.vma, rq, flags);
1221 	i915_vma_unlock(arg.vma);
1222 
1223 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
1224 		i915_vma_unpin_fence(arg.vma);
1225 	i915_vma_unpin(arg.vma);
1226 
1227 	i915_request_get(rq);
1228 	i915_request_add(rq);
1229 	if (err)
1230 		goto out_rq;
1231 
1232 	if (!wait_until_running(&h, rq)) {
1233 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1234 
1235 		pr_err("%s: Failed to start request %llx, at %x\n",
1236 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
1237 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1238 
1239 		intel_gt_set_wedged(gt);
1240 		goto out_reset;
1241 	}
1242 
1243 	init_completion(&arg.completion);
1244 
1245 	tsk = kthread_run(fn, &arg, "igt/evict_vma");
1246 	if (IS_ERR(tsk)) {
1247 		err = PTR_ERR(tsk);
1248 		tsk = NULL;
1249 		goto out_reset;
1250 	}
1251 	get_task_struct(tsk);
1252 
1253 	wait_for_completion(&arg.completion);
1254 
1255 	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
1256 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1257 
1258 		pr_err("igt/evict_vma kthread did not wait\n");
1259 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1260 
1261 		intel_gt_set_wedged(gt);
1262 		goto out_reset;
1263 	}
1264 
1265 out_reset:
1266 	igt_global_reset_lock(gt);
1267 	fake_hangcheck(gt, rq->engine->mask);
1268 	igt_global_reset_unlock(gt);
1269 
1270 	if (tsk) {
1271 		struct intel_wedge_me w;
1272 
		/*
		 * The reset, even indirectly, should take less than 10ms;
		 * give it 100ms of slack before declaring the GPU wedged.
		 */
1274 		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
1275 			err = kthread_stop(tsk);
1276 
1277 		put_task_struct(tsk);
1278 	}
1279 
1280 out_rq:
1281 	i915_request_put(rq);
1282 out_obj:
1283 	i915_gem_object_put(obj);
1284 fini:
1285 	hang_fini(&h);
1286 	if (intel_gt_is_wedged(gt))
1287 		return -EIO;
1288 
1289 	return err;
1290 }
1291 
1292 static int igt_reset_evict_ggtt(void *arg)
1293 {
1294 	struct intel_gt *gt = arg;
1295 
1296 	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
1297 				     evict_vma, EXEC_OBJECT_WRITE);
1298 }
1299 
1300 static int igt_reset_evict_ppgtt(void *arg)
1301 {
1302 	struct intel_gt *gt = arg;
1303 	struct i915_ppgtt *ppgtt;
1304 	int err;
1305 
1306 	/* aliasing == global gtt locking, covered above */
1307 	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
1308 		return 0;
1309 
1310 	ppgtt = i915_ppgtt_create(gt);
1311 	if (IS_ERR(ppgtt))
1312 		return PTR_ERR(ppgtt);
1313 
1314 	err = __igt_reset_evict_vma(gt, &ppgtt->vm,
1315 				    evict_vma, EXEC_OBJECT_WRITE);
1316 	i915_vm_put(&ppgtt->vm);
1317 
1318 	return err;
1319 }
1320 
1321 static int igt_reset_evict_fence(void *arg)
1322 {
1323 	struct intel_gt *gt = arg;
1324 
1325 	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
1326 				     evict_fence, EXEC_OBJECT_NEEDS_FENCE);
1327 }
1328 
1329 static int wait_for_others(struct intel_gt *gt,
1330 			   struct intel_engine_cs *exclude)
1331 {
1332 	struct intel_engine_cs *engine;
1333 	enum intel_engine_id id;
1334 
1335 	for_each_engine(engine, gt, id) {
1336 		if (engine == exclude)
1337 			continue;
1338 
1339 		if (!wait_for_idle(engine))
1340 			return -EIO;
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 static int igt_reset_queue(void *arg)
1347 {
1348 	struct intel_gt *gt = arg;
1349 	struct i915_gpu_error *global = &gt->i915->gpu_error;
1350 	struct intel_engine_cs *engine;
1351 	enum intel_engine_id id;
1352 	struct hang h;
1353 	int err;
1354 
1355 	/* Check that we replay pending requests following a hang */
1356 
1357 	igt_global_reset_lock(gt);
1358 
1359 	err = hang_init(&h, gt);
1360 	if (err)
1361 		goto unlock;
1362 
1363 	for_each_engine(engine, gt, id) {
1364 		struct i915_request *prev;
1365 		IGT_TIMEOUT(end_time);
1366 		unsigned int count;
1367 
1368 		if (!intel_engine_can_store_dword(engine))
1369 			continue;
1370 
1371 		prev = hang_create_request(&h, engine);
1372 		if (IS_ERR(prev)) {
1373 			err = PTR_ERR(prev);
1374 			goto fini;
1375 		}
1376 
1377 		i915_request_get(prev);
1378 		i915_request_add(prev);
1379 
1380 		count = 0;
1381 		do {
1382 			struct i915_request *rq;
1383 			unsigned int reset_count;
1384 
1385 			rq = hang_create_request(&h, engine);
1386 			if (IS_ERR(rq)) {
1387 				err = PTR_ERR(rq);
1388 				goto fini;
1389 			}
1390 
1391 			i915_request_get(rq);
1392 			i915_request_add(rq);
1393 
1394 			/*
1395 			 * XXX We don't handle resetting the kernel context
1396 			 * very well. If we trigger a device reset twice in
1397 			 * quick succession while the kernel context is
1398 			 * executing, we may end up skipping the breadcrumb.
1399 			 * This is really only a problem for the selftest as
1400 			 * normally there is a large interlude between resets
1401 			 * (hangcheck), or we focus on resetting just one
1402 			 * engine and so avoid repeatedly resetting innocents.
1403 			 */
1404 			err = wait_for_others(gt, engine);
1405 			if (err) {
1406 				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
1407 				       __func__, engine->name);
1408 				i915_request_put(rq);
1409 				i915_request_put(prev);
1410 
1411 				GEM_TRACE_DUMP();
1412 				intel_gt_set_wedged(gt);
1413 				goto fini;
1414 			}
1415 
1416 			if (!wait_until_running(&h, prev)) {
1417 				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1418 
1419 				pr_err("%s(%s): Failed to start request %llx, at %x\n",
1420 				       __func__, engine->name,
1421 				       prev->fence.seqno, hws_seqno(&h, prev));
1422 				intel_engine_dump(engine, &p,
1423 						  "%s\n", engine->name);
1424 
1425 				i915_request_put(rq);
1426 				i915_request_put(prev);
1427 
1428 				intel_gt_set_wedged(gt);
1429 
1430 				err = -EIO;
1431 				goto fini;
1432 			}
1433 
1434 			reset_count = fake_hangcheck(gt, BIT(id));
1435 
1436 			if (prev->fence.error != -EIO) {
1437 				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
1438 				       prev->fence.error);
1439 				i915_request_put(rq);
1440 				i915_request_put(prev);
1441 				err = -EINVAL;
1442 				goto fini;
1443 			}
1444 
1445 			if (rq->fence.error) {
1446 				pr_err("Fence error status not zero [%d] after unrelated reset\n",
1447 				       rq->fence.error);
1448 				i915_request_put(rq);
1449 				i915_request_put(prev);
1450 				err = -EINVAL;
1451 				goto fini;
1452 			}
1453 
1454 			if (i915_reset_count(global) == reset_count) {
1455 				pr_err("No GPU reset recorded!\n");
1456 				i915_request_put(rq);
1457 				i915_request_put(prev);
1458 				err = -EINVAL;
1459 				goto fini;
1460 			}
1461 
1462 			i915_request_put(prev);
1463 			prev = rq;
1464 			count++;
1465 		} while (time_before(jiffies, end_time));
1466 		pr_info("%s: Completed %d resets\n", engine->name, count);
1467 
1468 		*h.batch = MI_BATCH_BUFFER_END;
1469 		intel_gt_chipset_flush(engine->gt);
1470 
1471 		i915_request_put(prev);
1472 
1473 		err = igt_flush_test(gt->i915);
1474 		if (err)
1475 			break;
1476 	}
1477 
1478 fini:
1479 	hang_fini(&h);
1480 unlock:
1481 	igt_global_reset_unlock(gt);
1482 
1483 	if (intel_gt_is_wedged(gt))
1484 		return -EIO;
1485 
1486 	return err;
1487 }
1488 
1489 static int igt_handle_error(void *arg)
1490 {
1491 	struct intel_gt *gt = arg;
1492 	struct i915_gpu_error *global = &gt->i915->gpu_error;
1493 	struct intel_engine_cs *engine = gt->engine[RCS0];
1494 	struct hang h;
1495 	struct i915_request *rq;
1496 	struct i915_gpu_coredump *error;
1497 	int err;
1498 
1499 	/* Check that we can issue a global GPU and engine reset */
1500 
1501 	if (!intel_has_reset_engine(gt))
1502 		return 0;
1503 
1504 	if (!engine || !intel_engine_can_store_dword(engine))
1505 		return 0;
1506 
1507 	err = hang_init(&h, gt);
1508 	if (err)
1509 		return err;
1510 
1511 	rq = hang_create_request(&h, engine);
1512 	if (IS_ERR(rq)) {
1513 		err = PTR_ERR(rq);
1514 		goto err_fini;
1515 	}
1516 
1517 	i915_request_get(rq);
1518 	i915_request_add(rq);
1519 
1520 	if (!wait_until_running(&h, rq)) {
1521 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1522 
1523 		pr_err("%s: Failed to start request %llx, at %x\n",
1524 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
1525 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1526 
1527 		intel_gt_set_wedged(gt);
1528 
1529 		err = -EIO;
1530 		goto err_request;
1531 	}
1532 
1533 	/* Temporarily disable error capture */
1534 	error = xchg(&global->first_error, (void *)-1);
1535 
1536 	intel_gt_handle_error(gt, engine->mask, 0, NULL);
1537 
1538 	xchg(&global->first_error, error);
1539 
1540 	if (rq->fence.error != -EIO) {
1541 		pr_err("Guilty request not identified!\n");
1542 		err = -EINVAL;
1543 		goto err_request;
1544 	}
1545 
1546 err_request:
1547 	i915_request_put(rq);
1548 err_fini:
1549 	hang_fini(&h);
1550 	return err;
1551 }
1552 
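/*
 * Perform an engine reset from inside the given atomic section (with the
 * submission tasklet disabled) to check that the reset path never sleeps
 * and so remains safe to call from atomic context.
 */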
1553 static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
1554 				     const struct igt_atomic_section *p,
1555 				     const char *mode)
1556 {
1557 	struct tasklet_struct * const t = &engine->execlists.tasklet;
1558 	int err;
1559 
1560 	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
1561 		  engine->name, mode, p->name);
1562 
1563 	tasklet_disable(t);
1564 	p->critical_section_begin();
1565 
1566 	err = intel_engine_reset(engine, NULL);
1567 
1568 	p->critical_section_end();
1569 	tasklet_enable(t);
1570 
1571 	if (err)
1572 		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
1573 		       engine->name, mode, p->name);
1574 
1575 	return err;
1576 }
1577 
1578 static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
1579 				   const struct igt_atomic_section *p)
1580 {
1581 	struct i915_request *rq;
1582 	struct hang h;
1583 	int err;
1584 
1585 	err = __igt_atomic_reset_engine(engine, p, "idle");
1586 	if (err)
1587 		return err;
1588 
1589 	err = hang_init(&h, engine->gt);
1590 	if (err)
1591 		return err;
1592 
1593 	rq = hang_create_request(&h, engine);
1594 	if (IS_ERR(rq)) {
1595 		err = PTR_ERR(rq);
1596 		goto out;
1597 	}
1598 
1599 	i915_request_get(rq);
1600 	i915_request_add(rq);
1601 
1602 	if (wait_until_running(&h, rq)) {
1603 		err = __igt_atomic_reset_engine(engine, p, "active");
1604 	} else {
1605 		pr_err("%s(%s): Failed to start request %llx, at %x\n",
1606 		       __func__, engine->name,
1607 		       rq->fence.seqno, hws_seqno(&h, rq));
1608 		intel_gt_set_wedged(engine->gt);
1609 		err = -EIO;
1610 	}
1611 
1612 	if (err == 0) {
1613 		struct intel_wedge_me w;
1614 
1615 		intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
1616 			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
1617 		if (intel_gt_is_wedged(engine->gt))
1618 			err = -EIO;
1619 	}
1620 
1621 	i915_request_put(rq);
1622 out:
1623 	hang_fini(&h);
1624 	return err;
1625 }
1626 
1627 static int igt_reset_engines_atomic(void *arg)
1628 {
1629 	struct intel_gt *gt = arg;
1630 	const typeof(*igt_atomic_phases) *p;
1631 	int err = 0;
1632 
1633 	/* Check that the engines resets are usable from atomic context */
1634 
1635 	if (!intel_has_reset_engine(gt))
1636 		return 0;
1637 
1638 	if (intel_uc_uses_guc_submission(&gt->uc))
1639 		return 0;
1640 
1641 	igt_global_reset_lock(gt);
1642 
1643 	/* Flush any requests before we get started and check basics */
1644 	if (!igt_force_reset(gt))
1645 		goto unlock;
1646 
1647 	for (p = igt_atomic_phases; p->name; p++) {
1648 		struct intel_engine_cs *engine;
1649 		enum intel_engine_id id;
1650 
1651 		for_each_engine(engine, gt, id) {
1652 			err = igt_atomic_reset_engine(engine, p);
1653 			if (err)
1654 				goto out;
1655 		}
1656 	}
1657 
1658 out:
1659 	/* As we poke around the guts, do a full reset before continuing. */
1660 	igt_force_reset(gt);
1661 unlock:
1662 	igt_global_reset_unlock(gt);
1663 
1664 	return err;
1665 }
1666 
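/*
 * Entry point for the "hangcheck" group of live selftests; these are
 * normally invoked through the i915 selftest machinery (e.g. the
 * i915.selftests module parameter) rather than called directly.
 */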
1667 int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
1668 {
1669 	static const struct i915_subtest tests[] = {
1670 		SUBTEST(igt_hang_sanitycheck),
1671 		SUBTEST(igt_reset_nop),
1672 		SUBTEST(igt_reset_nop_engine),
1673 		SUBTEST(igt_reset_idle_engine),
1674 		SUBTEST(igt_reset_active_engine),
1675 		SUBTEST(igt_reset_engines),
1676 		SUBTEST(igt_reset_engines_atomic),
1677 		SUBTEST(igt_reset_queue),
1678 		SUBTEST(igt_reset_wait),
1679 		SUBTEST(igt_reset_evict_ggtt),
1680 		SUBTEST(igt_reset_evict_ppgtt),
1681 		SUBTEST(igt_reset_evict_fence),
1682 		SUBTEST(igt_handle_error),
1683 	};
1684 	struct intel_gt *gt = &i915->gt;
1685 	intel_wakeref_t wakeref;
1686 	int err;
1687 
1688 	if (!intel_has_gpu_reset(gt))
1689 		return 0;
1690 
1691 	if (intel_gt_is_wedged(gt))
1692 		return -EIO; /* we're long past hope of a successful reset */
1693 
1694 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1695 
1696 	err = intel_gt_live_subtests(tests, gt);
1697 
1698 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1699 
1700 	return err;
1701 }
1702