// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <linux/kthread.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"

#include "i915_gem_evict.h"
#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "selftest_engine_heartbeat.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

#include "selftests/mock_drm.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/igt_gem_utils.h"

#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */

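/*
 * struct hang - scratch state for building requests that spin forever.
 *
 * @hws is a page used as a makeshift hardware status page: each request
 * writes its fence seqno into a per-context slot so the test can observe,
 * from the CPU, that the batch has started executing. @obj holds the batch
 * buffer itself, whose tail branches back to its own start so the request
 * never completes until the batch is rewritten or the engine is reset.
 */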
struct hang {
	struct intel_gt *gt;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	u32 *seqno;
	u32 *batch;
};

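/*
 * hang_init - allocate and map the context, status page and batch objects
 * used by the hanging requests. The status page is mapped write-back and
 * filled with a 0xff poison so that stale reads are easy to spot.
 */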
static int hang_init(struct hang *h, struct intel_gt *gt)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->gt = gt;

	h->ctx = kernel_context(gt->i915, NULL);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));

	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map_unlocked(h->obj,
						 i915_coherent_map_type(gt->i915, h->obj, false));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}

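/*
 * Each context gets its own u32 slot in the status page, indexed by the
 * fence context id and wrapped within the page, so concurrent spinners on
 * different engines do not trample each other's breadcrumbs.
 */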
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + offset_in_page(sizeof(u32) * rq->fence.context);
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

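/*
 * hang_create_request - build a request whose batch writes its seqno to the
 * status page and then spins forever by branching back to the start of the
 * batch with MI_BATCH_BUFFER_START. The batch is rebuilt in a fresh
 * internal object for each request, using the command encoding appropriate
 * to the GRAPHICS_VER of the device. The terminating MI_BATCH_BUFFER_END
 * is emitted after the branch and is only reached once the test rewrites
 * the start of the batch (see hang_fini()).
 */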
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct intel_gt *gt = h->gt;
	struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	void *vaddr;
	u32 *batch;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		i915_vm_put(vm);
		return ERR_CAST(obj);
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		i915_vm_put(vm);
		return ERR_CAST(vaddr);
	}

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	h->obj = obj;
	h->batch = vaddr;

	vma = i915_vma_instance(h->obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_vm_put(vm);
		return ERR_CAST(vma);
	}

	hws = i915_vma_instance(h->hws, vm, NULL);
	if (IS_ERR(hws)) {
		i915_vm_put(vm);
		return ERR_CAST(hws);
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_vm_put(vm);
		return ERR_PTR(err);
	}

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(h->ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = h->batch;
	if (GRAPHICS_VER(gt->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*batch++ = lower_32_bits(vma->node.start);
		*batch++ = upper_32_bits(vma->node.start);
	} else if (GRAPHICS_VER(gt->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
		*batch++ = lower_32_bits(vma->node.start);
	} else if (GRAPHICS_VER(gt->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	}
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
	intel_gt_chipset_flush(engine->gt);

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (GRAPHICS_VER(gt->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	i915_vm_put(vm);
	return err ? ERR_PTR(err) : rq;
}

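/* Read back the last seqno the spinning batch wrote for this context. */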
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE / sizeof(u32))]);
}

static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(h->gt);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->gt->i915);
}

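/*
 * Poll the status page until the request's seqno appears: first a short
 * 10us busy-wait, then a sleeping wait of up to a second. Returns true
 * once the batch is known to be executing on the GPU.
 */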
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}

static int igt_hang_sanitycheck(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_request *rq;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Basic check that we can execute our hanging batch */

	err = hang_init(&h, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_wedge_me w;
		long timeout;

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = hang_create_request(&h, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			pr_err("Failed to create request for %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}

		i915_request_get(rq);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_add(rq);

		timeout = 0;
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			timeout = i915_request_wait(rq, 0,
						    MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(gt))
			timeout = -EIO;

		i915_request_put(rq);

		if (timeout < 0) {
			err = timeout;
			pr_err("Wait for request failed on %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}
	}

fini:
	hang_fini(&h);
	return err;
}

static bool wait_for_idle(struct intel_engine_cs *engine)
{
	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}

static int igt_reset_nop(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	unsigned int reset_count, count;
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	int err = 0;

	/* Check that we can reset during non-user portions of requests */

	reset_count = i915_reset_count(global);
	count = 0;
	do {
		for_each_engine(engine, gt, id) {
			struct intel_context *ce;
			int i;

			ce = intel_context_create(engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				pr_err("[%s] Create context failed: %d!\n", engine->name, err);
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create request failed: %d!\n",
					       engine->name, err);
					break;
				}

				i915_request_add(rq);
			}

			intel_context_put(ce);
		}

		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);

		if (intel_gt_is_wedged(gt)) {
			pr_err("GT is wedged!\n");
			err = -EIO;
			break;
		}

		if (i915_reset_count(global) != reset_count + ++count) {
			pr_err("Reset not recorded: %d vs %d + %d!\n",
			       i915_reset_count(global), reset_count, count);
			err = -EINVAL;
			break;
		}

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("Flush failed: %d!\n", err);
			break;
		}
	} while (time_before(jiffies, end_time));
	pr_info("%s: %d resets\n", __func__, count);

	if (igt_flush_test(gt->i915)) {
		pr_err("Post flush failed!\n");
		err = -EIO;
	}

	return err;
}


static int igt_reset_nop_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can engine-reset during non-user portions */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count, count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		if (intel_engine_uses_guc(engine)) {
			/*
			 * Engine level resets are triggered by GuC when a hang
			 * is detected. They can't be triggered by the KMD any
			 * more. Thus a nop batch cannot be used as a reset test.
			 */
			continue;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
			return PTR_ERR(ce);
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);
		count = 0;

		st_engine_heartbeat_disable(engine);
		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
					    &gt->reset.flags));
		do {
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);

					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("intel_engine_reset(%s) failed, err:%d\n",
				       engine->name, err);
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    reset_engine_count + ++count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);

		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);

		intel_context_put(ce);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

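/*
 * Arm the selftest-only reset_timeout fault injection so that subsequent
 * engine resets report -ETIMEDOUT (exercised by igt_reset_fail_engine()),
 * until cancel_reset_timeout() clears it again.
 */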
static void force_reset_timeout(struct intel_engine_cs *engine)
{
	engine->reset_timeout.probability = 999;
	atomic_set(&engine->reset_timeout.times, -1);
}

static void cancel_reset_timeout(struct intel_engine_cs *engine)
{
	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
}

static int igt_reset_fail_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can recover from engine-reset failures */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		/* Can't manually break the reset if i915 doesn't perform it */
		if (intel_engine_uses_guc(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
			return PTR_ERR(ce);
		}

		st_engine_heartbeat_disable(engine);
		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
					    &gt->reset.flags));

		force_reset_timeout(engine);
		err = intel_engine_reset(engine, NULL);
		cancel_reset_timeout(engine);
		if (err == 0) /* timeouts only generated on gen8+ */
			goto skip;

		count = 0;
		do {
			struct i915_request *last = NULL;
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < count % 15; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);
					if (last)
						i915_request_put(last);

					err = PTR_ERR(rq);
					goto out;
				}

				if (last)
					i915_request_put(last);
				last = i915_request_get(rq);
				i915_request_add(rq);
			}

			if (count & 1) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
						      engine->name, err);
					GEM_TRACE_DUMP();
					i915_request_put(last);
					break;
				}
			} else {
				force_reset_timeout(engine);
				err = intel_engine_reset(engine, NULL);
				cancel_reset_timeout(engine);
				if (err != -ETIMEDOUT) {
					pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
					       engine->name, err);
					i915_request_put(last);
					break;
				}
			}

			err = 0;
			if (last) {
				if (i915_request_wait(last, 0, HZ / 2) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					intel_engine_dump(engine, &p,
							  "%s(%s): failed to complete request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to complete request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					err = -EIO;
				}
				i915_request_put(last);
			}
			count++;
		} while (err == 0 && time_before(jiffies, end_time));
out:
		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		intel_context_put(ce);

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle (no-op) or active engine */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (active) {
		err = hang_init(&h, gt);
		if (err)
			return err;
	}

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count;
		unsigned long count;
		bool using_guc = intel_engine_uses_guc(engine);
		IGT_TIMEOUT(end_time);

		if (using_guc && !active)
			continue;

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);

		st_engine_heartbeat_disable(engine);
		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
					    &gt->reset.flags));
		count = 0;
		do {
			struct i915_request *rq = NULL;
			struct intel_selftest_saved_policy saved;
			int err2;

			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				break;
			}

			if (active) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create hang request failed: %d!\n",
					       engine->name, err);
					goto restore;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					goto restore;
				}
			}

			if (!using_guc) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					pr_err("intel_engine_reset(%s) failed, err:%d\n",
					       engine->name, err);
					goto skip;
				}
			}

			if (rq) {
				/* Ensure the reset happens and kills the engine */
				err = intel_selftest_wait_for_rq(rq);
				if (err)
					pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
					       engine->name, rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id.id, err);
			}

skip:
			if (rq)
				i915_request_put(rq);

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				goto restore;
			}

			/* GuC based resets are not logged per engine */
			if (!using_guc) {
				if (i915_reset_engine_count(global, engine) !=
				    ++reset_engine_count) {
					pr_err("%s engine reset not recorded!\n",
					       engine->name);
					err = -EINVAL;
					goto restore;
				}
			}

			count++;

restore:
			err2 = intel_selftest_restore_policy(engine, &saved);
			if (err2)
				pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
			if (err == 0)
				err = err2;
			if (err)
				break;
		} while (time_before(jiffies, end_time));
		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		pr_info("%s: Completed %lu %s resets\n",
			engine->name, count, active ? "active" : "idle");

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}

	if (intel_gt_is_wedged(gt)) {
		pr_err("GT is wedged!\n");
		err = -EIO;
	}

	if (active)
		hang_fini(&h);

	return err;
}

static int igt_reset_idle_engine(void *arg)
{
	return __igt_reset_engine(arg, false);
}

static int igt_reset_active_engine(void *arg)
{
	return __igt_reset_engine(arg, true);
}

struct active_engine {
	struct task_struct *task;
	struct intel_engine_cs *engine;
	unsigned long resets;
	unsigned int flags;
};

#define TEST_ACTIVE	BIT(0)
#define TEST_OTHERS	BIT(1)
#define TEST_SELF	BIT(2)
#define TEST_PRIORITY	BIT(3)

static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}

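/*
 * active_engine - kthread body that keeps an engine busy while another
 * engine is being reset. It rotates through a small ring of contexts,
 * submitting a new request and reaping the oldest each iteration, and
 * optionally (TEST_PRIORITY) assigns each request a random priority to
 * stir the scheduler.
 */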
static int active_engine(void *data)
{
	I915_RND_STATE(prng);
	struct active_engine *arg = data;
	struct intel_engine_cs *engine = arg->engine;
	struct i915_request *rq[8] = {};
	struct intel_context *ce[ARRAY_SIZE(rq)];
	unsigned long count;
	int err = 0;

	for (count = 0; count < ARRAY_SIZE(ce); count++) {
		ce[count] = intel_context_create(engine);
		if (IS_ERR(ce[count])) {
			err = PTR_ERR(ce[count]);
			pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
			while (count--)
				intel_context_put(ce[count]);
			return err;
		}
	}

	count = 0;
	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		new = intel_context_create_request(ce[idx]);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			pr_err("[%s] Create request #%d failed: %d!\n", engine->name, idx, err);
			break;
		}

		rq[idx] = i915_request_get(new);
		i915_request_add(new);

		if (engine->sched_engine->schedule && arg->flags & TEST_PRIORITY) {
			struct i915_sched_attr attr = {
				.priority =
					i915_prandom_u32_max_state(512, &prng),
			};
			engine->sched_engine->schedule(rq[idx], &attr);
		}

		err = active_request_put(old);
		if (err) {
			pr_err("[%s] Request put failed: %d!\n", engine->name, err);
			break;
		}

		cond_resched();
	}

	for (count = 0; count < ARRAY_SIZE(rq); count++) {
		int err__ = active_request_put(rq[count]);

		if (err__)
			pr_err("[%s] Request put #%ld failed: %d!\n", engine->name, count, err__);

		/* Keep the first error */
		if (!err)
			err = err__;

		intel_context_put(ce[count]);
	}

	return err;
}

static int __igt_reset_engines(struct intel_gt *gt,
			       const char *test_name,
			       unsigned int flags)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine, *other;
	struct active_engine *threads;
	enum intel_engine_id id, tmp;
	struct hang h;
	int err = 0;

	/*
	 * Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (flags & TEST_ACTIVE) {
		err = hang_init(&h, gt);
		if (err)
			return err;

		if (flags & TEST_PRIORITY)
			h.ctx->sched.priority = 1024;
	}

	threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
	if (!threads) {
		if (flags & TEST_ACTIVE)
			hang_fini(&h);
		return -ENOMEM;
	}

	for_each_engine(engine, gt, id) {
		unsigned long device = i915_reset_count(global);
		unsigned long count = 0, reported;
		bool using_guc = intel_engine_uses_guc(engine);
		IGT_TIMEOUT(end_time);

		if (flags & TEST_ACTIVE) {
			if (!intel_engine_can_store_dword(engine))
				continue;
		} else if (using_guc) {
			continue;
		}

		if (!wait_for_idle(engine)) {
			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
			       engine->name, test_name);
			err = -EIO;
			break;
		}

		memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
		for_each_engine(other, gt, tmp) {
			struct task_struct *tsk;

			threads[tmp].resets =
				i915_reset_engine_count(global, other);

			if (other == engine && !(flags & TEST_SELF))
				continue;

			if (other != engine && !(flags & TEST_OTHERS))
				continue;

			threads[tmp].engine = other;
			threads[tmp].flags = flags;

			tsk = kthread_run(active_engine, &threads[tmp],
					  "igt/%s", other->name);
			if (IS_ERR(tsk)) {
				err = PTR_ERR(tsk);
				pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
				goto unwind;
			}

			threads[tmp].task = tsk;
			get_task_struct(tsk);
		}

		yield(); /* start all threads before we begin */

		st_engine_heartbeat_disable_no_pm(engine);
		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
					    &gt->reset.flags));
		do {
			struct i915_request *rq = NULL;
			struct intel_selftest_saved_policy saved;
			int err2;

			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				break;
			}

			if (flags & TEST_ACTIVE) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create hang request failed: %d!\n",
					       engine->name, err);
					goto restore;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					goto restore;
				}
			} else {
				intel_engine_pm_get(engine);
			}

			if (!using_guc) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
					       engine->name, test_name, err);
					goto restore;
				}
			}

			if (rq) {
				/* Ensure the reset happens and kills the engine */
				err = intel_selftest_wait_for_rq(rq);
				if (err)
					pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
					       engine->name, rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id.id, err);
			}

			count++;

			if (rq) {
				if (rq->fence.error != -EIO) {
					pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id.id);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					goto restore;
				}

				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					pr_err("i915_reset_engine(%s:%s):"
					       " failed to complete request %llx:%lld after reset\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno);
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					goto restore;
				}

				i915_request_put(rq);
			}

			if (!(flags & TEST_ACTIVE))
				intel_engine_pm_put(engine);

			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(gt->i915->drm.dev);

				pr_err("i915_reset_engine(%s:%s):"
				       " failed to idle after reset\n",
				       engine->name, test_name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				goto restore;
			}

restore:
			err2 = intel_selftest_restore_policy(engine, &saved);
			if (err2)
				pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
			if (err == 0)
				err = err2;
			if (err)
				break;
		} while (time_before(jiffies, end_time));
		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable_no_pm(engine);

		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
			engine->name, test_name, count);

		/* GuC based resets are not logged per engine */
		if (!using_guc) {
			reported = i915_reset_engine_count(global, engine);
			reported -= threads[engine->id].resets;
			if (reported != count) {
				pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
				       engine->name, test_name, count, reported);
				if (!err)
					err = -EINVAL;
			}
		}

unwind:
		for_each_engine(other, gt, tmp) {
			int ret;

			if (!threads[tmp].task)
				continue;

			ret = kthread_stop(threads[tmp].task);
			if (ret) {
				pr_err("kthread for other engine %s failed, err=%d\n",
				       other->name, ret);
				if (!err)
					err = ret;
			}
			put_task_struct(threads[tmp].task);

			/* GuC based resets are not logged per engine */
			if (!using_guc) {
				if (other->uabi_class != engine->uabi_class &&
				    threads[tmp].resets !=
				    i915_reset_engine_count(global, other)) {
					pr_err("Innocent engine %s was reset (count=%ld)\n",
					       other->name,
					       i915_reset_engine_count(global, other) -
					       threads[tmp].resets);
					if (!err)
						err = -EINVAL;
				}
			}
		}

		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%ld)!\n",
			       i915_reset_count(global) - device);
			if (!err)
				err = -EINVAL;
		}

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}
	kfree(threads);

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (flags & TEST_ACTIVE)
		hang_fini(&h);

	return err;
}

static int igt_reset_engines(void *arg)
{
	static const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "idle", 0 },
		{ "active", TEST_ACTIVE },
		{ "others-idle", TEST_OTHERS },
		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
		{
			"others-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
		},
		{
			"self-priority",
			TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
		},
		{ }
	};
	struct intel_gt *gt = arg;
	typeof(*phases) *p;
	int err;

	for (p = phases; p->name; p++) {
		if (p->flags & TEST_PRIORITY) {
			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				continue;
		}

		err = __igt_reset_engines(arg, p->name, p->flags);
		if (err)
			return err;
	}

	return 0;
}

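/*
 * Pretend hangcheck fired: reset the chosen engines directly and return
 * the global reset count sampled beforehand so callers can verify that it
 * advanced.
 */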
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
	u32 count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, mask, NULL);

	return count;
}

static int igt_reset_wait(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct i915_request *rq;
	unsigned int reset_count;
	struct hang h;
	long timeout;
	int err;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we detect a stuck waiter and issue a reset */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		goto unlock;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto out_rq;
	}

	reset_count = fake_hangcheck(gt, ALL_ENGINES);

	timeout = i915_request_wait(rq, 0, 10);
	if (timeout < 0) {
		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
		       timeout);
		err = timeout;
		goto out_rq;
	}

	if (i915_reset_count(global) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
		goto out_rq;
	}

out_rq:
	i915_request_put(rq);
fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

struct evict_vma {
	struct completion completion;
	struct i915_vma *vma;
};

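/*
 * Kthread bodies for the reset-vs-eviction tests: each signals that it has
 * started and then attempts an operation that must wait on the hanging
 * batch, either evicting the vma's range from its address space or
 * stealing a fence register by re-tiling and re-pinning the vma.
 */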
static int evict_vma(void *data)
{
	struct evict_vma *arg = data;
	struct i915_address_space *vm = arg->vma->vm;
	struct drm_mm_node evict = arg->vma->node;
	int err;

	complete(&arg->completion);

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
	mutex_unlock(&vm->mutex);

	return err;
}

static int evict_fence(void *data)
{
	struct evict_vma *arg = data;
	int err;

	complete(&arg->completion);

	/* Mark the fence register as dirty to force the mmio update. */
	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
	if (err) {
		pr_err("Invalid Y-tiling settings; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
	if (err) {
		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin_fence(arg->vma);
	i915_vma_unpin(arg->vma);
	if (err) {
		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
		return err;
	}

	i915_vma_unpin_fence(arg->vma);

	return 0;
}

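/*
 * __igt_reset_evict_vma - check that a reset breaks a wait held by a
 * concurrent unbind. We mark a vma as active on a hanging request, spawn
 * a thread that tries to evict it (and so must wait on the hang), then
 * trigger a reset and verify the thread is released promptly.
 */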
static int __igt_reset_evict_vma(struct intel_gt *gt,
				 struct i915_address_space *vm,
				 int (*fn)(void *),
				 unsigned int flags)
{
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct drm_i915_gem_object *obj;
	struct task_struct *tsk = NULL;
	struct i915_request *rq;
	struct evict_vma arg;
	struct hang h;
	unsigned int pin_flags;
	int err;

	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we can recover an unbind stuck on a hanging request */

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("[%s] Create object failed: %d!\n", engine->name, err);
		goto fini;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
		if (err) {
			pr_err("Invalid X-tiling settings; err:%d\n", err);
			goto out_obj;
		}
	}

	arg.vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(arg.vma)) {
		err = PTR_ERR(arg.vma);
		pr_err("[%s] VMA instance failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		pin_flags |= PIN_MAPPABLE;

	err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
	if (err) {
		i915_request_add(rq);
		pr_err("[%s] VMA pin failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_vma_pin_fence(arg.vma);
		if (err) {
			pr_err("Unable to pin X-tiled fence; err:%d\n", err);
			i915_vma_unpin(arg.vma);
			i915_request_add(rq);
			goto out_obj;
		}
	}

	i915_vma_lock(arg.vma);
	err = i915_request_await_object(rq, arg.vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0) {
		err = i915_vma_move_to_active(arg.vma, rq, flags);
		if (err)
			pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
	} else {
		pr_err("[%s] Request await failed: %d!\n", engine->name, err);
	}

	i915_vma_unlock(arg.vma);

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_vma_unpin_fence(arg.vma);
	i915_vma_unpin(arg.vma);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto out_rq;

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

	init_completion(&arg.completion);

	tsk = kthread_run(fn, &arg, "igt/evict_vma");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
		tsk = NULL;
		goto out_reset;
	}
	get_task_struct(tsk);

	wait_for_completion(&arg.completion);

	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("igt/evict_vma kthread did not wait\n");
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

out_reset:
	igt_global_reset_lock(gt);
	fake_hangcheck(gt, rq->engine->mask);
	igt_global_reset_unlock(gt);

	if (tsk) {
		struct intel_wedge_me w;

		/* The reset, even indirectly, should take less than 100ms. */
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			err = kthread_stop(tsk);

		put_task_struct(tsk);
	}

out_rq:
	i915_request_put(rq);
out_obj:
	i915_gem_object_put(obj);
fini:
	hang_fini(&h);
	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

static int igt_reset_evict_ggtt(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_vma, EXEC_OBJECT_WRITE);
}

static int igt_reset_evict_ppgtt(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ppgtt *ppgtt;
	int err;

	/* aliasing == global gtt locking, covered above */
	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
		return 0;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	err = __igt_reset_evict_vma(gt, &ppgtt->vm,
				    evict_vma, EXEC_OBJECT_WRITE);
	i915_vm_put(&ppgtt->vm);

	return err;
}

static int igt_reset_evict_fence(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}

static int wait_for_others(struct intel_gt *gt,
			   struct intel_engine_cs *exclude)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine == exclude)
			continue;

		if (!wait_for_idle(engine))
			return -EIO;
	}

	return 0;
}

static int igt_reset_queue(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Check that we replay pending requests following a hang */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		struct i915_request *prev;
		IGT_TIMEOUT(end_time);
		unsigned int count;
		bool using_guc = intel_engine_uses_guc(engine);

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (using_guc) {
			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				goto fini;
			}
		}

		prev = hang_create_request(&h, engine);
		if (IS_ERR(prev)) {
			err = PTR_ERR(prev);
			pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
			goto restore;
		}

		i915_request_get(prev);
		i915_request_add(prev);

		count = 0;
		do {
			struct i915_request *rq;
			unsigned int reset_count;

			rq = hang_create_request(&h, engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
				goto restore;
			}

			i915_request_get(rq);
			i915_request_add(rq);

			/*
			 * XXX We don't handle resetting the kernel context
			 * very well. If we trigger a device reset twice in
			 * quick succession while the kernel context is
			 * executing, we may end up skipping the breadcrumb.
			 * This is really only a problem for the selftest as
			 * normally there is a large interlude between resets
			 * (hangcheck), or we focus on resetting just one
			 * engine and so avoid repeatedly resetting innocents.
			 */
			err = wait_for_others(gt, engine);
			if (err) {
				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
				       __func__, engine->name);
				i915_request_put(rq);
				i915_request_put(prev);

				GEM_TRACE_DUMP();
				intel_gt_set_wedged(gt);
				goto restore;
			}

			if (!wait_until_running(&h, prev)) {
				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

				pr_err("%s(%s): Failed to start request %llx, at %x\n",
				       __func__, engine->name,
				       prev->fence.seqno, hws_seqno(&h, prev));
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				i915_request_put(rq);
				i915_request_put(prev);

				intel_gt_set_wedged(gt);

				err = -EIO;
				goto restore;
			}

			reset_count = fake_hangcheck(gt, BIT(id));

			if (prev->fence.error != -EIO) {
				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
				       prev->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			if (rq->fence.error) {
				pr_err("Fence error status not zero [%d] after unrelated reset\n",
				       rq->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			if (i915_reset_count(global) == reset_count) {
				pr_err("No GPU reset recorded!\n");
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			i915_request_put(prev);
			prev = rq;
			count++;
		} while (time_before(jiffies, end_time));
		pr_info("%s: Completed %d queued resets\n",
			engine->name, count);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_put(prev);

restore:
		if (using_guc) {
			int err2 = intel_selftest_restore_policy(engine, &saved);

			if (err2)
				pr_err("%s:%d> [%s] Restore policy failed: %d!\n",
				       __func__, __LINE__, engine->name, err2);
			if (err == 0)
				err = err2;
		}
		if (err)
			goto fini;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}

fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

static int igt_handle_error(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct hang h;
	struct i915_request *rq;
	struct i915_gpu_coredump *error;
	int err;

	/* Check that we can issue a global GPU and engine reset */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto err_fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto err_request;
	}

	/* Temporarily disable error capture */
	error = xchg(&global->first_error, (void *)-1);

	intel_gt_handle_error(gt, engine->mask, 0, NULL);

	xchg(&global->first_error, error);

	if (rq->fence.error != -EIO) {
		pr_err("Guilty request not identified!\n");
		err = -EINVAL;
		goto err_request;
	}

err_request:
	i915_request_put(rq);
err_fini:
	hang_fini(&h);
	return err;
}

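/*
 * Perform an engine reset from within one of the atomic contexts provided
 * by igt_atomic_phases (hardirq, softirq, preemption disabled, ...), with
 * the submission tasklet temporarily disabled so it cannot run concurrently
 * with the reset. The reset must not sleep for this to be valid.
 */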
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
				     const struct igt_atomic_section *p,
				     const char *mode)
{
	struct tasklet_struct * const t = &engine->sched_engine->tasklet;
	int err;

	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
		  engine->name, mode, p->name);

	if (t->func)
		tasklet_disable(t);
	if (strcmp(p->name, "softirq"))
		local_bh_disable();
	p->critical_section_begin();

	err = __intel_engine_reset_bh(engine, NULL);

	p->critical_section_end();
	if (strcmp(p->name, "softirq"))
		local_bh_enable();
	if (t->func) {
		tasklet_enable(t);
		tasklet_hi_schedule(t);
	}

	if (err)
		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
		       engine->name, mode, p->name);

	return err;
}

static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
				   const struct igt_atomic_section *p)
{
	struct i915_request *rq;
	struct hang h;
	int err;

	err = __igt_atomic_reset_engine(engine, p, "idle");
	if (err)
		return err;

	err = hang_init(&h, engine->gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (wait_until_running(&h, rq)) {
		err = __igt_atomic_reset_engine(engine, p, "active");
	} else {
		pr_err("%s(%s): Failed to start request %llx, at %x\n",
		       __func__, engine->name,
		       rq->fence.seqno, hws_seqno(&h, rq));
		intel_gt_set_wedged(engine->gt);
		err = -EIO;
	}

	if (err == 0) {
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(engine->gt))
			err = -EIO;
	}

	i915_request_put(rq);
out:
	hang_fini(&h);
	return err;
}

static int igt_reset_engines_atomic(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that engine resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, gt, id) {
			err = igt_atomic_reset_engine(engine, p);
			if (err)
				goto out;
		}
	}

out:
	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);
unlock:
	igt_global_reset_unlock(gt);

	return err;
}

int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_hang_sanitycheck),
		SUBTEST(igt_reset_nop),
		SUBTEST(igt_reset_nop_engine),
		SUBTEST(igt_reset_idle_engine),
		SUBTEST(igt_reset_active_engine),
		SUBTEST(igt_reset_fail_engine),
		SUBTEST(igt_reset_engines),
		SUBTEST(igt_reset_engines_atomic),
		SUBTEST(igt_reset_queue),
		SUBTEST(igt_reset_wait),
		SUBTEST(igt_reset_evict_ggtt),
		SUBTEST(igt_reset_evict_ppgtt),
		SUBTEST(igt_reset_evict_fence),
		SUBTEST(igt_handle_error),
	};
	struct intel_gt *gt = to_gt(i915);
	intel_wakeref_t wakeref;
	int err;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = intel_gt_live_subtests(tests, gt);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return err;
}