// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "i915_selftest.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"

static int igt_global_reset(void *arg)
{
	struct intel_gt *gt = arg;
	unsigned int reset_count;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we can issue a global GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

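	/*
	 * Sample the global reset counter before triggering the reset so
	 * that we can verify afterwards that a reset was actually recorded.
	 */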
	reset_count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, ALL_ENGINES, NULL);

	if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
	}

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	return err;
}

static int igt_wedged_reset(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

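	/*
	 * Declare the device wedged (i.e. mark it as irrecoverably broken,
	 * cancelling outstanding requests), then check that a full GPU reset
	 * clears the wedged state and leaves the device usable again.
	 */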
	intel_gt_set_wedged(gt);

	GEM_BUG_ON(!intel_gt_is_wedged(gt));
	intel_gt_reset(gt, ALL_ENGINES, NULL);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

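	/*
	 * Walk each atomic phase provided by igt_atomic_phases (e.g. with
	 * preemption, softirqs or interrupts disabled) and perform a full
	 * GPU reset inside the critical section; any sleeping call along
	 * the reset path would then be flagged as a bug.
	 */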
	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = __intel_gt_reset(gt, ALL_ENGINES);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

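	/*
	 * With GuC submission, engine resets are handled by the GuC rather
	 * than by poking at the execlists machinery as we do below, so skip.
	 */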
	if (USES_GUC_SUBMISSION(gt->i915))
		return 0;

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
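		/*
		 * Disable the submission tasklet and hold an engine pm
		 * wakeref so that nothing else touches the engine while we
		 * reset it from within the atomic sections below.
		 */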
		tasklet_disable(&engine->execlists.tasklet);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);

			p->critical_section_begin();
			err = intel_engine_reset(engine, NULL);
			p->critical_section_end();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		tasklet_enable(&engine->execlists.tasklet);
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

int intel_reset_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
		SUBTEST(igt_wedged_reset),
		SUBTEST(igt_atomic_reset),
		SUBTEST(igt_atomic_engine_reset),
	};
	struct intel_gt *gt = &i915->gt;

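	/* Without any reset support there is nothing to exercise. */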
	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	return intel_gt_live_subtests(tests, gt);
}