/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_request.h"

#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine.h"
#include "intel_gt.h"
#include "intel_reset.h"

/*
 * While the engine is active, we send a periodic pulse along the engine
 * to check on its health and to flush any idle-barriers. If that request
 * is stuck, and we fail to preempt it, we declare the engine hung and
 * issue a reset -- in the hope that doing so restores forward progress.
 */

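/*
 * Schedule the next heartbeat tick based on the current
 * heartbeat_interval_ms. Delays of a second or longer are rounded up to a
 * whole second boundary so that timer wakeups can be batched. Returns
 * false, without queuing any work, if the heartbeat is disabled (interval
 * set to 0).
 */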
static bool next_heartbeat(struct intel_engine_cs *engine)
{
	long delay;

	delay = READ_ONCE(engine->props.heartbeat_interval_ms);
	if (!delay)
		return false;

	delay = msecs_to_jiffies_timeout(delay);
	if (delay >= HZ)
		delay = round_jiffies_up_relative(delay);
	mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);

	return true;
}

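/*
 * Account for the pulse in the engine's wakeref serial, so the pulse itself
 * is not mistaken for fresh user activity, and attach any pending idle
 * barriers to the request so that they are completed along with it.
 */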
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
{
	engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
	i915_request_add_active_barriers(rq);
}

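/* Dump the engine state for debugging when a heartbeat stops ticking. */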
static void show_heartbeat(const struct i915_request *rq,
			   struct intel_engine_cs *engine)
{
	struct drm_printer p = drm_debug_printer("heartbeat");

	intel_engine_dump(engine, &p,
			  "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
			  engine->name,
			  rq->fence.context,
			  rq->fence.seqno,
			  rq->sched.attr.priority);
}

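/*
 * The heartbeat worker. Each tick we either emit a fresh, low priority
 * pulse along the engine's kernel context or, if the previous pulse has
 * not yet completed, raise its priority. Once even a barrier priority
 * pulse fails to complete before the following tick, the engine is
 * declared hung and intel_gt_handle_error() is asked to capture the error
 * state and reset the engine.
 */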
static void heartbeat(struct work_struct *wrk)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
	};
	struct intel_engine_cs *engine =
		container_of(wrk, typeof(*engine), heartbeat.work.work);
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long serial;

	/* Just in case everything has gone horribly wrong, give it a kick */
	intel_engine_flush_submission(engine);

	rq = engine->heartbeat.systole;
	if (rq && i915_request_completed(rq)) {
		i915_request_put(rq);
		engine->heartbeat.systole = NULL;
	}

	if (!intel_engine_pm_get_if_awake(engine))
		return;

	if (intel_gt_is_wedged(engine->gt))
		goto out;

	if (engine->heartbeat.systole) {
		if (!i915_sw_fence_signaled(&rq->submit)) {
			/*
			 * Not yet submitted: the system is stalled.
			 *
			 * This more often happens for ring submission,
			 * where all contexts are funnelled into a common
			 * ringbuffer. If one context is blocked on an
			 * external fence, not only is it not submitted,
			 * but all other contexts, including the kernel
			 * context, are stuck waiting for that fence to
			 * be signalled.
			 */
		} else if (engine->schedule &&
			   rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
			/*
			 * Gradually raise the priority of the heartbeat to
			 * give high priority work [which presumably desires
			 * low latency and no jitter] the chance to naturally
			 * complete before being preempted.
			 */
			attr.priority = I915_PRIORITY_MASK;
			if (rq->sched.attr.priority >= attr.priority)
				attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
			if (rq->sched.attr.priority >= attr.priority)
				attr.priority = I915_PRIORITY_BARRIER;

			local_bh_disable();
			engine->schedule(rq, &attr);
			local_bh_enable();
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
				show_heartbeat(rq, engine);

			intel_gt_handle_error(engine->gt, engine->mask,
					      I915_ERROR_CAPTURE,
					      "stopped heartbeat on %s",
					      engine->name);
		}
		goto out;
	}

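	/*
	 * No request has been submitted since the last pulse; there is
	 * nothing new to flush, so skip emitting a pulse on this tick.
	 */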
	serial = READ_ONCE(engine->serial);
	if (engine->wakeref_serial == serial)
		goto out;

	if (!mutex_trylock(&ce->timeline->mutex)) {
		/* Unable to lock the kernel timeline, is the engine stuck? */
		if (xchg(&engine->heartbeat.blocked, serial) == serial)
			intel_gt_handle_error(engine->gt, engine->mask,
					      I915_ERROR_CAPTURE,
					      "no heartbeat on %s",
					      engine->name);
		goto out;
	}

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
	intel_context_exit(ce);
	if (IS_ERR(rq))
		goto unlock;

	idle_pulse(engine, rq);
	if (engine->i915->params.enable_hangcheck)
		engine->heartbeat.systole = i915_request_get(rq);

	__i915_request_commit(rq);
	__i915_request_queue(rq, &attr);

unlock:
	mutex_unlock(&ce->timeline->mutex);
out:
	if (!next_heartbeat(engine))
		i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
	intel_engine_pm_put(engine);
}

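/*
 * Restart the heartbeat ticking as the engine is unparked. A no-op if the
 * heartbeat is disabled in the kernel configuration.
 */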
void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
{
	if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
		return;

	next_heartbeat(engine);
}

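/*
 * Stop the heartbeat as the engine is parked; if a tick was still pending,
 * drop the reference held on the last pulse.
 */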
void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
{
	if (cancel_delayed_work(&engine->heartbeat.work))
		i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
}

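/* One-time initialisation of the heartbeat worker for an engine. */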
void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
{
	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
}

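/*
 * Update the heartbeat interval for the engine, in milliseconds; a delay of
 * 0 disables the heartbeat. When disabling, one final pulse is sent first
 * (provided CONFIG_DRM_I915_PREEMPT_TIMEOUT is active) so that the preempt
 * timeout has a last chance to evict any persistent hogs before the
 * heartbeat stops.
 */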
int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
			       unsigned long delay)
{
	int err;

	/* Send one last pulse to clean up persistent hogs before disabling */
	if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
		err = intel_engine_pulse(engine);
		if (err)
			return err;
	}

	WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);

	if (intel_engine_pm_get_if_awake(engine)) {
		if (delay)
			intel_engine_unpark_heartbeat(engine);
		else
			intel_engine_park_heartbeat(engine);
		intel_engine_pm_put(engine);
	}

	return 0;
}

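/*
 * Send a single barrier priority pulse along the engine's kernel context,
 * preempting any lower priority work and flushing the idle barriers.
 * Returns 0 if the pulse was queued (or the engine is parked and none is
 * needed), -ENODEV if the engine does not support preemption, -EINTR if
 * interrupted while waiting for the kernel timeline, or the error from
 * request creation.
 */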
int intel_engine_pulse(struct intel_engine_cs *engine)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	int err;

	if (!intel_engine_has_preemption(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	if (mutex_lock_interruptible(&ce->timeline->mutex)) {
		err = -EINTR;
		goto out_rpm;
	}

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
	intel_context_exit(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unlock;
	}

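	/*
	 * Mark the pulse as a sentinel so that it is kept apart from, and
	 * not coalesced with, subsequent submissions.
	 */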
	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
	idle_pulse(engine, rq);

	__i915_request_commit(rq);
	__i915_request_queue(rq, &attr);
	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
	err = 0;

out_unlock:
	mutex_unlock(&ce->timeline->mutex);
out_rpm:
	intel_engine_pm_put(engine);
	return err;
}

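/*
 * Flush the accumulated idle barriers along the engine by submitting an
 * ordinary kernel context request to carry them. A no-op if no barriers
 * are pending or the engine is already parked.
 */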
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err = 0;

	if (llist_empty(&engine->barrier_tasks))
		return 0;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_rpm;
	}

	idle_pulse(engine, rq);
	i915_request_add(rq);

out_rpm:
	intel_engine_pm_put(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_heartbeat.c"
#endif