/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "intel_device_info.h"
#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;
	unsigned long capture;
	unsigned long epoch;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[6], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int ban_score;
			int active;
			int guilty;
			bool bannable;
			struct i915_sched_attr sched_attr;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int num_pages;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int ban_score;
			u32 seqno;
			u32 start;
			u32 head;
			u32 tail;
			struct i915_sched_attr sched_attr;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];

	struct scatterlist *sgl, *fit;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
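	/*
	 * Illustrative sketch only (not necessarily the driver's exact call
	 * site): the period above is converted to jiffies when the hangcheck
	 * worker below is (re)armed, along the lines of
	 *
	 *	queue_delayed_work(system_long_wq, &error->hangcheck_work,
	 *			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
	 */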

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when a reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set,
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If the reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;
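	/*
	 * A minimal sketch of the lock-free pattern described above
	 * (illustrative, not verbatim driver code): sample the counter
	 * before sleeping and bail out if a reset fired in the meantime.
	 *
	 *	unsigned long reset = READ_ONCE(error->reset_count);
	 *	...wait for the seqno...
	 *	if (READ_ONCE(error->reset_count) != reset)
	 *		return -EAGAIN;	// a reset happened; restart the ioctl
	 */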

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users from acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
	 * they already hold the struct_mutex and want to participate they can
	 * inspect the bit and do the reset directly; otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts on the same engine.
	 * As the number of engines continues to grow, we allocate these flags
	 * from the most significant bits.
	 *
	 * #I915_WEDGED - If a reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g. in
	 * i915_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
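	/*
	 * A hedged usage sketch for the bits above (the caller shape is an
	 * assumption for illustration, not verbatim driver code): per-engine
	 * resets claim their flag before touching the hardware.
	 *
	 *	if (test_and_set_bit(I915_RESET_ENGINE + engine_id,
	 *			     &error->flags))
	 *		return;	// another reset of this engine is in flight
	 *	...reset the engine...
	 *	clear_bit(I915_RESET_ENGINE + engine_id, &error->flags);
	 *
	 * A terminal failure would instead set_bit(I915_WEDGED, &error->flags).
	 */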

	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	/** Set of stalled engines with guilty requests, in the current reset */
	u32 stalled_mask;

	/** Reason for the current *global* reset */
	const char *reason;

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex so the reset can proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;
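	/*
	 * Sketch of how the two waitqueues pair up (illustrative only):
	 * the hang detector wakes struct_mutex holders via
	 * wake_up_all(&error->wait_queue), while clients block on the
	 * reset completing with something like
	 *
	 *	wait_event(error->reset_queue,
	 *		   !test_bit(I915_RESET_BACKOFF, &error->flags));
	 */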

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;

	char *buf;
	size_t bytes;
	size_t size;
	loff_t iter;

	int err;
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
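/*
 * A hedged usage note: i915_error_printf() appends printf-formatted text
 * to the error state buffer, e.g. (illustrative caller, not driver code):
 *
 *	i915_error_printf(e, "EIR: 0x%08x\n", error->eir);
 */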

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
				      char *buf, loff_t offset, size_t count);
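/*
 * A sketch of consuming a snapshot in chunks (the loop and local names
 * are assumptions for illustration, not the driver's exact sysfs code):
 *
 *	char chunk[PAGE_SIZE];
 *	loff_t off = 0;
 *	ssize_t ret;
 *
 *	while ((ret = i915_gpu_state_copy_to_buffer(error, chunk, off,
 *						    sizeof(chunk))) > 0)
 *		off += ret;	// hand each chunk to the reader
 */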

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}
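/*
 * Reference handling follows the usual kref pattern; a hypothetical
 * reader might do (sketch only, assuming i915_first_error_state() below
 * returns a referenced snapshot or an ERR_PTR):
 *
 *	struct i915_gpu_state *gpu = i915_first_error_state(i915);
 *	if (!IS_ERR_OR_NULL(gpu)) {
 *		...inspect gpu...
 *		i915_gpu_state_put(gpu);
 *	}
 */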

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */