/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "gt/intel_engine.h"

#include "intel_device_info.h"
#include "intel_uc_fw.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;
	unsigned long capture;
	unsigned long epoch;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct intel_runtime_info runtime_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[6], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		unsigned long hangcheck_timestamp;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 hw_id;
			int active;
			int guilty;
			struct i915_sched_attr sched_attr;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int num_pages;
			int page_count;
			int unused;
			u32 *pages[];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
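
		/*
		 * Allocation sketch (illustrative assumption, not code from
		 * this file): with the flexible pages[] array above, a
		 * capture object would be sized for its page count using
		 * struct_size() from <linux/overflow.h>, e.g.
		 *
		 *	obj = kmalloc(struct_size(obj, pages, num_pages),
		 *		      GFP_ATOMIC);
		 */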

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			unsigned long flags;
			long jiffies;
			pid_t pid;
			u32 context;
			u32 seqno;
			u32 start;
			u32 head;
			u32 tail;
			struct i915_sched_attr sched_attr;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];

	struct scatterlist *sgl, *fit;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;
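
	/*
	 * Arming sketch (hypothetical caller, not code from this header):
	 * the period above is intended for (re)arming this delayed work,
	 * e.g.
	 *
	 *	schedule_delayed_work(&i915->gpu_error.hangcheck_work,
	 *			      round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
	 */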

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a global reset, we need to
	 * serialise with any other users attempting to do the same, and
	 * any global resources that may be clobbered by the reset (such as
	 * FENCE registers).
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts on the same engine.
	 * As the number of engines continues to grow, allocate the flags from
	 * the most significant bits.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_MODESET	1
#define I915_RESET_ENGINE	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
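
	/*
	 * Usage sketch (illustrative, not code from this header): the bits
	 * above are meant for the standard atomic bitops on the flags word,
	 * e.g.
	 *
	 *	if (test_bit(I915_WEDGED, &i915->gpu_error.flags))
	 *		return -EIO;
	 *
	 * where i915 is assumed to be the usual struct drm_i915_private
	 * pointer; a per-engine bit would similarly be reached as
	 * I915_RESET_ENGINE + engine->id.
	 */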

	/** Number of times the device has been reset (global) */
	u32 reset_count;

	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	struct mutex wedge_mutex; /* serialises wedging/unwedging */

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex so the reset can proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	struct srcu_struct reset_backoff_srcu;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;

	char *buf;
	size_t bytes;
	size_t size;
	loff_t iter;

	int err;
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      intel_engine_mask_t engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
				      char *buf, loff_t offset, size_t count);
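
/*
 * Read-side sketch (hypothetical caller, not part of this header): the copy
 * interface is shaped for chunked reads, so a sysfs/debugfs style read hook
 * can forward its offset and count directly:
 *
 *	ssize_t ret;
 *
 *	ret = i915_gpu_state_copy_to_buffer(error, buf, off, count);
 *	if (ret < 0)
 *		return ret;
 */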

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
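
/*
 * Lifetime sketch (illustrative, not code from this header): capture objects
 * are reference counted, so a typical reader pairs the lookup with a put,
 * with dump() standing in for whatever consumes the state:
 *
 *	struct i915_gpu_state *gpu;
 *
 *	gpu = i915_first_error_state(i915);
 *	if (!IS_ERR_OR_NULL(gpu)) {
 *		dump(gpu);
 *		i915_gpu_state_put(gpu);
 *	}
 *
 * IS_ERR_OR_NULL() covers both the empty case and the error pointers stored
 * by i915_disable_error_state() or returned by the !CONFIG stub below.
 */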

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    intel_engine_mask_t engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */