/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "intel_device_info.h"
#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;
	unsigned long capture;
	unsigned long epoch;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct intel_runtime_info runtime_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[6], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		unsigned long hangcheck_timestamp;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int ban_score;
			int active;
			int guilty;
			bool bannable;
			struct i915_sched_attr sched_attr;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int num_pages;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			unsigned long flags;
			long jiffies;
			pid_t pid;
			u32 context;
			int ban_score;
			u32 seqno;
			u32 start;
			u32 head;
			u32 tail;
			struct i915_sched_attr sched_attr;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];

	struct scatterlist *sgl, *fit;
};
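
/*
 * Illustrative sketch only (not part of the capture interface): each
 * drm_i915_error_object above ends in a zero-length pages[] array, so the
 * capture path is expected to allocate the header and its page pointers as
 * one block, along these lines (the local names and GFP flags here are
 * assumptions):
 *
 *	struct drm_i915_error_object *obj;
 *
 *	obj = kmalloc(sizeof(*obj) + num_pages * sizeof(obj->pages[0]),
 *		      GFP_ATOMIC);
 *	if (obj)
 *		obj->num_pages = num_pages;
 *
 * with page_count presumably recording how many pages[] entries were
 * actually copied.
 */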

struct i915_gpu_restart;

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when a reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If the reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts in the same engine.
	 * As the number of engines continues to grow, allocate the flags from
	 * the most significant bits.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_MODESET	1
#define I915_RESET_ENGINE	2
#define I915_WEDGED		(BITS_PER_LONG - 1)

	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	struct mutex wedge_mutex; /* serialises wedging/unwedging */

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	struct i915_gpu_restart *restart;
};
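
/*
 * Illustrative sketch only (the helper name and bookkeeping are assumptions,
 * not part of this header): the kernel-doc above describes a lock-free
 * waiter that samples reset_count before sleeping and bails out once a reset
 * begins or the GPU is wedged.  One way such a check could look:
 *
 *	static bool __reset_pending_or_wedged(struct i915_gpu_error *error,
 *					      unsigned long old_count)
 *	{
 *		if (test_bit(I915_WEDGED, &error->flags))
 *			return true;
 *		if (test_bit(I915_RESET_BACKOFF, &error->flags))
 *			return true;
 *		return READ_ONCE(error->reset_count) != old_count;
 *	}
 *
 * where old_count is READ_ONCE(error->reset_count) sampled before waiting on
 * wait_queue, so a completed reset is still noticed even if the waiter never
 * observed I915_RESET_BACKOFF being set.
 */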

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;

	char *buf;
	size_t bytes;
	size_t size;
	loff_t iter;

	int err;
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      unsigned long engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
				      char *buf, loff_t offset, size_t count);

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    unsigned long engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */
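
/*
 * Usage sketch (illustrative only): a consumer such as a debugfs or sysfs
 * reader is expected to take a reference on the first captured error state,
 * stream it out and drop the reference.  i915, buf, offset and count are
 * assumed to come from the caller, and the IS_ERR handling mirrors the
 * CONFIG_DRM_I915_CAPTURE_ERROR=n stub returning ERR_PTR(-ENODEV):
 *
 *	struct i915_gpu_state *error;
 *	ssize_t ret;
 *
 *	error = i915_first_error_state(i915);
 *	if (IS_ERR_OR_NULL(error))
 *		return PTR_ERR_OR_ZERO(error);
 *
 *	ret = i915_gpu_state_copy_to_buffer(error, buf, offset, count);
 *	i915_gpu_state_put(error);
 *	return ret;
 */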