/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_display_types.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"
#include "uc/intel_guc_submission.h"

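/* Maximum number of attempts for a full-chip reset (see __intel_gt_reset()). */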
#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

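/*
 * Read-modify-write convenience wrappers. As with every "_fw" accessor in
 * this file, the caller is assumed to already hold any forcewake required
 * for the register; these helpers do not acquire it themselves.
 */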
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *hung_ctx = rq->context;

	if (!i915_request_is_active(rq))
		return;

	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->context == hung_ctx) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	unsigned int score;

	if (IS_ERR_OR_NULL(file_priv))
		return;

	score = 0;
	if (banned)
		score = I915_CLIENT_SCORE_CONTEXT_BAN;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			ctx->name, score,
			atomic_read(&file_priv->ban_score));
	}
}

static bool mark_guilty(struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	bool banned;
	int i;

	if (intel_context_is_closed(rq->context)) {
		intel_context_set_banned(rq->context);
		return true;
	}

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return intel_context_is_banned(rq->context);

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
		banned = false;
		goto out;
	}

	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));
		intel_context_set_banned(rq->context);
	}

	client_mark_guilty(ctx, banned);

out:
	i915_gem_context_put(ctx);
	return banned;
}

static void mark_innocent(struct i915_request *rq)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		atomic_inc(&ctx->active_count);
	rcu_read_unlock();
}

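/*
 * __i915_request_reset() below decides the fate of a request caught up in
 * the reset: a guilty request is cancelled with -EIO and skipped (and, if
 * its context is banned as a result, so is every later request from that
 * context still in the engine's queue), while an innocent request is
 * marked -EAGAIN and left intact so that it can be replayed once the
 * engine is running again.
 */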
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));

	GEM_BUG_ON(i915_request_completed(rq));

	rcu_read_lock(); /* protect the GEM context */
	if (guilty) {
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		if (mark_guilty(rq))
			engine_skip_context(rq);
	} else {
		i915_request_set_error_once(rq, -EAGAIN);
		mark_innocent(rq);
	}
	rcu_read_unlock();
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		drm_dbg(&gt->i915->drm,
			"Wait for 0x%08x engines reset failed\n",
			hw_domain_mask);

	return err;
}

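/*
 * Illustration: gen6_hw_domain_reset() above is the primitive that all
 * gen6+ paths funnel into. Resetting just the render engine, for example,
 * amounts to a single domain write (a sketch):
 *
 *	err = gen6_hw_domain_reset(gt, GEN6_GRDOM_RENDER);
 *
 * while ALL_ENGINES collapses to the full-chip GEN6_GRDOM_FULL domain below.
 */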
static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If the SFC ends up being locked to the engine we want to reset, we
	 * have to reset it as well (we will unlock it once the reset sequence
	 * is completed).
	 */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_forced_lock_ack,
					   sfc_forced_lock_ack_bit,
					   sfc_forced_lock_ack_bit,
					   1000, 0, NULL);

	/* Was the SFC released while we were trying to lock it? */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	if (ret) {
		drm_dbg(&engine->i915->drm,
			"Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*hw_mask |= sfc_reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			ret = gen11_lock_sfc(engine, &hw_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not on the result of
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. the lock was acquired after the
	 * timeout expired).
	 */
	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

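/*
 * Note that RING_RESET_CTL is a masked register: the upper 16 bits of a
 * write select which of the lower 16 bits take effect, hence the
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() wrappers below.
 */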
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		drm_err(&engine->i915->drm,
			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			engine->name, request,
			intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare, we
		 * decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and, on some gens
		 * (kbl), a possible system hang if the reset happens during
		 * active bb execution.
		 *
		 * We would rather take context corruption than a failed
		 * reset with a wedged driver/gpu, and the active bb
		 * execution case should be covered by the stop_engines()
		 * call we make before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

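/*
 * Usage sketch for __intel_gt_reset() above (the caller is assumed to
 * serialise against concurrent resets, e.g. via gt->reset.mutex or the
 * I915_RESET_* bits in gt->reset.flags):
 *
 *	err = __intel_gt_reset(gt, engine->mask);	- one engine
 *	err = __intel_gt_reset(gt, ALL_ENGINES);	- full chip, retried
 */
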
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (i915_modparams.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/* Ensure the irq handler finishes, and is not run again. */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}

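/*
 * Zap all CPU mmaps of objects held in the GGTT fence registers: the reset
 * clobbers the fences (and hence the tiling state), so userspace must be
 * made to re-fault, and is only serviced again once the fences have been
 * restored after the reset.
 */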
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

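/*
 * Take a pm wakeref on every engine that is still awake so that it cannot
 * power down in the middle of the reset; the returned mask tells
 * reset_finish() which references to drop afterwards.
 */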
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}

static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	RQ_TRACE(request, "-EIO\n");
	i915_request_set_error_once(request, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_signal_breadcrumbs(engine);
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed:
	 * anyone already inside the original submit_request will have
	 * finished before the synchronize_rcu_expedited() below returns,
	 * and every later submission goes through nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as -EIO, and
	 * every execbuf since has returned -EIO; for consistency we want all
	 * the currently pending requests to also be marked as -EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping the lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
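	/* Retry with an increasing back-off: 10, 20, ... ms, RESET_MAX_RETRIES times */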
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (i915_modparams.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
		else
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
			ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	bool uses_guc = intel_engine_in_guc_submission_mode(engine);
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!uses_guc)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
			uses_guc ? "GuC " : "", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp; we know the
	 * active request and can drop it, then adjust the head to skip the
	 * offending request in order to resume executing the remaining
	 * requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

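/*
 * Sketch of how the pair above is meant to be used (a hypothetical caller;
 * the GEM fault path is one real user): wait out any reset backoff, then
 * hold the srcu read lock across the hardware access so that a subsequent
 * reset flushes us via synchronize_srcu_expedited():
 *
 *	int srcu, err;
 *
 *	err = intel_gt_reset_trylock(gt, &srcu);
 *	if (err)
 *		return err;
 *	... access state that a reset would clobber ...
 *	intel_gt_reset_unlock(gt, srcu);
 */
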
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_init_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

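/*
 * Helpers behind the intel_wedge_on_timeout() macro: arm a delayed work
 * that wedges the GT if the protected section does not complete in time,
 * as used in intel_gt_reset_global() above:
 *
 *	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
 *		... reset work that must not get stuck ...
 *	}
 */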
void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif