Lines Matching full:gt (references to the identifier gt in the i915 GT reset code)

15 #include "gt/intel_gt_regs.h"
17 #include "gt/uc/intel_gsc_fw.h"
157 static int i915_do_reset(struct intel_gt *gt, in i915_do_reset() argument
161 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in i915_do_reset()
186 static int g33_do_reset(struct intel_gt *gt, in g33_do_reset() argument
190 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g33_do_reset()
196 static int g4x_do_reset(struct intel_gt *gt, in g4x_do_reset() argument
200 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); in g4x_do_reset()
201 struct intel_uncore *uncore = gt->uncore; in g4x_do_reset()
212 GT_TRACE(gt, "Wait for media reset failed\n"); in g4x_do_reset()
220 GT_TRACE(gt, "Wait for render reset failed\n"); in g4x_do_reset()
233 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, in ilk_do_reset() argument
236 struct intel_uncore *uncore = gt->uncore; in ilk_do_reset()
246 GT_TRACE(gt, "Wait for render reset failed\n"); in ilk_do_reset()
257 GT_TRACE(gt, "Wait for media reset failed\n"); in ilk_do_reset()
268 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) in gen6_hw_domain_reset() argument
270 struct intel_uncore *uncore = gt->uncore; in gen6_hw_domain_reset()
290 loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1; in gen6_hw_domain_reset()
293 * GEN6_GDRST is not in the gt power well, no need to check in gen6_hw_domain_reset()
307 GT_TRACE(gt, in gen6_hw_domain_reset()
320 static int __gen6_reset_engines(struct intel_gt *gt, in __gen6_reset_engines() argument
333 for_each_engine_masked(engine, gt, engine_mask, tmp) { in __gen6_reset_engines()
338 return gen6_hw_domain_reset(gt, hw_mask); in __gen6_reset_engines()
341 static int gen6_reset_engines(struct intel_gt *gt, in gen6_reset_engines() argument
348 spin_lock_irqsave(&gt->uncore->lock, flags); in gen6_reset_engines()
349 ret = __gen6_reset_engines(gt, engine_mask, retry); in gen6_reset_engines()
350 spin_unlock_irqrestore(&gt->uncore->lock, flags); in gen6_reset_engines()
363 return engine->gt->engine[vecs_id]; in find_sfc_paired_vecs_engine()
415 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_lock_sfc()
505 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_unlock_sfc()
521 static int __gen11_reset_engines(struct intel_gt *gt, in __gen11_reset_engines() argument
534 for_each_engine_masked(engine, gt, engine_mask, tmp) { in __gen11_reset_engines()
542 ret = gen6_hw_domain_reset(gt, reset_mask); in __gen11_reset_engines()
556 for_each_engine_masked(engine, gt, unlock_mask, tmp) in __gen11_reset_engines()
610 static int gen8_reset_engines(struct intel_gt *gt, in gen8_reset_engines() argument
620 spin_lock_irqsave(&gt->uncore->lock, flags); in gen8_reset_engines()
622 for_each_engine_masked(engine, gt, engine_mask, tmp) { in gen8_reset_engines()
648 if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES) in gen8_reset_engines()
649 __gen11_reset_engines(gt, gt->info.engine_mask, 0); in gen8_reset_engines()
651 if (GRAPHICS_VER(gt->i915) >= 11) in gen8_reset_engines()
652 ret = __gen11_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
654 ret = __gen6_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
657 for_each_engine_masked(engine, gt, engine_mask, tmp) in gen8_reset_engines()
660 spin_unlock_irqrestore(&gt->uncore->lock, flags); in gen8_reset_engines()
665 static int mock_reset(struct intel_gt *gt, in mock_reset() argument
676 static reset_func intel_get_gpu_reset(const struct intel_gt *gt) in intel_get_gpu_reset() argument
678 struct drm_i915_private *i915 = gt->i915; in intel_get_gpu_reset()
680 if (is_mock_gt(gt)) in intel_get_gpu_reset()
698 static int __reset_guc(struct intel_gt *gt) in __reset_guc() argument
701 GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; in __reset_guc()
703 return gen6_hw_domain_reset(gt, guc_domain); in __reset_guc()
706 static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask) in needs_wa_14015076503() argument
708 if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0)) in needs_wa_14015076503()
714 return intel_gsc_uc_fw_init_done(&gt->uc.gsc); in needs_wa_14015076503()
718 wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first) in wa_14015076503_start() argument
720 if (!needs_wa_14015076503(gt, engine_mask)) in wa_14015076503_start()
737 if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) { in wa_14015076503_start()
738 __reset_guc(gt); in wa_14015076503_start()
739 engine_mask = gt->info.engine_mask & ~BIT(GSC0); in wa_14015076503_start()
741 intel_uncore_rmw(gt->uncore, in wa_14015076503_start()
746 intel_uncore_rmw(gt->uncore, in wa_14015076503_start()
756 wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask) in wa_14015076503_end() argument
758 if (!needs_wa_14015076503(gt, engine_mask)) in wa_14015076503_end()
761 intel_uncore_rmw(gt->uncore, in wa_14015076503_end()
766 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) in __intel_gt_reset() argument
773 reset = intel_get_gpu_reset(gt); in __intel_gt_reset()
781 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
785 reset_mask = wa_14015076503_start(gt, engine_mask, !retry); in __intel_gt_reset()
787 GT_TRACE(gt, "engine_mask=%x\n", reset_mask); in __intel_gt_reset()
789 ret = reset(gt, reset_mask, retry); in __intel_gt_reset()
792 wa_14015076503_end(gt, reset_mask); in __intel_gt_reset()
794 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
799 bool intel_has_gpu_reset(const struct intel_gt *gt) in intel_has_gpu_reset() argument
801 if (!gt->i915->params.reset) in intel_has_gpu_reset()
804 return intel_get_gpu_reset(gt); in intel_has_gpu_reset()
807 bool intel_has_reset_engine(const struct intel_gt *gt) in intel_has_reset_engine() argument
809 if (gt->i915->params.reset < 2) in intel_has_reset_engine()
812 return INTEL_INFO(gt->i915)->has_reset_engine; in intel_has_reset_engine()
815 int intel_reset_guc(struct intel_gt *gt) in intel_reset_guc() argument
819 GEM_BUG_ON(!HAS_GT_UC(gt->i915)); in intel_reset_guc()
821 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
822 ret = __reset_guc(gt); in intel_reset_guc()
823 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
846 static void revoke_mmaps(struct intel_gt *gt) in revoke_mmaps() argument
850 for (i = 0; i < gt->ggtt->num_fences; i++) { in revoke_mmaps()
855 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); in revoke_mmaps()
862 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]); in revoke_mmaps()
870 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, in revoke_mmaps()
877 static intel_engine_mask_t reset_prepare(struct intel_gt *gt) in reset_prepare() argument
884 intel_uc_reset_prepare(&gt->uc); in reset_prepare()
886 for_each_engine(engine, gt, id) { in reset_prepare()
895 static void gt_revoke(struct intel_gt *gt) in gt_revoke() argument
897 revoke_mmaps(gt); in gt_revoke()
900 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in gt_reset() argument
910 err = i915_ggtt_enable_hw(gt->i915); in gt_reset()
915 for_each_engine(engine, gt, id) in gt_reset()
919 intel_uc_reset(&gt->uc, ALL_ENGINES); in gt_reset()
921 intel_ggtt_restore_fences(gt->ggtt); in gt_reset()
935 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) in reset_finish() argument
940 for_each_engine(engine, gt, id) { in reset_finish()
946 intel_uc_reset_finish(&gt->uc); in reset_finish()
962 static void __intel_gt_set_wedged(struct intel_gt *gt) in __intel_gt_set_wedged() argument
968 if (test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_set_wedged()
971 GT_TRACE(gt, "start\n"); in __intel_gt_set_wedged()
978 awake = reset_prepare(gt); in __intel_gt_set_wedged()
981 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_set_wedged()
982 __intel_gt_reset(gt, ALL_ENGINES); in __intel_gt_set_wedged()
984 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
993 set_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_set_wedged()
997 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
1000 intel_uc_cancel_requests(&gt->uc); in __intel_gt_set_wedged()
1003 reset_finish(gt, awake); in __intel_gt_set_wedged()
1005 GT_TRACE(gt, "end\n"); in __intel_gt_set_wedged()
1008 void intel_gt_set_wedged(struct intel_gt *gt) in intel_gt_set_wedged() argument
1012 if (test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_set_wedged()
1015 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_set_wedged()
1016 mutex_lock(&gt->reset.mutex); in intel_gt_set_wedged()
1024 for_each_engine(engine, gt, id) { in intel_gt_set_wedged()
1032 __intel_gt_set_wedged(gt); in intel_gt_set_wedged()
1034 mutex_unlock(&gt->reset.mutex); in intel_gt_set_wedged()
1035 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_set_wedged()
1038 static bool __intel_gt_unset_wedged(struct intel_gt *gt) in __intel_gt_unset_wedged() argument
1040 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged()
1044 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_unset_wedged()
1048 if (intel_gt_has_unrecoverable_error(gt)) in __intel_gt_unset_wedged()
1051 GT_TRACE(gt, "start\n"); in __intel_gt_unset_wedged()
1090 ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ in __intel_gt_unset_wedged()
1091 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_unset_wedged()
1092 ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; in __intel_gt_unset_wedged()
1098 add_taint_for_CI(gt->i915, TAINT_WARN); in __intel_gt_unset_wedged()
1111 intel_engines_reset_default_submission(gt); in __intel_gt_unset_wedged()
1113 GT_TRACE(gt, "end\n"); in __intel_gt_unset_wedged()
1116 clear_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_unset_wedged()
1121 bool intel_gt_unset_wedged(struct intel_gt *gt) in intel_gt_unset_wedged() argument
1125 mutex_lock(&gt->reset.mutex); in intel_gt_unset_wedged()
1126 result = __intel_gt_unset_wedged(gt); in intel_gt_unset_wedged()
1127 mutex_unlock(&gt->reset.mutex); in intel_gt_unset_wedged()
1132 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in do_reset() argument
1136 err = __intel_gt_reset(gt, ALL_ENGINES); in do_reset()
1139 err = __intel_gt_reset(gt, ALL_ENGINES); in do_reset()
1144 return gt_reset(gt, stalled_mask); in do_reset()
1147 static int resume(struct intel_gt *gt) in resume() argument
1153 for_each_engine(engine, gt, id) { in resume()
1164 * @gt: #intel_gt to reset
1179 void intel_gt_reset(struct intel_gt *gt, in intel_gt_reset() argument
1186 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags); in intel_gt_reset()
1189 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_reset()
1195 gt_revoke(gt); in intel_gt_reset()
1197 mutex_lock(&gt->reset.mutex); in intel_gt_reset()
1200 if (!__intel_gt_unset_wedged(gt)) in intel_gt_reset()
1204 drm_notice(&gt->i915->drm, in intel_gt_reset()
1206 atomic_inc(&gt->i915->gpu_error.reset_count); in intel_gt_reset()
1208 awake = reset_prepare(gt); in intel_gt_reset()
1210 if (!intel_has_gpu_reset(gt)) { in intel_gt_reset()
1211 if (gt->i915->params.reset) in intel_gt_reset()
1212 drm_err(&gt->i915->drm, "GPU reset not supported\n"); in intel_gt_reset()
1214 drm_dbg(&gt->i915->drm, "GPU reset disabled\n"); in intel_gt_reset()
1218 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1219 intel_runtime_pm_disable_interrupts(gt->i915); in intel_gt_reset()
1221 if (do_reset(gt, stalled_mask)) { in intel_gt_reset()
1222 drm_err(&gt->i915->drm, "Failed to reset chip\n"); in intel_gt_reset()
1226 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1227 intel_runtime_pm_enable_interrupts(gt->i915); in intel_gt_reset()
1229 intel_overlay_reset(gt->i915); in intel_gt_reset()
1239 ret = intel_gt_init_hw(gt); in intel_gt_reset()
1241 drm_err(&gt->i915->drm, in intel_gt_reset()
1247 ret = resume(gt); in intel_gt_reset()
1252 reset_finish(gt, awake); in intel_gt_reset()
1254 mutex_unlock(&gt->reset.mutex); in intel_gt_reset()
1270 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_reset()
1272 __intel_gt_set_wedged(gt); in intel_gt_reset()
1278 return __intel_gt_reset(engine->gt, engine->mask); in intel_gt_reset_engine()
1283 struct intel_gt *gt = engine->gt; in __intel_engine_reset_bh() local
1286 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); in __intel_engine_reset_bh()
1287 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); in __intel_engine_reset_bh()
1354 static void intel_gt_reset_global(struct intel_gt *gt, in intel_gt_reset_global() argument
1358 struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj; in intel_gt_reset_global()
1366 GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask); in intel_gt_reset_global()
1370 intel_wedge_on_timeout(&w, gt, 60 * HZ) { in intel_gt_reset_global()
1371 intel_display_reset_prepare(gt->i915); in intel_gt_reset_global()
1373 intel_gt_reset(gt, engine_mask, reason); in intel_gt_reset_global()
1375 intel_display_reset_finish(gt->i915); in intel_gt_reset_global()
1378 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_reset_global()
1384 * @gt: the intel_gt
1395 void intel_gt_handle_error(struct intel_gt *gt, in intel_gt_handle_error() argument
1423 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_handle_error()
1425 engine_mask &= gt->info.engine_mask; in intel_gt_handle_error()
1428 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE); in intel_gt_handle_error()
1429 intel_gt_clear_error_registers(gt, engine_mask); in intel_gt_handle_error()
1436 if (!intel_uc_uses_guc_submission(&gt->uc) && in intel_gt_handle_error()
1437 intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { in intel_gt_handle_error()
1439 for_each_engine_masked(engine, gt, engine_mask, tmp) { in intel_gt_handle_error()
1442 &gt->reset.flags)) in intel_gt_handle_error()
1449 &gt->reset.flags); in intel_gt_handle_error()
1458 if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in intel_gt_handle_error()
1459 wait_event(gt->reset.queue, in intel_gt_handle_error()
1460 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_handle_error()
1471 if (!intel_uc_uses_guc_submission(&gt->uc)) { in intel_gt_handle_error()
1472 for_each_engine(engine, gt, tmp) { in intel_gt_handle_error()
1474 &gt->reset.flags)) in intel_gt_handle_error()
1475 wait_on_bit(&gt->reset.flags, in intel_gt_handle_error()
1482 synchronize_srcu_expedited(&gt->reset.backoff_srcu); in intel_gt_handle_error()
1484 intel_gt_reset_global(gt, engine_mask, msg); in intel_gt_handle_error()
1486 if (!intel_uc_uses_guc_submission(&gt->uc)) { in intel_gt_handle_error()
1487 for_each_engine(engine, gt, tmp) in intel_gt_handle_error()
1489 &gt->reset.flags); in intel_gt_handle_error()
1491 clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags); in intel_gt_handle_error()
1493 wake_up_all(&gt->reset.queue); in intel_gt_handle_error()
1496 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_handle_error()
1499 static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry) in _intel_gt_reset_lock() argument
1501 might_lock(&gt->reset.backoff_srcu); in _intel_gt_reset_lock()
1506 while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in _intel_gt_reset_lock()
1512 if (wait_event_interruptible(gt->reset.queue, in _intel_gt_reset_lock()
1514 &gt->reset.flags))) in _intel_gt_reset_lock()
1519 *srcu = srcu_read_lock(&gt->reset.backoff_srcu); in _intel_gt_reset_lock()
1525 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) in intel_gt_reset_trylock() argument
1527 return _intel_gt_reset_lock(gt, srcu, false); in intel_gt_reset_trylock()
1530 int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu) in intel_gt_reset_lock_interruptible() argument
1532 return _intel_gt_reset_lock(gt, srcu, true); in intel_gt_reset_lock_interruptible()
1535 void intel_gt_reset_unlock(struct intel_gt *gt, int tag) in intel_gt_reset_unlock() argument
1536 __releases(&gt->reset.backoff_srcu) in intel_gt_reset_unlock()
1538 srcu_read_unlock(&gt->reset.backoff_srcu, tag); in intel_gt_reset_unlock()
1541 int intel_gt_terminally_wedged(struct intel_gt *gt) in intel_gt_terminally_wedged() argument
1545 if (!intel_gt_is_wedged(gt)) in intel_gt_terminally_wedged()
1548 if (intel_gt_has_unrecoverable_error(gt)) in intel_gt_terminally_wedged()
1552 if (wait_event_interruptible(gt->reset.queue, in intel_gt_terminally_wedged()
1554 &gt->reset.flags))) in intel_gt_terminally_wedged()
1557 return intel_gt_is_wedged(gt) ? -EIO : 0; in intel_gt_terminally_wedged()
1560 void intel_gt_set_wedged_on_init(struct intel_gt *gt) in intel_gt_set_wedged_on_init() argument
1564 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_init()
1565 i915_disable_error_state(gt->i915, -ENODEV); in intel_gt_set_wedged_on_init()
1566 set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags); in intel_gt_set_wedged_on_init()
1569 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_set_wedged_on_init()
1572 void intel_gt_set_wedged_on_fini(struct intel_gt *gt) in intel_gt_set_wedged_on_fini() argument
1574 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_fini()
1575 i915_disable_error_state(gt->i915, -ENODEV); in intel_gt_set_wedged_on_fini()
1576 set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags); in intel_gt_set_wedged_on_fini()
1577 intel_gt_retire_requests(gt); /* cleanup any wedged requests */ in intel_gt_set_wedged_on_fini()
1580 void intel_gt_init_reset(struct intel_gt *gt) in intel_gt_init_reset() argument
1582 init_waitqueue_head(&gt->reset.queue); in intel_gt_init_reset()
1583 mutex_init(&gt->reset.mutex); in intel_gt_init_reset()
1584 init_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_init_reset()
1595 i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex); in intel_gt_init_reset()
1598 __set_bit(I915_WEDGED, &gt->reset.flags); in intel_gt_init_reset()
1601 void intel_gt_fini_reset(struct intel_gt *gt) in intel_gt_fini_reset() argument
1603 cleanup_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_fini_reset()
1610 drm_err(&w->gt->i915->drm, in intel_wedge_me()
1613 intel_gt_set_wedged(w->gt); in intel_wedge_me()
1617 struct intel_gt *gt, in __intel_init_wedge() argument
1621 w->gt = gt; in __intel_init_wedge()
1625 queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout); in __intel_init_wedge()
1632 w->gt = NULL; in __intel_fini_wedge()
1639 bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt) in intel_engine_reset_needs_wa_22011802037() argument
1641 if (GRAPHICS_VER(gt->i915) < 11) in intel_engine_reset_needs_wa_22011802037()
1644 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)) in intel_engine_reset_needs_wa_22011802037()
1647 if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) in intel_engine_reset_needs_wa_22011802037()
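
Taken together, the references above trace one dispatch pattern: intel_get_gpu_reset() returns a per-platform reset_func (i915_do_reset, g33_do_reset, g4x_do_reset, ilk_do_reset, the gen6/gen8 engine resets, or mock_reset), and __intel_gt_reset() invokes it under forcewake with a retry count. Below is a minimal standalone sketch of that function-pointer dispatch, under simplified assumptions; fake_gt, pick_reset, do_gt_reset and the *_style_reset backends are hypothetical stand-ins for illustration, not i915 driver code.

/*
 * Illustrative sketch only: mirrors the reset_func dispatch shape seen in
 * intel_get_gpu_reset() and __intel_gt_reset() above.  Every name here
 * (fake_gt, pick_reset, do_gt_reset, the *_style_reset backends) is a
 * hypothetical stand-in, not part of the i915 driver.
 */
#include <errno.h>
#include <stdio.h>

struct fake_gt {
	int graphics_ver;		/* stand-in for GRAPHICS_VER(gt->i915) */
	unsigned int engine_mask;	/* stand-in for gt->info.engine_mask */
};

/* Same shape as the driver's reset_func: (gt, engine_mask, retry) -> status. */
typedef int (*reset_func)(struct fake_gt *gt, unsigned int engine_mask,
			  unsigned int retry);

static int gen8_style_reset(struct fake_gt *gt, unsigned int engine_mask,
			    unsigned int retry)
{
	(void)gt;
	printf("gen8+ per-engine reset: mask=%#x retry=%u\n", engine_mask, retry);
	return 0;
}

static int gen6_style_reset(struct fake_gt *gt, unsigned int engine_mask,
			    unsigned int retry)
{
	(void)gt;
	printf("gen6/7 domain reset: mask=%#x retry=%u\n", engine_mask, retry);
	return 0;
}

/* Analogue of intel_get_gpu_reset(): pick a backend by hardware generation. */
static reset_func pick_reset(const struct fake_gt *gt)
{
	if (gt->graphics_ver >= 8)
		return gen8_style_reset;
	if (gt->graphics_ver >= 6)
		return gen6_style_reset;
	return NULL;			/* platform has no supported reset */
}

/* Analogue of __intel_gt_reset(): run the chosen backend with a retry count. */
static int do_gt_reset(struct fake_gt *gt, unsigned int engine_mask)
{
	reset_func reset = pick_reset(gt);
	unsigned int retry;
	int ret = -ENODEV;

	if (!reset)
		return ret;

	for (retry = 0; retry < 3; retry++) {
		ret = reset(gt, engine_mask, retry);
		if (!ret)
			break;
	}
	return ret;
}

int main(void)
{
	struct fake_gt gt = { .graphics_ver = 12, .engine_mask = 0xff };

	return do_gt_reset(&gt, gt.engine_mask);
}

In the real driver, as the listing shows, __intel_gt_reset() additionally brackets the call with intel_uncore_forcewake_get/put(FORCEWAKE_ALL) and the wa_14015076503_start/end helpers, and the backend table also covers the legacy i915/g33/g4x/ilk paths plus mock_reset for selftests.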