xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt_pm.c (revision 99f2eb96)
179ffac85SChris Wilson /*
279ffac85SChris Wilson  * SPDX-License-Identifier: MIT
379ffac85SChris Wilson  *
479ffac85SChris Wilson  * Copyright © 2019 Intel Corporation
579ffac85SChris Wilson  */
679ffac85SChris Wilson 
779ffac85SChris Wilson #include "i915_drv.h"
879ffac85SChris Wilson #include "intel_gt_pm.h"
979ffac85SChris Wilson #include "intel_pm.h"
1079ffac85SChris Wilson #include "intel_wakeref.h"
1179ffac85SChris Wilson 
/*
 * Broadcast a GT power-state transition (INTEL_GT_UNPARK / INTEL_GT_PARK)
 * to every listener registered on the gt.pm_notifications chain. This is a
 * blocking notifier chain, so callbacks may sleep — process context only.
 */
static void pm_notify(struct drm_i915_private *i915, int state)
{
	blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}
1679ffac85SChris Wilson 
/*
 * Wakeref callback invoked on the 0 -> 1 transition of the GT wakeref
 * (see intel_gt_pm_get()): bring the GT out of its parked/idle state.
 *
 * The steps below are order-sensitive: the GT_IRQ power domain must be
 * held before powersave/RPS is enabled, and listeners are only notified
 * once the hardware state has been set up.
 *
 * Returns 0 (the wakeref machinery expects an error code; unparking
 * cannot fail here).
 */
static int intel_gt_unpark(struct intel_wakeref *wf)
{
	struct drm_i915_private *i915 =
		container_of(wf, typeof(*i915), gt.wakeref);

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	/* Stashed in gt.awake; released again in intel_gt_park(). */
	i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!i915->gt.awake);

	intel_enable_gt_powersave(i915);

	/* Refresh the energy/frequency bookkeeping and kick RPS (gen6+). */
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	/* Resume PMU sampling now that the GT is awake. */
	i915_pmu_gt_unparked(i915);

	/* Re-arm hangcheck; it is idle while the GT is parked. */
	i915_queue_hangcheck(i915);

	pm_notify(i915, INTEL_GT_UNPARK);

	return 0;
}
5279ffac85SChris Wilson 
/*
 * Acquire a GT wakeref, waking the GT via intel_gt_unpark() on the first
 * reference. Pair every call with intel_gt_pm_put().
 */
void intel_gt_pm_get(struct drm_i915_private *i915)
{
	intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
}
5779ffac85SChris Wilson 
/*
 * Wakeref callback invoked on the 1 -> 0 transition of the GT wakeref
 * (see intel_gt_pm_put()): park the GT, undoing intel_gt_unpark() in
 * roughly reverse order — notify listeners and stop PMU/RPS first, then
 * finally release the GT_IRQ power domain taken on unpark.
 *
 * Returns 0 (parking cannot fail here).
 */
static int intel_gt_park(struct intel_wakeref *wf)
{
	struct drm_i915_private *i915 =
		container_of(wf, typeof(*i915), gt.wakeref);
	/* Claim the display power reference stashed by intel_gt_unpark(). */
	intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);

	GEM_TRACE("\n");

	pm_notify(i915, INTEL_GT_PARK);

	i915_pmu_gt_parked(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	/* Unpark must have run, so the stashed wakeref cannot be zero. */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}
7779ffac85SChris Wilson 
/*
 * Release a GT wakeref taken by intel_gt_pm_get(); the last reference
 * parks the GT via intel_gt_park().
 */
void intel_gt_pm_put(struct drm_i915_private *i915)
{
	intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
}
8279ffac85SChris Wilson 
/*
 * One-time early setup of the GT power-management state: initialise the
 * wakeref tracking and the PM notifier chain. Must run before any
 * intel_gt_pm_get()/intel_gt_pm_put() or notifier registration.
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref);
	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
8879ffac85SChris Wilson 
8979ffac85SChris Wilson static bool reset_engines(struct drm_i915_private *i915)
9079ffac85SChris Wilson {
9179ffac85SChris Wilson 	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
9279ffac85SChris Wilson 		return false;
9379ffac85SChris Wilson 
9479ffac85SChris Wilson 	return intel_gpu_reset(i915, ALL_ENGINES) == 0;
9579ffac85SChris Wilson }
9679ffac85SChris Wilson 
/**
 * intel_gt_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	/*
	 * reset_engines() is called for its side effect (it performs the
	 * actual GPU reset) and must run even when @force is set, so the
	 * order of this condition matters: only skip the per-engine state
	 * sanitization when the reset failed AND the caller did not force.
	 */
	if (!reset_engines(i915) && !force)
		return;

	/* Scrub each engine's software state to match the post-reset HW. */
	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, false);
}
12079ffac85SChris Wilson 
/*
 * Restore the GT after system resume by resetting the per-engine pinned
 * contexts (kernel and preempt) back to a known-good state. Engines
 * without one of these contexts (NULL) are simply skipped.
 */
void intel_gt_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	for_each_engine(engine, i915, id) {
		struct intel_context *ce;

		ce = engine->kernel_context;
		if (ce)
			ce->ops->reset(ce);

		ce = engine->preempt_context;
		if (ce)
			ce->ops->reset(ce);
	}
}
144