124f90d66SChris Wilson // SPDX-License-Identifier: MIT
279ffac85SChris Wilson /*
379ffac85SChris Wilson * Copyright © 2019 Intel Corporation
479ffac85SChris Wilson */
579ffac85SChris Wilson
601fabda8SLucas De Marchi #include <linux/string_helpers.h>
7a70a9e99SChris Wilson #include <linux/suspend.h>
8a70a9e99SChris Wilson
979ffac85SChris Wilson #include "i915_drv.h"
10801543b2SJani Nikula #include "i915_irq.h"
11cb823ed9SChris Wilson #include "i915_params.h"
12dffa8febSChris Wilson #include "intel_context.h"
13092be382SChris Wilson #include "intel_engine_pm.h"
14cb823ed9SChris Wilson #include "intel_gt.h"
159c878557SChris Wilson #include "intel_gt_clock_utils.h"
1679ffac85SChris Wilson #include "intel_gt_pm.h"
1767804e48SJohn Harrison #include "intel_gt_print.h"
1866101975SChris Wilson #include "intel_gt_requests.h"
193e7abf81SAndi Shyti #include "intel_llc.h"
20c1132367SAndi Shyti #include "intel_rc6.h"
213e7abf81SAndi Shyti #include "intel_rps.h"
2279ffac85SChris Wilson #include "intel_wakeref.h"
230cfab4cbSHuang, Sean Z #include "pxp/intel_pxp_pm.h"
2479ffac85SChris Wilson
2581387fc4SThomas Hellström #define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
2681387fc4SThomas Hellström
/*
 * Transfer any forcewake references held on behalf of userspace (debugfs
 * user_forcewake) across suspend/resume.
 *
 * On suspend the user wakerefs are subtracted from the GT wakeref count so
 * that the GT can be parked; on resume they are added back.  Only called
 * from the suspend/resume paths, so single threaded by construction.
 */
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	/* Keep the GT awake while we adjust its wakeref accounting. */
	intel_gt_pm_get(gt);
	if (suspend) {
		/* Userspace cannot hold more refs than the GT itself. */
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}
44d4033a9bSChris Wilson
/*
 * Mark the start of a GT busy period for the awake-time statistics.
 * The seqcount write section runs with irqs off so readers never spin
 * against an interrupted writer.
 */
static void runtime_begin(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.start = ktime_get();
	gt->stats.active = true;
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
548c3b1ba0SChris Wilson
/*
 * Close the current GT busy period and fold its duration into the
 * running total.  Counterpart to runtime_begin().
 */
static void runtime_end(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.active = false;
	/* Accumulate the elapsed time since the matching runtime_begin(). */
	gt->stats.total =
		ktime_add(gt->stats.total,
			  ktime_sub(ktime_get(), gt->stats.start));
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
668c3b1ba0SChris Wilson
/*
 * First-wakeref callback: power up the GT and wake all the PM machinery
 * (rc6, rps, PMU, GuC busyness, request handling).  Returns 0.
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_rc6_unpark(&gt->rc6);
	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(gt);
	intel_guc_busyness_unpark(gt);

	intel_gt_unpark_requests(gt);
	runtime_begin(gt);

	return 0;
}
9879ffac85SChris Wilson
/*
 * Last-wakeref callback: quiesce the PM machinery in the reverse order
 * of __gt_unpark() and release the display power well.  Returns 0.
 */
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	/* Claim the display power reference taken in __gt_unpark(). */
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	runtime_end(gt);
	intel_gt_park_requests(gt);

	intel_guc_busyness_park(gt);
	i915_vma_parked(gt);
	i915_pmu_gt_parked(gt);
	intel_rps_park(&gt->rps);
	intel_rc6_park(&gt->rc6);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Defer dropping the display power well for 100ms, it's slow! */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}
12579ffac85SChris Wilson
/* Park/unpark callbacks wired into the GT wakeref in intel_gt_pm_init_early(). */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
};
13079ffac85SChris Wilson
/*
 * Early one-time setup of the GT power management state: initialise the
 * wakeref with its park/unpark ops and the seqcount protecting the
 * awake-time statistics.
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	/*
	 * We access the runtime_pm structure via gt->i915 here rather than
	 * gt->uncore as we do elsewhere in the file because gt->uncore is not
	 * yet initialized for all tiles at this point in the driver startup.
	 * runtime_pm is per-device rather than per-tile, so this is still the
	 * correct structure.
	 */
	intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
	/* stats.lock piggybacks on the wakeref mutex for write serialisation. */
	seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
14379ffac85SChris Wilson
/* Initialise the rc6 and rps power-management subsystems for this GT. */
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}
154c1132367SAndi Shyti
reset_engines(struct intel_gt * gt)155cb823ed9SChris Wilson static bool reset_engines(struct intel_gt *gt)
15679ffac85SChris Wilson {
157cb823ed9SChris Wilson if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
15879ffac85SChris Wilson return false;
15979ffac85SChris Wilson
160cb823ed9SChris Wilson return __intel_gt_reset(gt, ALL_ENGINES) == 0;
16179ffac85SChris Wilson }
16279ffac85SChris Wilson
/*
 * Scrub the GT back to a known state, e.g. after resume from a deep
 * sleep state or before wedging.  Quiesces submission, resets the
 * engines (when @force is set, even if the hardware reset is skipped)
 * and reinitialises the uC, all under a raw runtime-pm wakeref and
 * full forcewake.
 */
static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GT_TRACE(gt, "force:%s", str_yes_no(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_check_clock_frequency(gt);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

		if (engine->sanitize)
			engine->sanitize(engine);
	}

	/* Either the HW was reset, or we force a software reset per engine. */
	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	intel_uc_reset(&gt->uc, false);

	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_rps_sanitize(&gt->rps);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
2133c00660dSChris Wilson
/* Tear down GT power management; counterpart to intel_gt_pm_init(). */
void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}
218c1132367SAndi Shyti
/**
 * intel_gt_resume - restore the GT after (system or runtime) suspend
 * @gt: the intel_gt to resume
 *
 * Sanitize the GT, reinitialise the hardware, replay the kernel
 * contexts on every engine and re-enable rps/rc6/llc.  On any failure
 * the GT is declared wedged.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/* Do not attempt to resume a terminally wedged device. */
	err = intel_gt_has_unrecoverable_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	gt_sanitize(gt, true);

	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised, can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
		goto err_wedged;
	}

	intel_uc_reset_finish(&gt->uc);

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	/* Replay the kernel context on each engine to restore HW state. */
	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);

		engine->serial++; /* kernel context lost */
		err = intel_engine_resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			gt_err(gt, "Failed to restart %s (%d)\n",
			       engine->name, err);
			goto err_wedged;
		}
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	/* Restore the wakerefs userspace held before suspend. */
	user_forcewake(gt, false);

out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);
	return err;

err_wedged:
	intel_gt_set_wedged(gt);
	goto out_fw;
}
2899dfe3459SDaniele Ceraolo Spurio
/*
 * Wait for the GT to idle before suspending.  If it fails to idle
 * within I915_GT_SUSPEND_IDLE_TIMEOUT, forcibly wedge the device so
 * the suspend can proceed with the GPU quiet.
 */
static void wait_for_suspend(struct intel_gt *gt)
{
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		intel_gt_retire_requests(gt);
	}

	intel_gt_pm_wait_for_idle(gt);
}
306c1132367SAndi Shyti
/*
 * Early suspend step: park the user forcewake references and wait for
 * the GT to idle before the device is powered down.
 */
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	user_forcewake(gt, true);
	wait_for_suspend(gt);
}
312a70a9e99SChris Wilson
/*
 * Report the system suspend state being entered.  Without suspend
 * support built in, behave as if entering s2idle (device stays
 * powered), which is the conservative choice for intel_gt_suspend_late().
 */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}
321a70a9e99SChris Wilson
/*
 * Late suspend step: ensure the GT is idle, suspend the uC and, unless
 * we are merely entering s2idle, disable rps/rc6/llc and sanitize the
 * hardware before power is removed.
 */
void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_suspend(gt);

	/* Mock GTs have no hardware to quiesce. */
	if (is_mock_gt(gt))
		return;

	GEM_BUG_ON(gt->awake);

	intel_uc_suspend(&gt->uc);

	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend-states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend, the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * powermanagement enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}
359c1132367SAndi Shyti
/* Runtime-suspend the GT: only the uC needs explicit handling here. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
}
3669dfe3459SDaniele Ceraolo Spurio
intel_gt_runtime_resume(struct intel_gt * gt)3679dfe3459SDaniele Ceraolo Spurio int intel_gt_runtime_resume(struct intel_gt *gt)
3689dfe3459SDaniele Ceraolo Spurio {
3690cfab4cbSHuang, Sean Z int ret;
3700cfab4cbSHuang, Sean Z
371639f2f24SVenkata Sandeep Dhanalakota GT_TRACE(gt, "\n");
3729dfe3459SDaniele Ceraolo Spurio intel_gt_init_swizzling(gt);
373dec9cf9eSChris Wilson intel_ggtt_restore_fences(gt->ggtt);
3749dfe3459SDaniele Ceraolo Spurio
3750cfab4cbSHuang, Sean Z ret = intel_uc_runtime_resume(>->uc);
3760cfab4cbSHuang, Sean Z if (ret)
3770cfab4cbSHuang, Sean Z return ret;
3780cfab4cbSHuang, Sean Z
3790cfab4cbSHuang, Sean Z return 0;
3809dfe3459SDaniele Ceraolo Spurio }
381c1132367SAndi Shyti
__intel_gt_get_awake_time(const struct intel_gt * gt)3828c3b1ba0SChris Wilson static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
3838c3b1ba0SChris Wilson {
3848c3b1ba0SChris Wilson ktime_t total = gt->stats.total;
3858c3b1ba0SChris Wilson
3868c3b1ba0SChris Wilson if (gt->stats.active)
3878c3b1ba0SChris Wilson total = ktime_add(total,
3888c3b1ba0SChris Wilson ktime_sub(ktime_get(), gt->stats.start));
3898c3b1ba0SChris Wilson
3908c3b1ba0SChris Wilson return total;
3918c3b1ba0SChris Wilson }
3928c3b1ba0SChris Wilson
/*
 * Lockless read of the accumulated GT awake time: retry the sample
 * until the seqcount shows no concurrent writer touched the stats.
 */
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);
		total = __intel_gt_get_awake_time(gt);
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}
4058c3b1ba0SChris Wilson
406c1132367SAndi Shyti #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
407c1132367SAndi Shyti #include "selftest_gt_pm.c"
408c1132367SAndi Shyti #endif
409