// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_gt_regs.h"

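/*
 * Allow the GSC command streamer to be power-gated while idle: clear
 * IDLE_MSG_DISABLE so the GSCCS can send idle messages, and program the
 * power-context hysteresis to 0xA (~5us) as recommended by the spec.
 * Only applicable on platforms with media IP version 13+ that expose GSC0.
 */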
static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
		intel_uncore_write(engine->gt->uncore,
				   RC_PSMI_CTRL_GSCCS,
				   _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
		/* hysteresis 0xA = 5us as recommended in spec */
		intel_uncore_write(engine->gt->uncore,
				   PWRCTX_MAXCNT_GSCCS,
				   0xA);
	}
}

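/*
 * Poison the context image with CONTEXT_REDZONE (debug builds only) so that
 * any stale state we might otherwise have relied upon after parking shows up
 * as an obvious corruption pattern instead of silently working by luck.
 */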
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

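/*
 * First user of an idle engine: take a GT wakeref, scrub the kernel context
 * image (its contents cannot be trusted after we lost control while parked),
 * then hand over to the backend unpark hook and re-arm breadcrumb signaling
 * and the heartbeat.
 */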
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

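/*
 * Fence callback attached to the final parking request: fold the time from
 * its emission to its completion into the engine's latency EWMA, giving a
 * rough estimate of how long the switch into the kernel context took.
 */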
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

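/*
 * Submit the final parking request and drop the last engine-pm reference
 * inside a single timelines->lock critical section, so that neither the
 * retirement path nor a new submitter can observe an intermediate
 * wakeref/timeline state and underflow either counter.
 */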
static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * Open coded one half of intel_context_enter, which we have to omit
	 * here (see the large comment below), and because the other part must
	 * not be called due to constructing directly with
	 * __i915_request_create, which increments the active count via
	 * intel_context_mark_active.
	 */
	GEM_BUG_ON(rq->context->active_count != 1);
	__intel_gt_pm_get(engine->gt);

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

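/*
 * Idle the GPU on parking by submitting one final request that switches the
 * engine back to the barrier kernel context. Returns true if the engine can
 * be parked right away (already idle, wedged, or the switch could not be
 * attempted); false if a parking request was queued, in which case parking
 * is deferred until that request retires.
 */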
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission, the
	 * same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it, causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}

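/*
 * The engine is parking, so any idle barrier tasks still queued will never be
 * completed by a request: run their callbacks now with ERR_PTR(-EAGAIN), e.g.
 * to clean up after wedging.
 */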
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

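/*
 * Last engine-pm reference dropped: try to idle the hardware by switching to
 * the kernel context. If that requires submitting a final request, parking is
 * deferred with -EBUSY until the request completes; otherwise quiesce the
 * heartbeat and breadcrumbs, call the backend park hook and release our GT
 * wakeref.
 */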
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

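/* Engine runtime pm is driven by the wakeref: first get unparks, last put parks. */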
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
	intel_engine_init_heartbeat(engine);

	intel_gsc_idle_msg_enable(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose or get their content
 * corrupted on suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;

		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif