124f90d66SChris Wilson // SPDX-License-Identifier: MIT
2112ed2d3SChris Wilson /*
3112ed2d3SChris Wilson * Copyright © 2019 Intel Corporation
4112ed2d3SChris Wilson */
5112ed2d3SChris Wilson
610be98a7SChris Wilson #include "gem/i915_gem_context.h"
710be98a7SChris Wilson #include "gem/i915_gem_pm.h"
810be98a7SChris Wilson
9112ed2d3SChris Wilson #include "i915_drv.h"
10e03b5906SMatthew Brost #include "i915_trace.h"
11112ed2d3SChris Wilson
12112ed2d3SChris Wilson #include "intel_context.h"
13112ed2d3SChris Wilson #include "intel_engine.h"
1479ffac85SChris Wilson #include "intel_engine_pm.h"
152871ea85SChris Wilson #include "intel_ring.h"
16112ed2d3SChris Wilson
172dcec7d3SDaniel Vetter static struct kmem_cache *slab_ce;
18112ed2d3SChris Wilson
intel_context_alloc(void)195e2a0419SChris Wilson static struct intel_context *intel_context_alloc(void)
20112ed2d3SChris Wilson {
212dcec7d3SDaniel Vetter return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
22112ed2d3SChris Wilson }
23112ed2d3SChris Wilson
rcu_context_free(struct rcu_head * rcu)249261a1dbSChris Wilson static void rcu_context_free(struct rcu_head *rcu)
259261a1dbSChris Wilson {
269261a1dbSChris Wilson struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
279261a1dbSChris Wilson
28e03b5906SMatthew Brost trace_intel_context_free(ce);
292dcec7d3SDaniel Vetter kmem_cache_free(slab_ce, ce);
309261a1dbSChris Wilson }
319261a1dbSChris Wilson
/*
 * Free an intel_context once its last reference has been dropped.
 *
 * Freeing is deferred through call_rcu() so that concurrent RCU-protected
 * lookups (ce->signal_link/lock is used under RCU, see intel_context_init())
 * never dereference freed memory.
 */
void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}
36112ed2d3SChris Wilson
37112ed2d3SChris Wilson struct intel_context *
intel_context_create(struct intel_engine_cs * engine)38e6ba7648SChris Wilson intel_context_create(struct intel_engine_cs *engine)
39112ed2d3SChris Wilson {
405e2a0419SChris Wilson struct intel_context *ce;
41112ed2d3SChris Wilson
42112ed2d3SChris Wilson ce = intel_context_alloc();
43112ed2d3SChris Wilson if (!ce)
44112ed2d3SChris Wilson return ERR_PTR(-ENOMEM);
45112ed2d3SChris Wilson
46e6ba7648SChris Wilson intel_context_init(ce, engine);
47e03b5906SMatthew Brost trace_intel_context_create(ce);
485e2a0419SChris Wilson return ce;
49112ed2d3SChris Wilson }
50112ed2d3SChris Wilson
intel_context_alloc_state(struct intel_context * ce)5189f98d63SChris Wilson int intel_context_alloc_state(struct intel_context *ce)
5289f98d63SChris Wilson {
5389f98d63SChris Wilson int err = 0;
5489f98d63SChris Wilson
5589f98d63SChris Wilson if (mutex_lock_interruptible(&ce->pin_mutex))
5689f98d63SChris Wilson return -EINTR;
5789f98d63SChris Wilson
5889f98d63SChris Wilson if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
59373f27f2SChris Wilson if (intel_context_is_banned(ce)) {
60373f27f2SChris Wilson err = -EIO;
61373f27f2SChris Wilson goto unlock;
62373f27f2SChris Wilson }
63373f27f2SChris Wilson
6489f98d63SChris Wilson err = ce->ops->alloc(ce);
6589f98d63SChris Wilson if (unlikely(err))
6689f98d63SChris Wilson goto unlock;
6789f98d63SChris Wilson
6889f98d63SChris Wilson set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
6989f98d63SChris Wilson }
7089f98d63SChris Wilson
7189f98d63SChris Wilson unlock:
7289f98d63SChris Wilson mutex_unlock(&ce->pin_mutex);
7389f98d63SChris Wilson return err;
7489f98d63SChris Wilson }
7589f98d63SChris Wilson
/*
 * Take the first-pin reference on ce->active and, for contexts that use
 * engine-idle barriers, preallocate the barrier tracking nodes so later
 * barrier insertion cannot fail on allocation.
 */
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	/*
	 * Barrier (kernel) contexts, GuC-submitted contexts and parallel
	 * contexts do not use the preallocated engine barriers.
	 */
	if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
	    intel_context_is_parallel(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active); /* undo the acquire above */

	return err;
}
94b11b28eaSChris Wilson
/*
 * Drop the first-pin reference on ce->active, first flushing any
 * preallocated idle barriers onto the engine.
 */
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active); /* may trigger __intel_context_retire() */
}
101b11b28eaSChris Wilson
/*
 * Pin the context state vma into the GGTT (high, above the engine's pin
 * bias) and acquire its activity tracker so the backing store remains
 * resident for as long as the context is pinned.
 */
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true; /* context image may be written by the HW */

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}
128ce476c80SChris Wilson
/* Undo __context_pin_state(), reversing the order of the pin sequence. */
static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}
135ce476c80SChris Wilson
__ring_active(struct intel_ring * ring,struct i915_gem_ww_ctx * ww)13647b08693SMaarten Lankhorst static int __ring_active(struct intel_ring *ring,
13747b08693SMaarten Lankhorst struct i915_gem_ww_ctx *ww)
1388ccfc20aSChris Wilson {
1398ccfc20aSChris Wilson int err;
1408ccfc20aSChris Wilson
14147b08693SMaarten Lankhorst err = intel_ring_pin(ring, ww);
1428ccfc20aSChris Wilson if (err)
1438ccfc20aSChris Wilson return err;
1448ccfc20aSChris Wilson
1455a383d44SChris Wilson err = i915_active_acquire(&ring->vma->active);
1468ccfc20aSChris Wilson if (err)
1475a383d44SChris Wilson goto err_pin;
1488ccfc20aSChris Wilson
1498ccfc20aSChris Wilson return 0;
1508ccfc20aSChris Wilson
1515a383d44SChris Wilson err_pin:
1525a383d44SChris Wilson intel_ring_unpin(ring);
1538ccfc20aSChris Wilson return err;
1548ccfc20aSChris Wilson }
1558ccfc20aSChris Wilson
/* Undo __ring_active(): drop the vma activity ref, then unpin the ring. */
static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}
1618ccfc20aSChris Wilson
/*
 * Pin everything the context needs before use: the ring, the timeline
 * and, if present, the context state image. Errors unwind in reverse
 * order; on success intel_context_post_unpin() releases the pins.
 */
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	/* Not every context carries a separate state object. */
	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;


	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}
1933999a708SMaarten Lankhorst
/* Release the pins taken by intel_context_pre_pin(), in reverse order. */
static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}
2023999a708SMaarten Lankhorst
/*
 * Pin @ce for submission, under the caller's ww acquire context.
 *
 * Temporary pre-pin references are taken on the ring/timeline/state
 * (with their objects dma_resv-locked through @ww) before taking
 * ce->pin_mutex, so the final pin never needs dma_resv itself. Only
 * the first pin (pin_count 0 -> 1) performs HW setup via ce->ops->pin
 * and hands its references over via intel_context_active_acquire();
 * subsequent pins merely increment pin_count.
 *
 * Note the unwind labels also run on the success path: the temporary
 * pre-pin references are always dropped, with the lasting references
 * held by ce->active once handoff has happened.
 *
 * Returns 0 on success; may return -EDEADLK for the caller to back off
 * and retry, -EINTR, -ENOENT (context closed), or an allocation error.
 */
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevent a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_ctx_unpin;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_post_unpin;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_release;

	intel_engine_pm_might_get(ce->engine);

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	/* First pin: pin_count was 0, so perform the real HW pin. */
	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	trace_intel_context_do_pin(ce);

	/* Success falls through: the temporary pre-pin refs are dropped. */
err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_release:
	i915_active_release(&ce->active);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}
2953999a708SMaarten Lankhorst
__intel_context_do_pin(struct intel_context * ce)29647b08693SMaarten Lankhorst int __intel_context_do_pin(struct intel_context *ce)
29747b08693SMaarten Lankhorst {
29847b08693SMaarten Lankhorst struct i915_gem_ww_ctx ww;
29947b08693SMaarten Lankhorst int err;
30047b08693SMaarten Lankhorst
30147b08693SMaarten Lankhorst i915_gem_ww_ctx_init(&ww, true);
30247b08693SMaarten Lankhorst retry:
30347b08693SMaarten Lankhorst err = __intel_context_do_pin_ww(ce, &ww);
30447b08693SMaarten Lankhorst if (err == -EDEADLK) {
30547b08693SMaarten Lankhorst err = i915_gem_ww_ctx_backoff(&ww);
30647b08693SMaarten Lankhorst if (!err)
30747b08693SMaarten Lankhorst goto retry;
30847b08693SMaarten Lankhorst }
30947b08693SMaarten Lankhorst i915_gem_ww_ctx_fini(&ww);
31047b08693SMaarten Lankhorst return err;
31147b08693SMaarten Lankhorst }
31247b08693SMaarten Lankhorst
/*
 * Drop @sub pin references; when pin_count reaches zero, undo the HW
 * pin and release the context's activity tracking.
 */
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	trace_intel_context_do_unpin(ce);
	intel_context_put(ce);
}
3333999a708SMaarten Lankhorst
/*
 * ce->active retirement callback: the context has gone idle. Marks the
 * saved context image as valid, drops the pre-pin references and the
 * self-reference taken in __intel_context_active().
 */
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce); /* balances intel_context_get() in __intel_context_active() */
}
3463999a708SMaarten Lankhorst
/*
 * ce->active activation callback, run on first use after idling.
 * Converts the temporary pre-pin references into lasting pins held for
 * the duration of activity. Everything must already be pinned by
 * intel_context_pre_pin(), so these acquisitions cannot fail (hence
 * GEM_WARN_ON rather than error handling).
 */
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce); /* dropped in __intel_context_retire() */

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}
36712c255b5SChris Wilson
/*
 * No-op notify callback for ce->guc_state.blocked: the fence is used
 * purely as a completion flag, nothing to do on state transitions.
 */
static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
		      enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}
37462eaf0aeSMatthew Brost
/*
 * Initialize a freshly allocated (zeroed) intel_context for @engine.
 *
 * Sets up refcounting, locks, list heads, GuC submission state and the
 * activity tracker. No HW state is allocated here — the ring and
 * context image are created later via intel_context_alloc_state().
 */
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = NULL;      /* allocated on first pin */
	ce->ring_size = SZ_4K; /* default ring size, may be overridden by caller */

	ewma_runtime_init(&ce->stats.runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	spin_lock_init(&ce->guc_state.lock);
	INIT_LIST_HEAD(&ce->guc_state.fences);
	INIT_LIST_HEAD(&ce->guc_state.requests);

	/* No GuC context id assigned until first submission. */
	ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
	INIT_LIST_HEAD(&ce->guc_id.link);

	INIT_LIST_HEAD(&ce->destroyed_link);

	INIT_LIST_HEAD(&ce->parallel.child_list);

	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a pending schedule disable outstanding.
	 */
	i915_sw_fence_init(&ce->guc_state.blocked,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&ce->guc_state.blocked);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);
}
421112ed2d3SChris Wilson
/*
 * Tear down the state set up by intel_context_init(): release the
 * timeline and vm references, drop the creation references on any
 * parallel children, and destroy the locks/trackers.
 */
void intel_context_fini(struct intel_context *ce)
{
	struct intel_context *child, *next;

	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	/* Need to put the creation ref for the children */
	if (intel_context_is_parent(ce))
		for_each_child_safe(ce, child, next)
			intel_context_put(child);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
	i915_sw_fence_fini(&ce->guc_state.blocked);
}
439df8cf31eSChris Wilson
/* Module unload: destroy the intel_context slab cache. */
void i915_context_module_exit(void)
{
	kmem_cache_destroy(slab_ce);
}
444112ed2d3SChris Wilson
i915_context_module_init(void)4452dcec7d3SDaniel Vetter int __init i915_context_module_init(void)
446112ed2d3SChris Wilson {
4472dcec7d3SDaniel Vetter slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
4482dcec7d3SDaniel Vetter if (!slab_ce)
449112ed2d3SChris Wilson return -ENOMEM;
450112ed2d3SChris Wilson
451112ed2d3SChris Wilson return 0;
452112ed2d3SChris Wilson }
4536eee33e8SChris Wilson
/*
 * Mark the context as busy on its engine: take an engine-pm wakeref
 * and enter the timeline. Paired with intel_context_exit_engine().
 */
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}
4596eee33e8SChris Wilson
/* Undo intel_context_enter_engine(), in reverse order. */
void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
4655e2a0419SChris Wilson
/*
 * Prepare @rq (built on a different context) to remotely modify @ce:
 * order it after @ce's current activity and keep @ce's image/timeline
 * pinned until @rq is retired.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee context image and the timeline remains pinned until the
	 * modifying request is retired by setting the ce activity tracker.
	 *
	 * But we only need to take one pin on the account of it. Or in other
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
492a9877da2SChris Wilson
/*
 * Allocate a request on @ce, pinning the context (with ww deadlock
 * backoff) around the allocation. Returns the new request or an
 * ERR_PTR on failure.
 */
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests..
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}
530d8af05ffSChris Wilson
/*
 * Return a reference to the most recently submitted, still-incomplete
 * request on @ce, or NULL if there is none. GuC submission only.
 */
struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
	struct intel_context *parent = intel_context_to_parent(ce);
	struct i915_request *rq, *active = NULL;
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

	/*
	 * We search the parent list to find an active request on the submitted
	 * context. The parent list contains the requests for all the contexts
	 * in the relationship so we have to do a compare of each request's
	 * context.
	 */
	spin_lock_irqsave(&parent->guc_state.lock, flags);
	list_for_each_entry_reverse(rq, &parent->guc_state.requests,
				    sched.link) {
		if (rq->context != ce)
			continue;
		if (i915_request_completed(rq))
			break; /* older entries are complete too; stop here */

		active = rq;
	}
	if (active)
		active = i915_request_get_rcu(active); /* ref while lock is held */
	spin_unlock_irqrestore(&parent->guc_state.lock, flags);

	return active;
}
561573ba126SMatthew Brost
/*
 * Link @child under @parent for parallel submission. Must be called
 * before either context is pinned; neither context may already be in a
 * parent/child relationship (and @child cannot itself be a parent).
 */
void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child)
{
	/*
	 * Callers responsibility to validate that this function is used
	 * correctly but we use GEM_BUG_ON here ensure that they do.
	 */
	GEM_BUG_ON(intel_context_is_pinned(parent));
	GEM_BUG_ON(intel_context_is_child(parent));
	GEM_BUG_ON(intel_context_is_pinned(child));
	GEM_BUG_ON(intel_context_is_child(child));
	GEM_BUG_ON(intel_context_is_parent(child));

	/* Children are indexed in submission order. */
	parent->parallel.child_index = parent->parallel.number_children++;
	list_add_tail(&child->parallel.child_link,
		      &parent->parallel.child_list);
	child->parallel.parent = parent;
}
5803897df4cSMatthew Brost
intel_context_get_total_runtime_ns(struct intel_context * ce)581*5aa857dbSUmesh Nerlige Ramappa u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
582bb6287cbSTvrtko Ursulin {
583bb6287cbSTvrtko Ursulin u64 total, active;
584bb6287cbSTvrtko Ursulin
585*5aa857dbSUmesh Nerlige Ramappa if (ce->ops->update_stats)
586*5aa857dbSUmesh Nerlige Ramappa ce->ops->update_stats(ce);
587*5aa857dbSUmesh Nerlige Ramappa
588bb6287cbSTvrtko Ursulin total = ce->stats.runtime.total;
589bb6287cbSTvrtko Ursulin if (ce->ops->flags & COPS_RUNTIME_CYCLES)
590bb6287cbSTvrtko Ursulin total *= ce->engine->gt->clock_period_ns;
591bb6287cbSTvrtko Ursulin
592bb6287cbSTvrtko Ursulin active = READ_ONCE(ce->stats.active);
593bb6287cbSTvrtko Ursulin if (active)
594bb6287cbSTvrtko Ursulin active = intel_context_clock() - active;
595bb6287cbSTvrtko Ursulin
596bb6287cbSTvrtko Ursulin return total + active;
597bb6287cbSTvrtko Ursulin }
598bb6287cbSTvrtko Ursulin
intel_context_get_avg_runtime_ns(struct intel_context * ce)599bb6287cbSTvrtko Ursulin u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
600bb6287cbSTvrtko Ursulin {
601bb6287cbSTvrtko Ursulin u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
602bb6287cbSTvrtko Ursulin
603bb6287cbSTvrtko Ursulin if (ce->ops->flags & COPS_RUNTIME_CYCLES)
604bb6287cbSTvrtko Ursulin avg *= ce->engine->gt->clock_period_ns;
605bb6287cbSTvrtko Ursulin
606bb6287cbSTvrtko Ursulin return avg;
607bb6287cbSTvrtko Ursulin }
608bb6287cbSTvrtko Ursulin
intel_context_ban(struct intel_context * ce,struct i915_request * rq)60945c64ecfSTvrtko Ursulin bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
61045c64ecfSTvrtko Ursulin {
61145c64ecfSTvrtko Ursulin bool ret = intel_context_set_banned(ce);
61245c64ecfSTvrtko Ursulin
61345c64ecfSTvrtko Ursulin trace_intel_context_ban(ce);
61445c64ecfSTvrtko Ursulin
61545c64ecfSTvrtko Ursulin if (ce->ops->revoke)
61645c64ecfSTvrtko Ursulin ce->ops->revoke(ce, rq,
61745c64ecfSTvrtko Ursulin INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);
61845c64ecfSTvrtko Ursulin
61945c64ecfSTvrtko Ursulin return ret;
62045c64ecfSTvrtko Ursulin }
62145c64ecfSTvrtko Ursulin
intel_context_revoke(struct intel_context * ce)6220add082cSTvrtko Ursulin bool intel_context_revoke(struct intel_context *ce)
62345c64ecfSTvrtko Ursulin {
62445c64ecfSTvrtko Ursulin bool ret = intel_context_set_exiting(ce);
62545c64ecfSTvrtko Ursulin
62645c64ecfSTvrtko Ursulin if (ce->ops->revoke)
6270add082cSTvrtko Ursulin ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
62845c64ecfSTvrtko Ursulin
62945c64ecfSTvrtko Ursulin return ret;
63045c64ecfSTvrtko Ursulin }
63145c64ecfSTvrtko Ursulin
632d8af05ffSChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
633d8af05ffSChris Wilson #include "selftest_context.c"
634d8af05ffSChris Wilson #endif
635