124f90d66SChris Wilson // SPDX-License-Identifier: MIT
2f0c02c1bSTvrtko Ursulin /*
3f0c02c1bSTvrtko Ursulin * Copyright © 2016-2018 Intel Corporation
4f0c02c1bSTvrtko Ursulin */
5f0c02c1bSTvrtko Ursulin
6*5f2ec909SJani Nikula #include <drm/drm_cache.h>
7*5f2ec909SJani Nikula
8b508d01fSJani Nikula #include "gem/i915_gem_internal.h"
9f0c02c1bSTvrtko Ursulin
10f0c02c1bSTvrtko Ursulin #include "i915_active.h"
11b508d01fSJani Nikula #include "i915_drv.h"
12f0c02c1bSTvrtko Ursulin #include "i915_syncmap.h"
132871ea85SChris Wilson #include "intel_gt.h"
142871ea85SChris Wilson #include "intel_ring.h"
152871ea85SChris Wilson #include "intel_timeline.h"
16f0c02c1bSTvrtko Ursulin
1712ca695dSMaarten Lankhorst #define TIMELINE_SEQNO_BYTES 8
18f0c02c1bSTvrtko Ursulin
hwsp_alloc(struct intel_gt * gt)1912ca695dSMaarten Lankhorst static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
20f0c02c1bSTvrtko Ursulin {
21f0c02c1bSTvrtko Ursulin struct drm_i915_private *i915 = gt->i915;
22f0c02c1bSTvrtko Ursulin struct drm_i915_gem_object *obj;
23f0c02c1bSTvrtko Ursulin struct i915_vma *vma;
24f0c02c1bSTvrtko Ursulin
25f0c02c1bSTvrtko Ursulin obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
26f0c02c1bSTvrtko Ursulin if (IS_ERR(obj))
27f0c02c1bSTvrtko Ursulin return ERR_CAST(obj);
28f0c02c1bSTvrtko Ursulin
29f0c02c1bSTvrtko Ursulin i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
30f0c02c1bSTvrtko Ursulin
31f0c02c1bSTvrtko Ursulin vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
32f0c02c1bSTvrtko Ursulin if (IS_ERR(vma))
33f0c02c1bSTvrtko Ursulin i915_gem_object_put(obj);
34f0c02c1bSTvrtko Ursulin
35f0c02c1bSTvrtko Ursulin return vma;
36f0c02c1bSTvrtko Ursulin }
37f0c02c1bSTvrtko Ursulin
/*
 * i915_active retire callback: the timeline has gone idle, so drop the
 * HWSP vma pin and the timeline reference taken in __timeline_active().
 */
static void __timeline_retire(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	i915_vma_unpin(tl->hwsp_ggtt);
	intel_timeline_put(tl);
}
46f0c02c1bSTvrtko Ursulin
/*
 * i915_active acquire callback: keep the HWSP vma pinned and hold a
 * timeline reference for as long as the timeline is busy.  Both are
 * released by __timeline_retire().
 */
static int __timeline_active(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	__i915_vma_pin(tl->hwsp_ggtt);
	intel_timeline_get(tl);
	return 0;
}
5612c255b5SChris Wilson
572c8ab333SMaarten Lankhorst I915_SELFTEST_EXPORT int
intel_timeline_pin_map(struct intel_timeline * timeline)582c8ab333SMaarten Lankhorst intel_timeline_pin_map(struct intel_timeline *timeline)
592c8ab333SMaarten Lankhorst {
602c8ab333SMaarten Lankhorst struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
612c8ab333SMaarten Lankhorst u32 ofs = offset_in_page(timeline->hwsp_offset);
622c8ab333SMaarten Lankhorst void *vaddr;
632c8ab333SMaarten Lankhorst
642c8ab333SMaarten Lankhorst vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
652c8ab333SMaarten Lankhorst if (IS_ERR(vaddr))
662c8ab333SMaarten Lankhorst return PTR_ERR(vaddr);
672c8ab333SMaarten Lankhorst
682c8ab333SMaarten Lankhorst timeline->hwsp_map = vaddr;
692c8ab333SMaarten Lankhorst timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
709ced1218SVille Syrjälä drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
712c8ab333SMaarten Lankhorst
722c8ab333SMaarten Lankhorst return 0;
732c8ab333SMaarten Lankhorst }
742c8ab333SMaarten Lankhorst
/*
 * One-time initialisation of an intel_timeline.
 *
 * If @hwsp is supplied, the timeline borrows the slot at @offset inside
 * that vma (e.g. an engine status page).  Otherwise a private page is
 * allocated for the seqno and the timeline emits an initial breadcrumb.
 *
 * Returns 0 on success or a negative error code (only the private HWSP
 * allocation can fail).
 */
static int intel_timeline_init(struct intel_timeline *timeline,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp,
			       unsigned int offset)
{
	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	if (hwsp) {
		timeline->hwsp_offset = offset;
		timeline->hwsp_ggtt = i915_vma_get(hwsp);
	} else {
		timeline->has_initial_breadcrumb = true;
		hwsp = hwsp_alloc(gt);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);
		timeline->hwsp_ggtt = hwsp;
	}

	/*
	 * The CPU map is created lazily on first pin; until then stash the
	 * raw offset in hwsp_seqno as a placeholder.
	 */
	timeline->hwsp_map = NULL;
	timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;

	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);
	i915_active_init(&timeline->active, __timeline_active,
			 __timeline_retire, 0);

	return 0;
}
114f0c02c1bSTvrtko Ursulin
/* Initialise the per-GT bookkeeping of timelines with in-flight requests. */
void intel_gt_init_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	spin_lock_init(&timelines->lock);
	INIT_LIST_HEAD(&timelines->active_list);
}
122f0c02c1bSTvrtko Ursulin
/*
 * Deferred (RCU) destructor for a timeline whose last reference has been
 * dropped: release the CPU map (if ever created), the HWSP vma, the
 * i915_active tracker and the syncmap, then free the struct itself.
 */
static void intel_timeline_fini(struct rcu_head *rcu)
{
	struct intel_timeline *timeline =
		container_of(rcu, struct intel_timeline, rcu);

	if (timeline->hwsp_map)
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
	i915_active_fini(&timeline->active);

	/*
	 * A small race exists between intel_gt_retire_requests_timeout and
	 * intel_timeline_exit which could result in the syncmap not getting
	 * free'd. Rather than work too hard to seal this race, simply cleanup
	 * the syncmap on fini.
	 */
	i915_syncmap_free(&timeline->sync);

	kfree(timeline);
}
144f0c02c1bSTvrtko Ursulin
145f0c02c1bSTvrtko Ursulin struct intel_timeline *
__intel_timeline_create(struct intel_gt * gt,struct i915_vma * global_hwsp,unsigned int offset)146d1bf5dd8SChris Wilson __intel_timeline_create(struct intel_gt *gt,
147d1bf5dd8SChris Wilson struct i915_vma *global_hwsp,
148d1bf5dd8SChris Wilson unsigned int offset)
149f0c02c1bSTvrtko Ursulin {
150f0c02c1bSTvrtko Ursulin struct intel_timeline *timeline;
151f0c02c1bSTvrtko Ursulin int err;
152f0c02c1bSTvrtko Ursulin
153f0c02c1bSTvrtko Ursulin timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
154f0c02c1bSTvrtko Ursulin if (!timeline)
155f0c02c1bSTvrtko Ursulin return ERR_PTR(-ENOMEM);
156f0c02c1bSTvrtko Ursulin
157d1bf5dd8SChris Wilson err = intel_timeline_init(timeline, gt, global_hwsp, offset);
158f0c02c1bSTvrtko Ursulin if (err) {
159f0c02c1bSTvrtko Ursulin kfree(timeline);
160f0c02c1bSTvrtko Ursulin return ERR_PTR(err);
161f0c02c1bSTvrtko Ursulin }
162f0c02c1bSTvrtko Ursulin
163f0c02c1bSTvrtko Ursulin return timeline;
164f0c02c1bSTvrtko Ursulin }
165f0c02c1bSTvrtko Ursulin
166b436a5f8SChris Wilson struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs * engine,unsigned int offset)167b436a5f8SChris Wilson intel_timeline_create_from_engine(struct intel_engine_cs *engine,
168b436a5f8SChris Wilson unsigned int offset)
169b436a5f8SChris Wilson {
170b436a5f8SChris Wilson struct i915_vma *hwsp = engine->status_page.vma;
171b436a5f8SChris Wilson struct intel_timeline *tl;
172b436a5f8SChris Wilson
173b436a5f8SChris Wilson tl = __intel_timeline_create(engine->gt, hwsp, offset);
174b436a5f8SChris Wilson if (IS_ERR(tl))
175b436a5f8SChris Wilson return tl;
176b436a5f8SChris Wilson
177b436a5f8SChris Wilson /* Borrow a nearby lock; we only create these timelines during init */
178b436a5f8SChris Wilson mutex_lock(&hwsp->vm->mutex);
179b436a5f8SChris Wilson list_add_tail(&tl->engine_link, &engine->status_page.timelines);
180b436a5f8SChris Wilson mutex_unlock(&hwsp->vm->mutex);
181b436a5f8SChris Wilson
182b436a5f8SChris Wilson return tl;
183b436a5f8SChris Wilson }
184b436a5f8SChris Wilson
/*
 * Take an extra pin on an already-pinned timeline; the caller must
 * guarantee at least one pin is held (asserted below).
 */
void __intel_timeline_pin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_inc(&tl->pin_count);
}
19047b08693SMaarten Lankhorst
intel_timeline_pin(struct intel_timeline * tl,struct i915_gem_ww_ctx * ww)19147b08693SMaarten Lankhorst int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
192f0c02c1bSTvrtko Ursulin {
193f0c02c1bSTvrtko Ursulin int err;
194f0c02c1bSTvrtko Ursulin
195ccb23d2dSChris Wilson if (atomic_add_unless(&tl->pin_count, 1, 0))
196f0c02c1bSTvrtko Ursulin return 0;
197f0c02c1bSTvrtko Ursulin
1982c8ab333SMaarten Lankhorst if (!tl->hwsp_map) {
1992c8ab333SMaarten Lankhorst err = intel_timeline_pin_map(tl);
2002c8ab333SMaarten Lankhorst if (err)
2012c8ab333SMaarten Lankhorst return err;
2022c8ab333SMaarten Lankhorst }
2032c8ab333SMaarten Lankhorst
20447b08693SMaarten Lankhorst err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
205f0c02c1bSTvrtko Ursulin if (err)
206ccb23d2dSChris Wilson return err;
207f0c02c1bSTvrtko Ursulin
208f0c02c1bSTvrtko Ursulin tl->hwsp_offset =
209f0c02c1bSTvrtko Ursulin i915_ggtt_offset(tl->hwsp_ggtt) +
210f0c02c1bSTvrtko Ursulin offset_in_page(tl->hwsp_offset);
211d45171acSChris Wilson GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
212d45171acSChris Wilson tl->fence_context, tl->hwsp_offset);
213f0c02c1bSTvrtko Ursulin
21412ca695dSMaarten Lankhorst i915_active_acquire(&tl->active);
215ccb23d2dSChris Wilson if (atomic_fetch_inc(&tl->pin_count)) {
21612ca695dSMaarten Lankhorst i915_active_release(&tl->active);
217ccb23d2dSChris Wilson __i915_vma_unpin(tl->hwsp_ggtt);
218ccb23d2dSChris Wilson }
219f0c02c1bSTvrtko Ursulin
220f0c02c1bSTvrtko Ursulin return 0;
221f0c02c1bSTvrtko Ursulin }
222f0c02c1bSTvrtko Ursulin
/*
 * Rewrite the HWSP slot to match the timeline's software seqno, clearing
 * the remainder of the slot, and flush the CPU cache so the GPU-visible
 * value is coherent.  Used when the HWSP contents may be stale (resume,
 * seqno wrap).
 */
void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
	u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
	/* Must be pinned to be writable, and no requests in flight. */
	GEM_BUG_ON(!atomic_read(&tl->pin_count));

	memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
	WRITE_ONCE(*hwsp_seqno, tl->seqno);
	drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}
233bd3ec9e7SChris Wilson
/*
 * Mark the timeline as having requests in flight: on the first activation
 * re-validate the HWSP seqno and add the timeline to the GT's active list.
 */
void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
	 *
	 * However, intel_gt_retire_request() does not know which engine
	 * it is retiring along and so cannot partake in the engine-pm
	 * barrier, and there we use the tl->active_count as a means to
	 * pin the timeline in the active_list while the locks are dropped.
	 * Ergo, as that is outside of the engine-pm barrier, we need to
	 * use atomic to manipulate tl->active_count.
	 */
	lockdep_assert_held(&tl->mutex);

	/* Fast path: already on the active list, just bump the count. */
	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count)) {
		/*
		 * The HWSP is volatile, and may have been lost while inactive,
		 * e.g. across suspend/resume. Be paranoid, and ensure that
		 * the HWSP value matches our seqno so we don't proclaim
		 * the next request as already complete.
		 */
		intel_timeline_reset_seqno(tl);
		list_add_tail(&tl->link, &timelines->active_list);
	}
	spin_unlock(&timelines->lock);
}
274531958f6SChris Wilson
/*
 * Counterpart of intel_timeline_enter(): on the final exit remove the
 * timeline from the GT's active list and discard its syncmap.
 */
void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	/* Fast path: not the last exit, just drop the count. */
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}
298531958f6SChris Wilson
/*
 * Advance and return the next software seqno.  Timelines with an initial
 * breadcrumb consume two seqno values per request, so their seqno is kept
 * even (asserted via the low bit).
 */
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
306f0c02c1bSTvrtko Ursulin
307f0c02c1bSTvrtko Ursulin static noinline int
__intel_timeline_get_seqno(struct intel_timeline * tl,u32 * seqno)308f0c02c1bSTvrtko Ursulin __intel_timeline_get_seqno(struct intel_timeline *tl,
309f0c02c1bSTvrtko Ursulin u32 *seqno)
310f0c02c1bSTvrtko Ursulin {
31112ca695dSMaarten Lankhorst u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
312f0c02c1bSTvrtko Ursulin
31312ca695dSMaarten Lankhorst /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
31412ca695dSMaarten Lankhorst if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
31512ca695dSMaarten Lankhorst next_ofs = offset_in_page(next_ofs + BIT(5));
3168faa7251SChris Wilson
31712ca695dSMaarten Lankhorst tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
31812ca695dSMaarten Lankhorst tl->hwsp_seqno = tl->hwsp_map + next_ofs;
31912ca695dSMaarten Lankhorst intel_timeline_reset_seqno(tl);
320f0c02c1bSTvrtko Ursulin
321f0c02c1bSTvrtko Ursulin *seqno = timeline_advance(tl);
322f0c02c1bSTvrtko Ursulin GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
323f0c02c1bSTvrtko Ursulin return 0;
324f0c02c1bSTvrtko Ursulin }
325f0c02c1bSTvrtko Ursulin
intel_timeline_get_seqno(struct intel_timeline * tl,struct i915_request * rq,u32 * seqno)326f0c02c1bSTvrtko Ursulin int intel_timeline_get_seqno(struct intel_timeline *tl,
327f0c02c1bSTvrtko Ursulin struct i915_request *rq,
328f0c02c1bSTvrtko Ursulin u32 *seqno)
329f0c02c1bSTvrtko Ursulin {
330f0c02c1bSTvrtko Ursulin *seqno = timeline_advance(tl);
331f0c02c1bSTvrtko Ursulin
332f0c02c1bSTvrtko Ursulin /* Replace the HWSP on wraparound for HW semaphores */
33312ca695dSMaarten Lankhorst if (unlikely(!*seqno && tl->has_initial_breadcrumb))
33412ca695dSMaarten Lankhorst return __intel_timeline_get_seqno(tl, seqno);
335f0c02c1bSTvrtko Ursulin
336f0c02c1bSTvrtko Ursulin return 0;
337f0c02c1bSTvrtko Ursulin }
338f0c02c1bSTvrtko Ursulin
intel_timeline_read_hwsp(struct i915_request * from,struct i915_request * to,u32 * hwsp)339f0c02c1bSTvrtko Ursulin int intel_timeline_read_hwsp(struct i915_request *from,
340f0c02c1bSTvrtko Ursulin struct i915_request *to,
341f0c02c1bSTvrtko Ursulin u32 *hwsp)
342f0c02c1bSTvrtko Ursulin {
34312ca695dSMaarten Lankhorst struct intel_timeline *tl;
344f0c02c1bSTvrtko Ursulin int err;
345f0c02c1bSTvrtko Ursulin
3469eee0dd7SChris Wilson rcu_read_lock();
34712ca695dSMaarten Lankhorst tl = rcu_dereference(from->timeline);
34812ca695dSMaarten Lankhorst if (i915_request_signaled(from) ||
34912ca695dSMaarten Lankhorst !i915_active_acquire_if_busy(&tl->active))
35012ca695dSMaarten Lankhorst tl = NULL;
35112ca695dSMaarten Lankhorst
35212ca695dSMaarten Lankhorst if (tl) {
35312ca695dSMaarten Lankhorst /* hwsp_offset may wraparound, so use from->hwsp_seqno */
35412ca695dSMaarten Lankhorst *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
35512ca695dSMaarten Lankhorst offset_in_page(from->hwsp_seqno);
35612ca695dSMaarten Lankhorst }
35712ca695dSMaarten Lankhorst
35812ca695dSMaarten Lankhorst /* ensure we wait on the right request, if not, we completed */
35912ca695dSMaarten Lankhorst if (tl && __i915_request_is_complete(from)) {
36012ca695dSMaarten Lankhorst i915_active_release(&tl->active);
36112ca695dSMaarten Lankhorst tl = NULL;
36212ca695dSMaarten Lankhorst }
3639eee0dd7SChris Wilson rcu_read_unlock();
3649eee0dd7SChris Wilson
36512ca695dSMaarten Lankhorst if (!tl)
36685bedbf1SChris Wilson return 1;
36712ca695dSMaarten Lankhorst
36812ca695dSMaarten Lankhorst /* Can't do semaphore waits on kernel context */
36912ca695dSMaarten Lankhorst if (!tl->has_initial_breadcrumb) {
37012ca695dSMaarten Lankhorst err = -EINVAL;
37112ca695dSMaarten Lankhorst goto out;
37212ca695dSMaarten Lankhorst }
37312ca695dSMaarten Lankhorst
37412ca695dSMaarten Lankhorst err = i915_active_add_request(&tl->active, to);
37512ca695dSMaarten Lankhorst
37612ca695dSMaarten Lankhorst out:
37712ca695dSMaarten Lankhorst i915_active_release(&tl->active);
37812ca695dSMaarten Lankhorst return err;
379f0c02c1bSTvrtko Ursulin }
380f0c02c1bSTvrtko Ursulin
/*
 * Drop a pin; on the final unpin release the active reference and the
 * GGTT vma pin taken in intel_timeline_pin().
 */
void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	i915_active_release(&tl->active);
	__i915_vma_unpin(tl->hwsp_ggtt);
}
390f0c02c1bSTvrtko Ursulin
/*
 * kref release callback: sanity-check the timeline is idle and defer the
 * actual teardown to an RCU grace period (intel_timeline_fini()), since
 * readers may still dereference rq->timeline under rcu_read_lock().
 */
void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	GEM_BUG_ON(atomic_read(&timeline->pin_count));
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(timeline->retire);

	call_rcu(&timeline->rcu, intel_timeline_fini);
}
402f0c02c1bSTvrtko Ursulin
/* Teardown check: by now no timeline may still be on the active list. */
void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
}
409f0c02c1bSTvrtko Ursulin
/*
 * Dump every active timeline on @gt to @m, with per-timeline request
 * statistics; @show_request, if given, prints each outstanding request.
 *
 * The timelines->lock is dropped while printing a timeline, so each list
 * element is pinned via active_count (and a kref) across the unlock, and
 * iteration resumes with list_safe_reset_next() after relocking.
 */
void intel_gt_show_timelines(struct intel_gt *gt,
			     struct drm_printer *m,
			     void (*show_request)(struct drm_printer *m,
						  const struct i915_request *rq,
						  const char *prefix,
						  int indent))
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	LIST_HEAD(free);

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		unsigned long count, ready, inflight;
		struct i915_request *rq, *rn;
		struct dma_fence *fence;

		/* Don't stall the dump on a busy timeline; just note it. */
		if (!mutex_trylock(&tl->mutex)) {
			drm_printf(m, "Timeline %llx: busy; skipping\n",
				   tl->fence_context);
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		/* Tally the requests still outstanding on this timeline. */
		count = 0;
		ready = 0;
		inflight = 0;
		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
			if (i915_request_completed(rq))
				continue;

			count++;
			if (i915_request_is_ready(rq))
				ready++;
			if (i915_request_is_active(rq))
				inflight++;
		}

		drm_printf(m, "Timeline %llx: { ", tl->fence_context);
		drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
			   count, ready, inflight);
		drm_printf(m, ", seqno: { current: %d, last: %d }",
			   *tl->hwsp_seqno, tl->seqno);
		fence = i915_active_fence_get(&tl->last_request);
		if (fence) {
			drm_printf(m, ", engine: %s",
				   to_request(fence)->engine->name);
			dma_fence_put(fence);
		}
		drm_printf(m, " }\n");

		if (show_request) {
			list_for_each_entry_safe(rq, rn, &tl->requests, link)
				show_request(m, rq, "", 2);
		}

		mutex_unlock(&tl->mutex);
		spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);
}
4890986317aSChris Wilson
490f0c02c1bSTvrtko Ursulin #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
491f0c02c1bSTvrtko Ursulin #include "gt/selftests/mock_timeline.c"
492f0c02c1bSTvrtko Ursulin #include "gt/selftest_timeline.c"
493f0c02c1bSTvrtko Ursulin #endif
494