1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #ifndef __I915_TIMELINE_TYPES_H__
8 #define __I915_TIMELINE_TYPES_H__
9 
10 #include <linux/list.h>
11 #include <linux/kref.h>
12 #include <linux/mutex.h>
13 #include <linux/rcupdate.h>
14 #include <linux/types.h>
15 
16 #include "i915_active_types.h"
17 
18 struct i915_vma;
19 struct i915_syncmap;
20 struct intel_gt;
21 struct intel_timeline_hwsp;
22 
23 struct intel_timeline {
24 	u64 fence_context;
25 	u32 seqno;
26 
27 	struct mutex mutex; /* protects the flow of requests */
28 
29 	/*
30 	 * pin_count and active_count track essentially the same thing:
31 	 * How many requests are in flight or may be under construction.
32 	 *
33 	 * We need two distinct counters so that we can assign different
34 	 * lifetimes to the events for different use-cases. For example,
35 	 * we want to permanently keep the timeline pinned for the kernel
36 	 * context so that we can issue requests at any time without having
37 	 * to acquire space in the GGTT. However, we want to keep tracking
38 	 * the activity (to be able to detect when we become idle) along that
39 	 * permanently pinned timeline and so end up requiring two counters.
40 	 *
41 	 * Note that the active_count is protected by the intel_timeline.mutex,
42 	 * but the pin_count is protected by a combination of serialisation
43 	 * from the intel_context caller plus internal atomicity.
44 	 */
45 	atomic_t pin_count;
46 	atomic_t active_count;
47 
48 	const u32 *hwsp_seqno;
49 	struct i915_vma *hwsp_ggtt;
50 	u32 hwsp_offset;
51 
52 	struct intel_timeline_cacheline *hwsp_cacheline;
53 
54 	bool has_initial_breadcrumb;
55 
56 	/**
57 	 * List of breadcrumbs associated with GPU requests currently
58 	 * outstanding.
59 	 */
60 	struct list_head requests;
61 
62 	/*
63 	 * Contains an RCU guarded pointer to the last request. No reference is
64 	 * held to the request, users must carefully acquire a reference to
65 	 * the request using i915_active_fence_get(), or manage the RCU
66 	 * protection themselves (cf the i915_active_fence API).
67 	 */
68 	struct i915_active_fence last_request;
69 
70 	/** A chain of completed timelines ready for early retirement. */
71 	struct intel_timeline *retire;
72 
73 	/**
74 	 * We track the most recent seqno that we wait on in every context so
75 	 * that we only have to emit a new await and dependency on a more
76 	 * recent sync point. As the contexts may be executed out-of-order, we
77 	 * have to track each individually and can not rely on an absolute
78 	 * global_seqno. When we know that all tracked fences are completed
79 	 * (i.e. when the driver is idle), we know that the syncmap is
80 	 * redundant and we can discard it without loss of generality.
81 	 */
82 	struct i915_syncmap *sync;
83 
84 	struct list_head link;
85 	struct intel_gt *gt;
86 
87 	struct list_head engine_link;
88 
89 	struct kref kref;
90 	struct rcu_head rcu;
91 };
92 
/*
 * Tracks one cacheline of a hwsp used as seqno storage for a timeline.
 * Lifetime claims below are inferences from the field types — confirm
 * against intel_timeline.c.
 */
struct intel_timeline_cacheline {
	/* Activity tracking, keeping the cacheline alive while in use */
	struct i915_active active;

	/* Owning hwsp this cacheline was allocated from */
	struct intel_timeline_hwsp *hwsp;
	/* CPU pointer to this cacheline — presumably within the hwsp mapping */
	void *vaddr;

	/* GGTT offset of the cacheline — NOTE(review): confirm base/units */
	u32 ggtt_offset;

	/* Presumably defers freeing past an RCU grace period */
	struct rcu_head rcu;
};
103 
104 #endif /* __I915_TIMELINE_TYPES_H__ */
105