1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #ifndef __INTEL_CONTEXT_TYPES__
7 #define __INTEL_CONTEXT_TYPES__
8 
9 #include <linux/average.h>
10 #include <linux/kref.h>
11 #include <linux/list.h>
12 #include <linux/mutex.h>
13 #include <linux/types.h>
14 
15 #include "i915_active_types.h"
16 #include "i915_sw_fence.h"
17 #include "i915_utils.h"
18 #include "intel_engine_types.h"
19 #include "intel_sseu.h"
20 
21 #include "uc/intel_guc_fwif.h"
22 
23 #define CONTEXT_REDZONE POISON_INUSE
24 DECLARE_EWMA(runtime, 3, 8);
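
/*
 * Note: DECLARE_EWMA(runtime, 3, 8) (from <linux/average.h>) generates the
 * struct ewma_runtime type plus the ewma_runtime_init/add/read helpers used
 * for stats.runtime.avg below; with a weight reciprocal of 8, each new
 * sample contributes roughly 1/8th to the running average. A minimal usage
 * sketch of the generated API (illustrative only, not part of this header):
 *
 *	struct ewma_runtime avg;
 *	unsigned long smoothed;
 *
 *	ewma_runtime_init(&avg);
 *	ewma_runtime_add(&avg, latest_runtime); // latest_runtime: hypothetical sample
 *	smoothed = ewma_runtime_read(&avg);
 */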
25 
26 struct i915_gem_context;
27 struct i915_gem_ww_ctx;
28 struct i915_vma;
29 struct intel_breadcrumbs;
30 struct intel_context;
31 struct intel_ring;
32 
33 struct intel_context_ops {
34 	unsigned long flags;
35 #define COPS_HAS_INFLIGHT_BIT 0
36 #define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
37 
38 #define COPS_RUNTIME_CYCLES_BIT 1
39 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)
40 
41 	int (*alloc)(struct intel_context *ce);
42 
43 	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
44 		       unsigned int preempt_timeout_ms);
45 
46 	void (*close)(struct intel_context *ce);
47 
48 	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
49 	int (*pin)(struct intel_context *ce, void *vaddr);
50 	void (*unpin)(struct intel_context *ce);
51 	void (*post_unpin)(struct intel_context *ce);
52 
53 	void (*cancel_request)(struct intel_context *ce,
54 			       struct i915_request *rq);
55 
56 	void (*enter)(struct intel_context *ce);
57 	void (*exit)(struct intel_context *ce);
58 
59 	void (*sched_disable)(struct intel_context *ce);
60 
61 	void (*reset)(struct intel_context *ce);
62 	void (*destroy)(struct kref *kref);
63 
64 	/* virtual/parallel engine/context interface */
65 	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
66 						unsigned int count,
67 						unsigned long flags);
68 	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
69 						 unsigned int num_siblings,
70 						 unsigned int width);
71 	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
72 					       unsigned int sibling);
73 };
74 
75 struct intel_context {
76 	/*
77 	 * Note: Some fields may be accessed under RCU.
78 	 *
79 	 * Unless otherwise noted a field can safely be assumed to be protected
80 	 * by strong reference counting.
81 	 */
82 	union {
83 		struct kref ref; /* no kref_get_unless_zero()! */
84 		struct rcu_head rcu;
85 	};
86 
87 	struct intel_engine_cs *engine;
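	/*
	 * inflight: the engine this context is currently executing on, for
	 * backends that track it (see COPS_HAS_INFLIGHT), with a small
	 * submission count packed into the low 3 bits of the pointer; the
	 * macros below split the two apart again using ptr_mask_bits() and
	 * ptr_unmask_bits() from i915_utils.h.
	 */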
88 	struct intel_engine_cs *inflight;
89 #define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
90 #define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
91 #define intel_context_inflight(ce) \
92 	__intel_context_inflight(READ_ONCE((ce)->inflight))
93 #define intel_context_inflight_count(ce) \
94 	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
95 
96 	struct i915_address_space *vm;
97 	struct i915_gem_context __rcu *gem_context;
98 
99 	/*
100 	 * @signal_lock protects the list of requests that need signaling,
101 	 * @signals. While there are any requests that need signaling,
102 	 * we add the context to the breadcrumbs worker, and remove it
103 	 * upon completion/cancellation of the last request.
104 	 */
105 	struct list_head signal_link; /* Accessed under RCU */
106 	struct list_head signals; /* Guarded by signal_lock */
107 	spinlock_t signal_lock; /* protects signals, the list of requests */
108 
109 	struct i915_vma *state;
110 	u32 ring_size;
111 	struct intel_ring *ring;
112 	struct intel_timeline *timeline;
113 
114 	unsigned long flags;
115 #define CONTEXT_BARRIER_BIT		0
116 #define CONTEXT_ALLOC_BIT		1
117 #define CONTEXT_INIT_BIT		2
118 #define CONTEXT_VALID_BIT		3
119 #define CONTEXT_CLOSED_BIT		4
120 #define CONTEXT_USE_SEMAPHORES		5
121 #define CONTEXT_BANNED			6
122 #define CONTEXT_FORCE_SINGLE_SUBMISSION	7
123 #define CONTEXT_NOPREEMPT		8
124 #define CONTEXT_LRCA_DIRTY		9
125 #define CONTEXT_GUC_INIT		10
126 #define CONTEXT_PERMA_PIN		11
127 #define CONTEXT_IS_PARKING		12
128 #define CONTEXT_EXITING			13
129 
130 	struct {
131 		u64 timeout_us;
132 	} watchdog;
133 
134 	u32 *lrc_reg_state;
135 	union {
136 		struct {
137 			u32 lrca;
138 			u32 ccid;
139 		};
140 		u64 desc;
141 	} lrc;
142 	u32 tag; /* cookie passed to HW to track this context on submission */
143 
	/** @stats: Context GPU engine busyness tracking. */
145 	struct intel_context_stats {
146 		u64 active;
147 
148 		/* Time on GPU as tracked by the hw. */
149 		struct {
150 			struct ewma_runtime avg;
151 			u64 total;
152 			u32 last;
153 			I915_SELFTEST_DECLARE(u32 num_underflow);
154 			I915_SELFTEST_DECLARE(u32 max_underflow);
155 		} runtime;
156 	} stats;
157 
158 	unsigned int active_count; /* protected by timeline->mutex */
159 
160 	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and the associated on-GPU transition */
162 
163 	/**
164 	 * active: Active tracker for the rq activity (inc. external) on this
165 	 * intel_context object.
166 	 */
167 	struct i915_active active;
168 
169 	const struct intel_context_ops *ops;
170 
	/** @sseu: Control EU/slice partitioning */
172 	struct intel_sseu sseu;
173 
174 	/**
175 	 * pinned_contexts_link: List link for the engine's pinned contexts.
176 	 * This is only used if this is a perma-pinned kernel context and
177 	 * the list is assumed to only be manipulated during driver load
178 	 * or unload time so no mutex protection currently.
179 	 */
180 	struct list_head pinned_contexts_link;
181 
182 	u8 wa_bb_page; /* if set, page num reserved for context workarounds */
183 
184 	struct {
185 		/** @lock: protects everything in guc_state */
186 		spinlock_t lock;
187 		/**
188 		 * @sched_state: scheduling state of this context using GuC
189 		 * submission
190 		 */
191 		u32 sched_state;
192 		/*
193 		 * @fences: maintains a list of requests that are currently
194 		 * being fenced until a GuC operation completes
195 		 */
196 		struct list_head fences;
197 		/**
198 		 * @blocked: fence used to signal when the blocking of a
199 		 * context's submissions is complete.
200 		 */
201 		struct i915_sw_fence blocked;
202 		/** @requests: list of active requests on this context */
203 		struct list_head requests;
		/** @prio: the context's current GuC priority */
205 		u8 prio;
206 		/**
207 		 * @prio_count: a counter of the number requests in flight in
208 		 * each priority bucket
209 		 */
210 		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
211 		/**
212 		 * @sched_disable_delay_work: worker to disable scheduling on this
213 		 * context
214 		 */
215 		struct delayed_work sched_disable_delay_work;
216 	} guc_state;
217 
218 	struct {
219 		/**
220 		 * @id: handle which is used to uniquely identify this context
221 		 * with the GuC, protected by guc->submission_state.lock
222 		 */
223 		u16 id;
224 		/**
225 		 * @ref: the number of references to the guc_id, when
226 		 * transitioning in and out of zero protected by
227 		 * guc->submission_state.lock
228 		 */
229 		atomic_t ref;
230 		/**
231 		 * @link: in guc->guc_id_list when the guc_id has no refs but is
232 		 * still valid, protected by guc->submission_state.lock
233 		 */
234 		struct list_head link;
235 	} guc_id;
236 
237 	/**
238 	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
239 	 * list when context is pending to be destroyed (deregistered with the
240 	 * GuC), protected by guc->submission_state.lock
241 	 */
242 	struct list_head destroyed_link;
243 
244 	/** @parallel: sub-structure for parallel submission members */
245 	struct {
246 		union {
247 			/**
248 			 * @child_list: parent's list of children
249 			 * contexts, no protection as immutable after context
250 			 * creation
251 			 */
252 			struct list_head child_list;
253 			/**
254 			 * @child_link: child's link into parent's list of
255 			 * children
256 			 */
257 			struct list_head child_link;
258 		};
259 		/** @parent: pointer to parent if child */
260 		struct intel_context *parent;
261 		/**
262 		 * @last_rq: last request submitted on a parallel context, used
263 		 * to insert submit fences between requests in the parallel
264 		 * context
265 		 */
266 		struct i915_request *last_rq;
267 		/**
268 		 * @fence_context: fence context composite fence when doing
269 		 * parallel submission
270 		 */
271 		u64 fence_context;
272 		/**
273 		 * @seqno: seqno for composite fence when doing parallel
274 		 * submission
275 		 */
276 		u32 seqno;
277 		/** @number_children: number of children if parent */
278 		u8 number_children;
279 		/** @child_index: index into child_list if child */
280 		u8 child_index;
281 		/** @guc: GuC specific members for parallel submission */
282 		struct {
283 			/** @wqi_head: cached head pointer in work queue */
284 			u16 wqi_head;
285 			/** @wqi_tail: cached tail pointer in work queue */
286 			u16 wqi_tail;
287 			/** @wq_head: pointer to the actual head in work queue */
288 			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
290 			u32 *wq_tail;
291 			/** @wq_status: pointer to the status in work queue */
292 			u32 *wq_status;
293 
294 			/**
295 			 * @parent_page: page in context state (ce->state) used
296 			 * by parent for work queue, process descriptor
297 			 */
298 			u8 parent_page;
299 		} guc;
300 	} parallel;
301 
302 #ifdef CONFIG_DRM_I915_SELFTEST
303 	/**
304 	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
305 	 */
306 	bool drop_schedule_enable;
307 
308 	/**
309 	 * @drop_schedule_disable: Force drop of schedule disable G2H for
310 	 * selftest
311 	 */
312 	bool drop_schedule_disable;
313 
314 	/**
315 	 * @drop_deregister: Force drop of deregister G2H for selftest
316 	 */
317 	bool drop_deregister;
318 #endif
319 };
320 
321 #endif /* __INTEL_CONTEXT_TYPES__ */
322