/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE

DECLARE_EWMA(runtime, 3, 8);
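/*
 * DECLARE_EWMA(runtime, 3, 8) generates the struct ewma_runtime type along
 * with ewma_runtime_init(), ewma_runtime_add() and ewma_runtime_read()
 * helpers (see <linux/average.h>): 3 bits of fixed-point precision, each
 * new sample weighted 1/8 against the running average. A minimal usage
 * sketch (illustrative only, not code from this driver):
 *
 *	struct ewma_runtime avg;
 *
 *	ewma_runtime_init(&avg);
 *	ewma_runtime_add(&avg, dt);		// fold in a new sample
 *	smoothed = ewma_runtime_read(&avg);	// read the current average
 */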

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

#define COPS_RUNTIME_CYCLES_BIT 1
#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
		       unsigned int preempt_timeout_ms);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
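
/*
 * A backend advertises optional behaviour through intel_context_ops.flags;
 * common code tests the COPS_* bits along these lines (hypothetical helper,
 * shown only to illustrate the flag usage):
 *
 *	static inline bool ce_has_inflight(const struct intel_context *ce)
 *	{
 *		return ce->ops->flags & COPS_HAS_INFLIGHT;
 *	}
 *
 * Note the split pin sequence: pre_pin() runs under the i915_gem_ww_ctx to
 * do the blocking allocation work and returns the mapping via @vaddr, which
 * pin() then consumes; unpin()/post_unpin() mirror that on release.
 */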

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
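
	/*
	 * The low 3 bits of @inflight carry a count of how many times the
	 * context is in flight on the engine, while the remaining bits are
	 * the engine pointer itself (possible because engine pointers are
	 * at least 8-byte aligned); ptr_mask_bits() recovers the pointer
	 * and ptr_unmask_bits() the count. A sketch of reading both halves
	 * (illustrative use of the macros above):
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned int count = intel_context_inflight_count(ce);
	 *
	 * Both expand to a READ_ONCE() of @inflight, so a consistent
	 * snapshot can be taken without holding any lock.
	 */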

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12
#define CONTEXT_EXITING			13
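
	/*
	 * The CONTEXT_* values above are bit numbers into @flags and are
	 * manipulated with the atomic bitops rather than plain masks; a
	 * sketch of the accessor shape (hypothetical helper, similar
	 * wrappers live in intel_context.h):
	 *
	 *	static inline bool ce_is_banned(const struct intel_context *ce)
	 *	{
	 *		return test_bit(CONTEXT_BANNED, &ce->flags);
	 *	}
	 *
	 *	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
	 */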

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
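
	/*
	 * On the little-endian platforms i915 supports, @lrca aliases the
	 * low 32 bits of @desc and @ccid the upper 32, letting the backend
	 * assemble the context descriptor piecewise and hand the combined
	 * u64 to hardware. Illustrative only (assuming that layout; the
	 * helpers named here are hypothetical):
	 *
	 *	ce->lrc.lrca = lrc_ggtt_offset | flags;	// low dword
	 *	ce->lrc.ccid = hw_ccid;			// high dword
	 *	submit_desc(engine, ce->lrc.desc);	// hypothetical submit
	 */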
	u32 tag; /* cookie passed to HW to track this context on submission */

	/** stats: Context GPU engine busyness tracking. */
	struct intel_context_stats {
		u64 active;

		/* Time on GPU as tracked by the hw. */
		struct {
			struct ewma_runtime avg;
			u64 total;
			u32 last;
			I915_SELFTEST_DECLARE(u32 num_underflow);
			I915_SELFTEST_DECLARE(u32 max_underflow);
		} runtime;
	} stats;
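
	/*
	 * A sketch of how the hw runtime fields fit together (assumptions
	 * noted; the real update lives in the submission backends): @last
	 * holds the most recent 32-bit counter sample, so unsigned
	 * subtraction yields a wrap-safe delta to accumulate and average:
	 *
	 *	u32 old = ce->stats.runtime.last;
	 *
	 *	ce->stats.runtime.last = read_hw_runtime(ce);	// hypothetical
	 *	dt = ce->stats.runtime.last - old;		// wraps safely
	 *	ce->stats.runtime.total += dt;
	 *	ewma_runtime_add(&ce->stats.runtime.avg, dt);
	 */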

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
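
	/*
	 * Sketch of the pinning pattern implied by @pin_count + @pin_mutex
	 * (the real fast path is in intel_context.h): an already-pinned
	 * context takes a lockless reference, and only the 0 -> 1
	 * transition serialises on the mutex to perform the actual binding:
	 *
	 *	if (atomic_inc_not_zero(&ce->pin_count))
	 *		return 0;	// already pinned, just add a ref
	 *
	 *	mutex_lock(&ce->pin_mutex);
	 *	...			// ops->pre_pin()/pin(), then bump pin_count
	 *	mutex_unlock(&ce->pin_mutex);
	 */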

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context; the
	 * list is assumed to be manipulated only during driver load or
	 * unload, so it is currently not protected by a mutex.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @number_committed_requests: number of committed requests */
		int number_committed_requests;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current guc priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
	} guc_state;
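
	/*
	 * Every field in guc_state is serialised by guc_state.lock, so a
	 * consumer samples it along these lines (illustrative only):
	 *
	 *	unsigned long flags;
	 *
	 *	spin_lock_irqsave(&ce->guc_state.lock, flags);
	 *	sched_state = ce->guc_state.sched_state;
	 *	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
	 */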

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id; transitions
		 * in and out of zero are protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts;
	 * on the list when the context is pending destruction (deregistration
	 * with the GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of child contexts; no
			 * protection needed as the list is immutable after
			 * context creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context for the composite fence when
		 * doing parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for the composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: cached head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: cached tail pointer in work queue */
			u16 wqi_tail;
			/** @wq_head: pointer to the actual head in work queue */
			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
			u32 *wq_tail;
			/** @wq_status: pointer to the status in work queue */
			u32 *wq_status;

			/**
			 * @parent_page: page in context state (ce->state) used
			 * by the parent for the work queue and process
			 * descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;
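
	/*
	 * The parent/child linkage above is walked with ordinary list
	 * iteration; a sketch of visiting each child of a parent context
	 * (the driver wraps this pattern in a for_each_child() macro):
	 *
	 *	struct intel_context *child;
	 *
	 *	list_for_each_entry(child, &parent->parallel.child_list,
	 *			    parallel.child_link)
	 *		handle_child(child);	// hypothetical per-child work
	 */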

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */