/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gt/intel_engine_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
struct i915_timeline_cacheline;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	struct list_head signal_link;

	/*
	 * The RCU epoch at which this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure
	 * that under memory pressure there are sufficient RCU ticks for
	 * us to reclaim our RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed, and it is guarded by the timeline mutex.
	 */
	struct i915_timeline_cacheline *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;

	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** timeline->request entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);

void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
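
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * holding a generic struct dma_fence must check its ops before
 * downcasting to an i915_request. The @fence pointer here is
 * hypothetical.
 *
 *	struct i915_request *rq = NULL;
 *
 *	if (fence && dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 */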

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
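
/*
 * Sketch of the lockless lookup warned about in the struct i915_request
 * kerneldoc above (assumption: @slot is some RCU protected pointer to a
 * request; this is illustrative, not a real call site). As the slab may
 * recycle the request under us, the reference is taken via
 * dma_fence_get_rcu(), which refuses a zero refcount, and the caller
 * then rechecks identity before trusting the pointer.
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(*slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq);
 *	rcu_read_unlock();
 *
 *	if (rq && rq != rcu_access_pointer(*slot)) {
 *		i915_request_put(rq);
 *		rq = NULL;
 *	}
 */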

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
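
/*
 * Illustrative wait (a sketch, not a canonical call site): block for up
 * to one second, letting signals interrupt the wait. A negative return
 * is an error (e.g. -ERESTARTSYS if interrupted); otherwise the
 * remaining jiffies are returned.
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 *	if (ret < 0)
 *		return ret;
 */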

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than or equal to seq2, using signed
 * modular arithmetic so the comparison is robust against u32 wraparound.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
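
/*
 * Worked example: the subtraction is evaluated modulo 2^32 before the
 * signed test, so recently wrapped seqnos still compare correctly.
 *
 *	i915_seqno_passed(1, 0xfffffffe) == true,
 *		as (s32)(1 - 0xfffffffe) == 3 >= 0;
 *	i915_seqno_passed(0xfffffffe, 1) == false,
 *		as (s32)(0xfffffffe - 1) == -3 < 0.
 */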

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page holds that breadcrumb, or a later one, this request is
 * complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}
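
/*
 * Note: rq->fence.seqno - 1 is the breadcrumb of the preceding request
 * on this timeline (or the value written by this request's own initial
 * breadcrumb, where one is emitted), so once the HWSP has passed it we
 * know the HW has at least reached the start of our payload.
 */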

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started, as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so is no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and the request is
 * not busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
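
/*
 * Illustrative state probe (a sketch; each answer is only a snapshot,
 * as the GPU may advance or preempt the request concurrently):
 *
 *	if (i915_request_completed(rq))
 *		; // final breadcrumb written, payload finished
 *	else if (i915_request_is_running(rq))
 *		; // on HW and past its initial breadcrumb
 *	else if (i915_request_started(rq))
 *		; // begun at some point, but since unsubmitted/preempted
 */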

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
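
/*
 * After i915_request_mark_complete(), hwsp_seqno(rq) reads back
 * rq->fence.seqno itself, so i915_request_completed() trivially
 * reports true without touching the (possibly already freed) HWSP.
 */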

bool i915_retire_requests(struct drm_i915_private *i915);

#endif /* I915_REQUEST_H */