/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
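
/*
 * Illustrative usage sketch only (not part of this header): the macro
 * prepends the fence id and the current HWSP seqno, and the caller-supplied
 * fmt is concatenated directly after "current %d", so it should typically
 * begin with a separator and end with a newline, e.g.
 *
 *	RQ_TRACE(rq, "\n");
 *	RQ_TRACE(rq, ", completed=%d\n", done);
 */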

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed, and it is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	unsigned long flags;
#define I915_REQUEST_WAITBOOST	BIT(0)
#define I915_REQUEST_NOPREEMPT	BIT(1)
#define I915_REQUEST_SENTINEL	BIT(2)

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}
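
/*
 * Illustrative sketch only: dma_fence_is_i915() lets us safely downcast a
 * generic struct dma_fence to an i915_request via to_request() (declared
 * below). Assumes @fence is a valid, referenced fence; the local variable
 * is purely hypothetical.
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 */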

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
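
/*
 * Illustrative sketch only, tying into the lockless RCU lookup warned about
 * above struct i915_request: requests come from a typesafe
 * (SLAB_TYPESAFE_BY_RCU) slab, so a pointer loaded under rcu_read_lock()
 * may name a request that is concurrently being freed and recycled.
 * i915_request_get_rcu() returns NULL if the refcount had already dropped
 * to zero, so the caller must check the result. Here lookup_request() is a
 * hypothetical source yielding a non-NULL candidate pointer:
 *
 *	rcu_read_lock();
 *	rq = i915_request_get_rcu(lookup_request());
 *	rcu_read_unlock();
 *	if (rq) {
 *		...
 *		i915_request_put(rq);
 *	}
 */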

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));
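
/*
 * Illustrative sketch only: the await family is called while @rq is still
 * under construction (before i915_request_add()) to order it after external
 * work, e.g. waiting for an object's pending writes before reading it, then
 * after an arbitrary dma_fence:
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	if (err == 0)
 *		err = i915_request_await_dma_fence(rq, fence);
 *	if (err)
 *		return err;
 */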

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
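
/*
 * Illustrative sketch only: wait interruptibly for @rq with a bounded
 * timeout (in jiffies). A non-negative return is the remaining timeout on
 * completion; a negative value is an error, e.g. -ETIME if the request is
 * still unfinished when the timeout expires.
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, timeout);
 *	if (ret < 0)
 *		return ret;
 */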

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is the same as or later than seq2, using wrapping
 * (modular) u32 arithmetic.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
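
/*
 * For example (illustrative only):
 *
 *	i915_seqno_passed(2, 1)          == true
 *	i915_seqno_passed(1, 1)          == true  (equality passes)
 *	i915_seqno_passed(0, 0xffffffff) == true  (the seqno has wrapped)
 *	i915_seqno_passed(1, 0x80000001) == false (more than 2^31 apart)
 */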

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not merely
 * busywaiting on its dependencies). Note that it may no longer be running
 * by the time this function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
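
/*
 * Illustrative sketch only: i915_request_started(), i915_request_is_running()
 * and i915_request_completed() form a coarse lifecycle probe. Each answer is
 * instantaneous (the request may be preempted or may complete between calls),
 * so treat them as hints rather than stable state:
 *
 *	if (i915_request_completed(rq))
 *		... finished, eligible for retirement ...
 *	else if (i915_request_is_running(rq))
 *		... currently executing on HW ...
 *	else if (i915_request_started(rq))
 *		... begun, but preempted or busywaiting ...
 */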

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return rq->flags & I915_REQUEST_WAITBOOST;
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(rq->flags & I915_REQUEST_SENTINEL);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */