/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>

#include <uapi/drm/i915_drm.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
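
/*
 * Example usage (illustrative only): emit a trace line for a noteworthy
 * event on the request's engine, tagged with the fence id and the
 * breadcrumb currently visible in the HWSP:
 *
 *	RQ_TRACE(rq, "submitted\n");
 */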

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on a signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on one of the signal lists.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on typically determines the
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};
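
/*
 * These flags share rq->fence.flags with the generic dma-fence bits,
 * starting at DMA_FENCE_FLAG_USER_BITS, so they are manipulated with the
 * usual atomic bitops. A short illustrative sketch (the inline helpers
 * later in this header wrap exactly this pattern):
 *
 *	if (test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags))
 *		... treat the request as non-preemptible ...
 */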

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;
	struct irq_work semaphore_work;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->context->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
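
/*
 * Example usage (an illustrative sketch, not a verbatim driver path,
 * assuming @ce is an intel_context the caller has already acquired): a
 * request is allocated on the context, commands are emitted into its ring,
 * and it is then committed with i915_request_add(). The MI_NOOP dwords are
 * placeholders for real command emission.
 *
 *	struct i915_request *rq;
 *	u32 *cs;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs)) {
 *		i915_request_add(rq);
 *		return PTR_ERR(cs);
 *	}
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 *
 *	i915_request_add(rq);
 */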

void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
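
/*
 * Example (sketch): requests are refcounted via their embedded dma_fence,
 * so a caller that needs the request to outlive its current protection
 * takes its own reference and releases it when done:
 *
 *	rq = i915_request_get(rq);
 *	... use rq after dropping any list/lock protection ...
 *	i915_request_put(rq);
 *
 * i915_request_get_rcu() is the lookup-side variant: under rcu_read_lock()
 * it may return NULL if the request is already on its way to being freed.
 */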

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));
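
/*
 * Example (sketch): ordering one request after another. Making @rq await a
 * fence ensures @rq is not submitted for execution until that fence has
 * signaled (via an interrupt, or a HW semaphore where available). Here
 * @prev is a hypothetical earlier request:
 *
 *	err = i915_request_await_dma_fence(rq, &prev->fence);
 *	if (err < 0)
 *		goto err_rq;
 *
 * where err_rq is the caller's usual unwind label.
 */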

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
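
/*
 * Example (sketch): an interruptible, bounded wait for completion.
 * i915_request_wait() returns the remaining jiffies on success, or a
 * negative error code (e.g. -ETIME on timeout, -ERESTARTSYS if a signal
 * arrived). The 100ms bound is an arbitrary illustration:
 *
 *	long timeout;
 *
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(100));
 *	if (timeout < 0)
 *		return timeout;
 */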

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than, or equal to, seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
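
/*
 * The comparison is performed modulo 2^32, so it remains correct as the
 * seqno wraps, provided the two values are within 2^31 of each other.
 * For example:
 *
 *	i915_seqno_passed(2, 1)          == true
 *	i915_seqno_passed(1, 2)          == false
 *	i915_seqno_passed(1, 0xffffffff) == true  (1 is post-wrap)
 *	i915_seqno_passed(0xffffffff, 1) == false
 */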

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting on its dependencies). Note that it may no longer be running
 * by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
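
/*
 * Example (sketch): the completion check is cheap enough to poll, e.g. a
 * short bounded busy-wait before falling back to a full i915_request_wait().
 * The 10us bound here is an arbitrary illustration:
 *
 *	u64 deadline = local_clock() + 10 * NSEC_PER_USEC;
 *
 *	do {
 *		if (i915_request_completed(rq))
 *			return true;
 *		cpu_relax();
 *	} while (local_clock() < deadline);
 */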

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */