/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	struct i915_request *request;
	u32 seqno;
};

struct intel_signal_node {
	struct intel_wait wait;
	struct list_head link;
};

struct i915_dependency {
	struct i915_priotree *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	int priority;
};
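/*
 * Illustrative sketch only (do_something() is a hypothetical placeholder):
 * the dependency lists are walked through the embedded struct
 * i915_dependency, e.g. to visit every request we must wait for:
 *
 *	struct i915_dependency *dep;
 *
 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
 *		do_something(dep->signaler);
 *
 * Waiters are walked the same way via waiters_list and wait_link.
 */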
80 
81 enum {
82 	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
83 	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
84 	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
85 
86 	I915_PRIORITY_INVALID = INT_MIN
87 };
88 
89 struct i915_capture_list {
90 	struct i915_capture_list *next;
91 	struct i915_vma *vma;
92 };
93 
94 /**
95  * Request queue structure.
96  *
97  * The request queue allows us to note sequence numbers that have been emitted
98  * and may be associated with active buffers to be retired.
99  *
100  * By keeping this list, we can avoid having to do questionable sequence
101  * number comparisons on buffer last_read|write_seqno. It also allows an
102  * emission time to be associated with the request for tracking how far ahead
103  * of the GPU the submission is.
104  *
105  * When modifying this structure be very aware that we perform a lockless
106  * RCU lookup of it that may race against reallocation of the struct
107  * from the slab freelist. We intentionally do not zero the structure on
108  * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct intel_signal_node signaling;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_priotree priotree;
	struct i915_dependency dep;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
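/*
 * Illustrative sketch only: downcasting a generic struct dma_fence. Only
 * fences created by this driver may be converted, so check the ops first:
 *
 *	struct i915_request *rq = NULL;
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 *
 * A NULL fence is also accepted by to_request(), as the fence is the first
 * member of struct i915_request (see the BUILD_BUG_ON above).
 */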

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

/**
 * i915_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete can
 * the global seqno be stable (due to the memory barriers on submitting
 * the commands to the hardware to write the breadcrumb, if the HWS shows
 * that it has passed the global seqno and the global seqno is unchanged
 * after the read, it is indeed complete).
 */
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
	return READ_ONCE(request->global_seqno);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void __i915_request_add(struct i915_request *rq, bool flush_caches);
#define i915_request_add(rq) \
	__i915_request_add(rq, false)
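/*
 * Typical construction flow, shown as a hedged sketch only (locking and
 * most error handling elided; "engine", "ctx", "obj" and emit_commands()
 * are hypothetical placeholders):
 *
 *	struct i915_request *rq;
 *	int err;
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	if (err == 0)
 *		err = emit_commands(rq);
 *
 *	i915_request_add(rq);
 *	return err;
 */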

void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
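/*
 * i915_request_wait() returns the remaining timeout in jiffies on success,
 * or a negative error code if the wait was interrupted or timed out. A
 * hedged usage sketch (struct_mutex not held, so I915_WAIT_LOCKED is not
 * passed):
 *
 *	long timeout;
 *
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(100));
 *	if (timeout < 0)
 *		return timeout;
 */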

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than, or equal to, seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
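/*
 * The unsigned subtraction and signed comparison make the test safe across
 * seqno wraparound. A worked example (illustrative values only):
 *
 *	i915_seqno_passed(0x00000002, 0xfffffffe)
 *		-> (s32)(0x00000002 - 0xfffffffe) = (s32)0x00000004 = 4 >= 0
 *		-> true: 2 is "after" 0xfffffffe across the wrap
 *
 *	i915_seqno_passed(0xfffffffe, 0x00000002)
 *		-> (s32)(0xfffffffe - 0x00000002) = (s32)0xfffffffc = -4 < 0
 *		-> false
 */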

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
		seqno == i915_request_global_seqno(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return __i915_request_completed(rq, seqno);
}

static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
				 seqno - 1);
}

static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
{
	const struct i915_request *rq =
		container_of(pt, const struct i915_request, priotree);

	return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers" but with pipeline synchronisation objects a la GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU; for example, we should not rewrite an object's PTEs whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle);
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, i.e. when it is
 * retired after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
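/*
 * Sketch of the intended usage (illustrative only; "struct my_object" and
 * my_object_retire() are hypothetical, real users such as struct i915_vma
 * do this via i915_vma_move_to_active()):
 *
 *	struct my_object {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	static void my_object_retire(struct i915_gem_active *active,
 *				     struct i915_request *rq)
 *	{
 *		... called once rq is retired, typically under struct_mutex ...
 *	}
 *
 *	init_request_active(&obj->last_write, my_object_retire);
 *
 * and later, when emitting a request that writes to the object:
 *
 *	i915_gem_active_set(&obj->last_write, rq);
 */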

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold the RCU read lock, but
 * the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * rq = active.request
	 *				retire(rq) -> free(rq);
	 *				(rq is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				rq = new submission on a new object
	 * ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno or the HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_request_get_rcu, but not executing
		 * that while still executing i915_request_put() creates
		 * havoc enough.  Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}
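/*
 * Illustrative sketch only (the tracker field name is hypothetical):
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_gem_active_get_unlocked(&obj->last_write);
 *	if (rq) {
 *		... inspect or wait upon rq without holding struct_mutex ...
 *		i915_request_put(rq);
 *	}
 *
 * i915_gem_active_wait() below follows exactly this pattern.
 */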

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active tracker to wait upon
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_request_wait(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct i915_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active tracker to retire
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
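/*
 * for_each_active() iterates @idx over each bit set in @mask, lowest bit
 * first, consuming @mask as it goes. A hedged usage sketch (the mask and
 * array of trackers are hypothetical):
 *
 *	unsigned int mask = obj->active_mask;
 *	int idx;
 *
 *	for_each_active(mask, idx)
 *		i915_gem_active_wait(&obj->read[idx], I915_WAIT_INTERRUPTIBLE);
 */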

#endif /* I915_REQUEST_H */