/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	struct i915_request *request;
	u32 seqno;
};

struct intel_signal_node {
	struct intel_wait wait;
	struct list_head link;
};

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	struct intel_signal_node signaling;

	/*
	 * The RCU epoch in which this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure that
	 * under mempressure there are sufficient RCU ticks for us to reclaim
	 * our RCU-protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
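
/*
 * Illustrative sketch (editor's example, not part of the driver): a generic
 * struct dma_fence can be narrowed back to its containing request once it
 * has been identified as ours. "fence" is assumed to come from some external
 * source, e.g. a reservation object shared with another driver.
 *
 *	if (dma_fence_is_i915(fence)) {
 *		struct i915_request *rq = to_request(fence);
 *
 *		engine = rq->engine;
 *	}
 */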

/**
 * i915_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete can
 * the global seqno be stable (due to the memory barriers on submitting
 * the commands to the hardware to write the breadcrumb): if the HWS shows
 * that it has passed the global seqno and the global seqno is unchanged
 * after the read, it is indeed complete.
 */
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
	return READ_ONCE(request->global_seqno);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
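
/*
 * Illustrative sketch (editor's example, not part of the driver): the usual
 * flow for building and emitting a request. "engine", "ctx" and "in_fence"
 * are assumed to be supplied by the caller, with struct_mutex already held.
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	... emit commands into the ring ...
 *	i915_request_add(rq);
 *
 * Callers that need to block can then use i915_request_wait(), passing
 * I915_WAIT_LOCKED only if struct_mutex is held across the wait.
 */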

static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
					    u32 seqno);
static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
					      u32 seqno);

/**
 * Returns true if seq1 is later than (or the same as) seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
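
/*
 * Editor's note with a worked example: the signed subtraction makes the
 * comparison robust against the 32-bit seqno wrapping around, provided the
 * two values are within 2^31 of each other. For instance:
 *
 *	i915_seqno_passed(5, 3)                   -> true  (5 is later)
 *	i915_seqno_passed(3, 5)                   -> false
 *	i915_seqno_passed(0x00000002, 0xfffffffe) -> true  (2 came after the wrap)
 */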

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * Returns true if the request has been submitted to hardware, and the hardware
 * has advanced past the end of the previous request and so should be either
 * currently processing the request (though it may be preempted and so
 * not necessarily the next request to complete) or have completed the request.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno) /* not yet submitted to HW */
		return false;

	return intel_engine_has_started(rq->engine, seqno);
}

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return intel_engine_has_completed(rq->engine, seqno) &&
		seqno == i915_request_global_seqno(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return __i915_request_completed(rq, seqno);
}

static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
	const struct i915_request *rq =
		container_of(node, const struct i915_request, sched);

	return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. These are not to be confused with our
 * "fence registers", but are pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU; for example, we should not rewrite an object's PTEs
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle (it is retired
 * after completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	RCU_INIT_POINTER(active->request, NULL);
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
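
/*
 * Illustrative sketch (editor's example, not part of the driver): an owner
 * embeds the tracker and points it at each new request it issues. The names
 * "my_obj" and "my_obj_retired" are hypothetical; updates are assumed to be
 * serialised by struct_mutex as described above.
 *
 *	struct my_obj {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	static void my_obj_retired(struct i915_gem_active *active,
 *				   struct i915_request *rq)
 *	{
 *		... the tracker is now idle ...
 *	}
 *
 *	init_request_active(&obj->last_write, my_obj_retired);
 *	...
 *	i915_gem_active_set(&obj->last_write, rq);
 */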

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * rq = active.request
	 *				retire(rq) -> free(rq);
	 *				(rq is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				rq = new submission on a new object
	 * ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that either the
	 * seqno or the HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_request_get_rcu, but not executing
		 * that while still executing i915_request_put() creates
		 * havoc enough.  Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_request_put().
 */
static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct i915_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
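
/*
 * Illustrative sketch (editor's example, not part of the driver): taking a
 * reference without holding struct_mutex, e.g. from a debugfs or wait path.
 * "tracker" is a hypothetical struct i915_gem_active embedded in some object.
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_gem_active_get_unlocked(tracker);
 *	if (rq) {
 *		... inspect or wait on rq ...
 *		i915_request_put(rq);
 *	}
 */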

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_request_wait(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct i915_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
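
/*
 * Illustrative sketch (editor's example, not part of the driver): iterating
 * over each set bit in a mask of active trackers, lowest bit first. The mask
 * is consumed as the loop runs, so use a local copy if it is needed later.
 * "obj->active_mask" is a hypothetical field used only for illustration.
 *
 *	unsigned int mask = READ_ONCE(obj->active_mask);
 *	unsigned int idx;
 *
 *	for_each_active(mask, idx)
 *		... process the tracker for engine idx ...
 */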

#endif /* I915_REQUEST_H */