/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * memory pressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
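
/*
 * A minimal usage sketch (illustrative only, not part of the driver API):
 * the getters/putters above follow the usual dma_fence refcounting pattern,
 * so a hypothetical caller that wants to keep a request alive after dropping
 * its locks might do:
 *
 *	struct i915_request *rq = i915_request_get(submitted_rq);
 *
 *	... drop locks, wait upon or inspect rq ...
 *
 *	i915_request_put(rq);
 *
 * where submitted_rq is whatever request the caller obtained under its locks.
 */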

/**
 * i915_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead, those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete is the
 * global seqno stable (due to the memory barriers on submitting the
 * commands to the hardware to write the breadcrumb): if the HWS shows that
 * it has passed the global seqno and the global seqno is unchanged after
 * a second read, the request is indeed complete.
 */
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
	return READ_ONCE(request->global_seqno);
}
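
/*
 * A minimal sketch of the seqlock-like check described above (illustrative
 * only; hws_breadcrumb stands in for the breadcrumb value the caller reads
 * from the HW status page):
 *
 *	u32 seqno = i915_request_global_seqno(rq);
 *
 *	if (seqno &&
 *	    i915_seqno_passed(hws_breadcrumb, seqno) &&
 *	    seqno == i915_request_global_seqno(rq))
 *		... rq has certainly been completed by the GPU ...
 */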

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
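
/*
 * A minimal usage sketch for i915_request_wait() (illustrative only): an
 * interruptible, bounded wait that does not require struct_mutex. The
 * timeout is in jiffies; a negative return value indicates the wait did not
 * complete successfully (for example, it was interrupted by a signal).
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */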

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than or equal to seq2, using wrapping
 * (modular) u32 comparison.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
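
/*
 * For example (illustrative only), across the u32 wraparound:
 *
 *	i915_seqno_passed(1, 0xfffffff0) == true  (1 is 17 steps later)
 *	i915_seqno_passed(0xfffffff0, 1) == false
 *
 * provided the two seqno are within 2^31 of each other.
 */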

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * Returns true if the request has been submitted to hardware, and the hardware
 * has advanced past the end of the previous request and so should be either
 * currently processing the request (though it may be preempted and so
 * not necessarily the next request to complete) or have completed the request.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context has been set up and is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTEs whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last tracked request becomes idle (it is
 * retired after completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	RCU_INIT_POINTER(active->request, NULL);
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
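
/*
 * A minimal usage sketch (illustrative only; the embedding struct and the
 * retirement callback below are hypothetical, not part of the driver):
 *
 *	struct my_tracker {
 *		struct i915_gem_active active;
 *	};
 *
 *	static void my_retire(struct i915_gem_active *active,
 *			      struct i915_request *rq)
 *	{
 *		... the tracker is now idle ...
 *	}
 *
 *	init_request_active(&tracker->active, my_retire);
 *
 *	... later, under struct_mutex, after emitting a request ...
 *	i915_gem_active_set(&tracker->active, rq);
 */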

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}
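
/*
 * A minimal usage sketch (illustrative only; "tracker" is the hypothetical
 * embedding object from the earlier sketch):
 *
 *	struct i915_request *rq;
 *
 *	lockdep_assert_held(&i915->drm.struct_mutex);
 *	rq = i915_gem_active_get(&tracker->active, &i915->drm.struct_mutex);
 *	if (rq) {
 *		... rq is pinned by our reference, even after struct_mutex
 *		    is dropped ...
 *		i915_request_put(rq);
 *	}
 */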

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * rq = active.request
	 *				retire(rq) -> free(rq);
	 *				(rq is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				rq = new submission on a new object
	 * ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_request_get_rcu, but not executing
		 * that while still executing i915_request_put() creates
		 * havoc enough.  Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference count
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_request_put().
 */
static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct i915_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_request_wait(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct i915_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(request);
	}

	return ret < 0 ? ret : 0;
}
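
/*
 * A minimal usage sketch (illustrative only): waiting locklessly and
 * interruptibly for whatever request the hypothetical tracker last watched.
 *
 *	int err = i915_gem_active_wait(&tracker->active,
 *				       I915_WAIT_INTERRUPTIBLE);
 *	if (err)
 *		return err;
 *
 * where err may be, for example, -EINTR if a signal arrived during the wait.
 */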

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
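
/*
 * A minimal usage sketch for for_each_active() (illustrative only): iterate
 * over the set bits of a bitmask of trackers, visiting each bit index in
 * turn. Here "tracker_mask" and "actives[]" are hypothetical.
 *
 *	unsigned int mask = tracker_mask, idx;
 *
 *	for_each_active(mask, idx)
 *		i915_gem_active_wait(&actives[idx], I915_WAIT_INTERRUPTIBLE);
 *
 * Note that the macro consumes @mask (each bit is cleared as it is visited),
 * so pass a copy if the original value is still needed.
 */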

#endif /* I915_REQUEST_H */