/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/lockdep.h>

#include "intel_context_types.h"
#include "intel_engine_types.h"

void intel_context_init(struct intel_context *ce,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine);

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine);

void intel_context_free(struct intel_context *ce);

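/*
 * Example usage (an illustrative sketch only, not taken from the driver):
 * contexts returned by intel_context_create() are reference counted (see
 * intel_context_get()/intel_context_put() below), so callers normally drop
 * their reference with intel_context_put() rather than calling
 * intel_context_free() directly.
 *
 *	ce = intel_context_create(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... use the context ...
 *
 *	intel_context_put(ce);
 */
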
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
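
/*
 * Example (an illustrative sketch only, not taken from the driver):
 * stabilise the pinned status before inspecting the context, so that the
 * context cannot be bound or unbound underneath us.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce)) {
 *		... safe to inspect the pinned HW state ...
 *	}
 *
 *	intel_context_unlock_pinned(ce);
 */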

int __intel_context_do_pin(struct intel_context *ce);

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
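
/*
 * Example (an illustrative sketch only, not taken from the driver): every
 * successful intel_context_pin() must be balanced by an
 * intel_context_unpin(); __intel_context_pin() may only be used to take an
 * extra pin on a context that is already pinned.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... the context state and ring are now resident ...
 *
 *	intel_context_unpin(ce);
 */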

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
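
/*
 * Example (an illustrative sketch only, not taken from the driver):
 * intel_context_enter()/intel_context_exit() nest; only the first enter
 * invokes ce->ops->enter() and only the final exit invokes ce->ops->exit().
 * In the driver these are typically called by the request machinery while
 * holding the context's timeline lock.
 *
 *	intel_context_enter(ce);
 *	... submit work to the context ...
 *	intel_context_exit(ce);
 */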

int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
void intel_context_active_release(struct intel_context *ce);

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
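
/*
 * Example (an illustrative sketch only, not taken from the driver): take a
 * reference before stashing the context in a longer-lived structure, and
 * drop it again when done. Here 'foo' is a hypothetical owner;
 * intel_context_put() invokes ce->ops->destroy() once the last reference
 * is released.
 *
 *	foo->ce = intel_context_get(ce);
 *	...
 *	intel_context_put(foo->ce);
 */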

static inline int __must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->ring->timeline->mutex)
{
	return mutex_lock_interruptible(&ce->ring->timeline->mutex);
}

static inline void intel_context_timeline_unlock(struct intel_context *ce)
	__releases(&ce->ring->timeline->mutex)
{
	mutex_unlock(&ce->ring->timeline->mutex);
}
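
/*
 * Example (an illustrative sketch only, not taken from the driver):
 * serialise request construction against the context's timeline. The lock
 * is interruptible, so the error must be propagated.
 *
 *	err = intel_context_timeline_lock(ce);
 *	if (err)
 *		return err;
 *
 *	... build and queue requests on the timeline ...
 *
 *	intel_context_timeline_unlock(ce);
 */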

struct i915_request *intel_context_create_request(struct intel_context *ce);
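
/*
 * Example (an illustrative sketch only, not taken from the driver): create
 * a request on the context and submit it with i915_request_add().
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands into the request ...
 *
 *	i915_request_add(rq);
 */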

#endif /* __INTEL_CONTEXT_H__ */