/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

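/*
 * Context flag helpers: CONTEXT_* bits live in ctx->flags and track internal
 * driver state (closed, banned, ...), while UCONTEXT_* bits live in
 * ctx->user_flags and are controlled by userspace through the context
 * get/setparam ioctls.
 */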
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

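/*
 * Pin the context's global hardware id: the fast path just bumps the pin
 * count when an id is already pinned (count non-zero); otherwise the slow
 * path in __i915_gem_context_pin_hw_id() assigns an id before pinning it.
 */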
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
	if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
		return 0;

	return __i915_gem_context_pin_hw_id(ctx);
}

static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
	atomic_dec(&ctx->hw_id_pin_count);
}

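/* Contexts created by the driver itself have no owning file. */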
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

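/*
 * Context references are kref based; dropping the last reference calls
 * i915_gem_context_release().
 */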
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

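/*
 * The ctx->engines array is RCU protected. i915_gem_context_engines()
 * requires ctx->engines_mutex to be held (use the lock/unlock helpers
 * below); i915_gem_context_get_engine() instead takes an RCU-safe
 * reference on a single engine context.
 */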
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

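/* Caller must hold ctx->engines_mutex; see i915_gem_context_lock_engines(). */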
static inline struct intel_context *
i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	return i915_gem_context_engines(ctx)->engines[idx];
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce = ERR_PTR(-EINVAL);

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
	} rcu_read_unlock();

	return ce;
}

static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	GEM_BUG_ON(!engines);
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
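
/*
 * Typical iteration over a context's engines (a sketch of the expected
 * usage, skipping unpopulated slots):
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		... per-engine work on ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */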

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

#endif /* !__I915_GEM_CONTEXT_H__ */