/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline bool
i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

/* i915_gem_context.c */
int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}
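
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * i915_gem_context_get_vm_rcu() resolves the context's address space (or
 * falls back to the global GTT) and returns it with a reference held, so
 * the caller is expected to balance it with i915_vm_put() when done:
 *
 *	struct i915_address_space *vm;
 *
 *	vm = i915_gem_context_get_vm_rcu(ctx);
 *	// ... use vm ...
 *	i915_vm_put(vm);
 */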

static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce = ERR_PTR(-EINVAL);

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
	} rcu_read_unlock();

	return ce;
}
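
/*
 * Sketch of typical use (an assumption based on the reference semantics
 * above, not code taken from this header): a successful lookup returns an
 * intel_context with a reference held, while an invalid index yields
 * ERR_PTR(-EINVAL), so callers check with IS_ERR() and drop the reference
 * with intel_context_put() when finished:
 *
 *	struct intel_context *ce;
 *
 *	ce = i915_gem_context_get_engine(ctx, idx);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	// ... submit work on ce ...
 *	intel_context_put(ce);
 */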

static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	GEM_BUG_ON(!engines);
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
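
/*
 * Example iteration (a sketch under the locking rules above): the engines
 * array is protected by ctx->engines_mutex, so walk it between
 * i915_gem_context_lock_engines() and i915_gem_context_unlock_engines():
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		// ... operate on each ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */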

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

#endif /* !__I915_GEM_CONTEXT_H__ */