/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}
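
/*
 * Note: the helpers in this header manipulate two distinct flag words.
 * ctx->flags (CONTEXT_* bits, such as CONTEXT_CLOSED above) tracks
 * driver-internal context state, while ctx->user_flags (UCONTEXT_* bits)
 * holds per-context options controlled by userspace, typically through the
 * context setparam interface.
 */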

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline bool
i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

/* i915_gem_context.c */
int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}

static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}
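
/*
 * A minimal usage sketch for walking a context's engine map: take
 * ctx->engines_mutex via i915_gem_context_lock_engines() above, walk the
 * result with the for_each_gem_engine() iterator defined below, and drop
 * the lock once the walk is complete.
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		... use ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */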

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce = ERR_PTR(-EINVAL);

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
	} rcu_read_unlock();

	return ce;
}

static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	GEM_BUG_ON(!engines);
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

#endif /* !__I915_GEM_CONTEXT_H__ */