/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
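
/*
 * Example usage (illustrative sketch only; __example_query_pinned() is a
 * hypothetical helper, not part of the i915 API): take ce->pin_mutex via
 * intel_context_lock_pinned() so the pinned status cannot change while it
 * is being inspected, then drop the lock again.
 */
static inline int __example_query_pinned(struct intel_context *ce, bool *pinned)
{
	int err;

	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	/* Stable while ce->pin_mutex is held */
	*pinned = intel_context_is_pinned(ce);

	intel_context_unlock_pinned(ce);
	return 0;
}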
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					  struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);
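
/*
 * Example usage (illustrative sketch only; __example_use_pinned() is a
 * hypothetical helper, not part of the i915 API): hold a pin across any
 * use of the context's HW state. intel_context_pin() may sleep and may
 * fail, so the error must be checked before the context is used.
 */
static inline int __example_use_pinned(struct intel_context *ce)
{
	int err;

	err = intel_context_pin(ce);
	if (err)
		return err;

	/* ... the context, its ring and page tables are now resident ... */

	intel_context_unpin(ce);
	return 0;
}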
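
/*
 * Example usage (illustrative sketch only; __example_with_timeline() is a
 * hypothetical helper, not part of the i915 API, and assumes IS_ERR()/
 * PTR_ERR() are reachable through the existing includes): the timeline
 * mutex is taken interruptibly, so intel_context_timeline_lock() can
 * return an ERR_PTR and must be checked before the timeline is touched.
 */
static inline int __example_with_timeline(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	/* ... operate on the timeline while tl->mutex is held ... */

	intel_context_timeline_unlock(tl);
	return 0;
}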
static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period = ce->engine->gt->clock_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}

#endif /* __INTEL_CONTEXT_H__ */