/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
			err = ce->ops->alloc(ce);
			if (unlikely(err))
				goto err;

			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
		}

		err = 0;
		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
			  ce->engine->name, ce->timeline->fence_context,
			  ce->ring->head, ce->ring->tail);

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		GEM_TRACE("%s context:%llx retire\n",
			  ce->engine->name, ce->timeline->fence_context);

		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}
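
/*
 * Usage sketch (illustrative only, not part of the driver; error handling
 * trimmed): a context must be pinned before requests are built against it and
 * unpinned once the caller has finished submitting. Callers normally go
 * through intel_context_pin() (the inline fast-path wrapper, see
 * intel_context.h), which falls back to __intel_context_do_pin() above when
 * the context is not already pinned:
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	rq = i915_request_create(ce);
 *	...
 *	intel_context_unpin(ce);
 *
 * intel_context_create_request() at the end of this file is the canonical
 * pin/unpin pairing.
 */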

static int __context_pin_state(struct i915_vma *vma)
{
	u64 flags;
	int err;

	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	flags |= PIN_HIGH | PIN_GLOBAL;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	__i915_vma_unpin(vma);
}

__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	GEM_TRACE("%s context:%llx retire\n",
		  ce->engine->name, ce->timeline->fence_context);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	intel_ring_unpin(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err) {
			i915_active_release(&ce->active);
			return err;
		}
	}

	return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
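
/*
 * Note on the active tracking above (illustrative sketch, error handling
 * elided): __intel_context_active() runs when the context first becomes
 * active and pins the ring, the timeline and, if present, the context state;
 * __intel_context_retire() drops those pins again once the last tracked
 * request has been retired. The backend ce->ops->pin() implementations are
 * expected to take the acquire, with intel_context_unpin() above dropping it
 * via intel_context_active_release(), i.e. the two calls bracket the period
 * in which requests may be tracked on &ce->active:
 *
 *	err = intel_context_active_acquire(ce);
 *	if (err)
 *		return err;
 *	...
 *	intel_context_active_release(ce);
 */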

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (vm)
		ce->vm = i915_vm_get(vm);
	else
		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
	rcu_read_unlock();
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_16K);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					  struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->hw_context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      SINGLE_DEPTH_NESTING);
		if (err)
			return err;

		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		mutex_unlock(&tl->mutex);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on account of it; in other words,
	 * we transfer the pinned ce to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif