/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		err = 0;
		with_intel_runtime_pm(ce->engine->i915, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		intel_context_get(ce);
		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_put(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

static void intel_context_retire(struct i915_active_request *active,
				 struct i915_request *rq)
{
	struct intel_context *ce =
		container_of(active, typeof(*ce), active_tracker);

	intel_context_unpin(ce);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->saturated = 0;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	ce->saturated = 0;
	intel_engine_pm_put(ce->engine);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}