110be98a7SChris Wilson /* 210be98a7SChris Wilson * SPDX-License-Identifier: MIT 310be98a7SChris Wilson * 410be98a7SChris Wilson * Copyright © 2011-2012 Intel Corporation 510be98a7SChris Wilson */ 610be98a7SChris Wilson 710be98a7SChris Wilson /* 810be98a7SChris Wilson * This file implements HW context support. On gen5+ a HW context consists of an 910be98a7SChris Wilson * opaque GPU object which is referenced at times of context saves and restores. 1010be98a7SChris Wilson * With RC6 enabled, the context is also referenced as the GPU enters and exits 1110be98a7SChris Wilson * from RC6 (GPU has its own internal power context, except on gen5). Though 1210be98a7SChris Wilson * something like a context does exist for the media ring, the code only 1310be98a7SChris Wilson * supports contexts for the render ring. 1410be98a7SChris Wilson * 1510be98a7SChris Wilson * In software, there is a distinction between contexts created by the user, 1610be98a7SChris Wilson * and the default HW context. The default HW context is used by GPU clients 1710be98a7SChris Wilson * that do not request setup of their own hardware context. The default 1810be98a7SChris Wilson * context's state is never restored to help prevent programming errors. This 1910be98a7SChris Wilson * would happen if a client ran and piggy-backed off another client's GPU state. 2010be98a7SChris Wilson * The default context only exists to give the GPU some offset to load as the 2110be98a7SChris Wilson * current to invoke a save of the context we actually care about. In fact, the 2210be98a7SChris Wilson * code could likely be constructed, albeit in a more complicated fashion, to 2310be98a7SChris Wilson * never use the default context, though that limits the driver's ability to 2410be98a7SChris Wilson * swap out, and/or destroy other contexts. 2510be98a7SChris Wilson * 2610be98a7SChris Wilson * All other contexts are created as a request by the GPU client. 
These contexts 2710be98a7SChris Wilson * store GPU state, and thus allow GPU clients to not re-emit state (and 2810be98a7SChris Wilson * potentially query certain state) at any time. The kernel driver makes 2910be98a7SChris Wilson * certain that the appropriate commands are inserted. 3010be98a7SChris Wilson * 3110be98a7SChris Wilson * The context life cycle is semi-complicated in that context BOs may live 3210be98a7SChris Wilson * longer than the context itself because of the way the hardware, and object 3310be98a7SChris Wilson * tracking works. Below is a very crude representation of the state machine 3410be98a7SChris Wilson * describing the context life. 3510be98a7SChris Wilson * refcount pincount active 3610be98a7SChris Wilson * S0: initial state 0 0 0 3710be98a7SChris Wilson * S1: context created 1 0 0 3810be98a7SChris Wilson * S2: context is currently running 2 1 X 3910be98a7SChris Wilson * S3: GPU referenced, but not current 2 0 1 4010be98a7SChris Wilson * S4: context is current, but destroyed 1 1 0 4110be98a7SChris Wilson * S5: like S3, but destroyed 1 0 1 4210be98a7SChris Wilson * 4310be98a7SChris Wilson * The most common (but not all) transitions: 4410be98a7SChris Wilson * S0->S1: client creates a context 4510be98a7SChris Wilson * S1->S2: client submits execbuf with context 4610be98a7SChris Wilson * S2->S3: another client submits execbuf with context 4710be98a7SChris Wilson * S3->S1: context object was retired 4810be98a7SChris Wilson * S3->S2: client submits another execbuf 4910be98a7SChris Wilson * S2->S4: context destroy called with current context 5010be98a7SChris Wilson * S3->S5->S0: destroy path 5110be98a7SChris Wilson * S4->S5->S0: destroy path on current context 5210be98a7SChris Wilson * 5310be98a7SChris Wilson * There are two confusing terms used above: 5410be98a7SChris Wilson * The "current context" means the context which is currently running on the 5510be98a7SChris Wilson * GPU. 
The GPU has loaded its state already and has stored away the gtt 5610be98a7SChris Wilson * offset of the BO. The GPU is not actively referencing the data at this 5710be98a7SChris Wilson * offset, but it will on the next context switch. The only way to avoid this 5810be98a7SChris Wilson * is to do a GPU reset. 5910be98a7SChris Wilson * 6010be98a7SChris Wilson * An "active context" is one which was previously the "current context" and is 6110be98a7SChris Wilson * on the active list waiting for the next context switch to occur. Until this 6210be98a7SChris Wilson * happens, the object must remain at the same gtt offset. It is therefore 6310be98a7SChris Wilson * possible to destroy a context, but it is still active. 6410be98a7SChris Wilson * 6510be98a7SChris Wilson */ 6610be98a7SChris Wilson 6710be98a7SChris Wilson #include <linux/log2.h> 6810be98a7SChris Wilson #include <linux/nospec.h> 6910be98a7SChris Wilson 7010be98a7SChris Wilson #include <drm/i915_drm.h> 7110be98a7SChris Wilson 722c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h" 739f3ccd40SChris Wilson #include "gt/intel_context.h" 742e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h" 75de5825beSChris Wilson #include "gt/intel_engine_pm.h" 76750e76b4SChris Wilson #include "gt/intel_engine_user.h" 772871ea85SChris Wilson #include "gt/intel_lrc_reg.h" 782871ea85SChris Wilson #include "gt/intel_ring.h" 7910be98a7SChris Wilson 8010be98a7SChris Wilson #include "i915_gem_context.h" 8110be98a7SChris Wilson #include "i915_globals.h" 8210be98a7SChris Wilson #include "i915_trace.h" 8310be98a7SChris Wilson #include "i915_user_extensions.h" 8410be98a7SChris Wilson 8510be98a7SChris Wilson #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 8610be98a7SChris Wilson 8710be98a7SChris Wilson static struct i915_global_gem_context { 8810be98a7SChris Wilson struct i915_global base; 8910be98a7SChris Wilson struct kmem_cache *slab_luts; 9010be98a7SChris Wilson } global; 9110be98a7SChris Wilson 9210be98a7SChris Wilson struct 
i915_lut_handle *i915_lut_handle_alloc(void) 9310be98a7SChris Wilson { 9410be98a7SChris Wilson return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); 9510be98a7SChris Wilson } 9610be98a7SChris Wilson 9710be98a7SChris Wilson void i915_lut_handle_free(struct i915_lut_handle *lut) 9810be98a7SChris Wilson { 9910be98a7SChris Wilson return kmem_cache_free(global.slab_luts, lut); 10010be98a7SChris Wilson } 10110be98a7SChris Wilson 10210be98a7SChris Wilson static void lut_close(struct i915_gem_context *ctx) 10310be98a7SChris Wilson { 10410be98a7SChris Wilson struct radix_tree_iter iter; 10510be98a7SChris Wilson void __rcu **slot; 10610be98a7SChris Wilson 107155ab883SChris Wilson lockdep_assert_held(&ctx->mutex); 10810be98a7SChris Wilson 10910be98a7SChris Wilson rcu_read_lock(); 11010be98a7SChris Wilson radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 11110be98a7SChris Wilson struct i915_vma *vma = rcu_dereference_raw(*slot); 112155ab883SChris Wilson struct drm_i915_gem_object *obj = vma->obj; 113155ab883SChris Wilson struct i915_lut_handle *lut; 11410be98a7SChris Wilson 115155ab883SChris Wilson if (!kref_get_unless_zero(&obj->base.refcount)) 116155ab883SChris Wilson continue; 117155ab883SChris Wilson 118155ab883SChris Wilson rcu_read_unlock(); 119155ab883SChris Wilson i915_gem_object_lock(obj); 120155ab883SChris Wilson list_for_each_entry(lut, &obj->lut_list, obj_link) { 121155ab883SChris Wilson if (lut->ctx != ctx) 122155ab883SChris Wilson continue; 123155ab883SChris Wilson 124155ab883SChris Wilson if (lut->handle != iter.index) 125155ab883SChris Wilson continue; 126155ab883SChris Wilson 127155ab883SChris Wilson list_del(&lut->obj_link); 128155ab883SChris Wilson break; 129155ab883SChris Wilson } 130155ab883SChris Wilson i915_gem_object_unlock(obj); 131155ab883SChris Wilson rcu_read_lock(); 132155ab883SChris Wilson 133155ab883SChris Wilson if (&lut->obj_link != &obj->lut_list) { 134155ab883SChris Wilson i915_lut_handle_free(lut); 13510be98a7SChris Wilson 
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); 136155ab883SChris Wilson if (atomic_dec_and_test(&vma->open_count) && 137155ab883SChris Wilson !i915_vma_is_ggtt(vma)) 138155ab883SChris Wilson i915_vma_close(vma); 139155ab883SChris Wilson i915_gem_object_put(obj); 140155ab883SChris Wilson } 14110be98a7SChris Wilson 142155ab883SChris Wilson i915_gem_object_put(obj); 14310be98a7SChris Wilson } 14410be98a7SChris Wilson rcu_read_unlock(); 14510be98a7SChris Wilson } 14610be98a7SChris Wilson 14710be98a7SChris Wilson static struct intel_context * 14810be98a7SChris Wilson lookup_user_engine(struct i915_gem_context *ctx, 14910be98a7SChris Wilson unsigned long flags, 15010be98a7SChris Wilson const struct i915_engine_class_instance *ci) 15110be98a7SChris Wilson #define LOOKUP_USER_INDEX BIT(0) 15210be98a7SChris Wilson { 15310be98a7SChris Wilson int idx; 15410be98a7SChris Wilson 15510be98a7SChris Wilson if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) 15610be98a7SChris Wilson return ERR_PTR(-EINVAL); 15710be98a7SChris Wilson 15810be98a7SChris Wilson if (!i915_gem_context_user_engines(ctx)) { 15910be98a7SChris Wilson struct intel_engine_cs *engine; 16010be98a7SChris Wilson 16110be98a7SChris Wilson engine = intel_engine_lookup_user(ctx->i915, 16210be98a7SChris Wilson ci->engine_class, 16310be98a7SChris Wilson ci->engine_instance); 16410be98a7SChris Wilson if (!engine) 16510be98a7SChris Wilson return ERR_PTR(-EINVAL); 16610be98a7SChris Wilson 167f1c4d157SChris Wilson idx = engine->legacy_idx; 16810be98a7SChris Wilson } else { 16910be98a7SChris Wilson idx = ci->engine_instance; 17010be98a7SChris Wilson } 17110be98a7SChris Wilson 17210be98a7SChris Wilson return i915_gem_context_get_engine(ctx, idx); 17310be98a7SChris Wilson } 17410be98a7SChris Wilson 17527dbae8fSChris Wilson static struct i915_address_space * 17627dbae8fSChris Wilson context_get_vm_rcu(struct i915_gem_context *ctx) 17727dbae8fSChris Wilson { 17827dbae8fSChris Wilson 
GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); 17927dbae8fSChris Wilson 18027dbae8fSChris Wilson do { 18127dbae8fSChris Wilson struct i915_address_space *vm; 18227dbae8fSChris Wilson 18327dbae8fSChris Wilson /* 18427dbae8fSChris Wilson * We do not allow downgrading from full-ppgtt [to a shared 18527dbae8fSChris Wilson * global gtt], so ctx->vm cannot become NULL. 18627dbae8fSChris Wilson */ 18727dbae8fSChris Wilson vm = rcu_dereference(ctx->vm); 18827dbae8fSChris Wilson if (!kref_get_unless_zero(&vm->ref)) 18927dbae8fSChris Wilson continue; 19027dbae8fSChris Wilson 19127dbae8fSChris Wilson /* 19227dbae8fSChris Wilson * This ppgtt may have been reallocated between 19327dbae8fSChris Wilson * the read and the kref, and reassigned to a third 19427dbae8fSChris Wilson * context. In order to avoid inadvertent sharing 19527dbae8fSChris Wilson * of this ppgtt with that third context (and not 19627dbae8fSChris Wilson * src), we have to confirm that we have the same 19727dbae8fSChris Wilson * ppgtt after passing through the strong memory 19827dbae8fSChris Wilson * barrier implied by a successful 19927dbae8fSChris Wilson * kref_get_unless_zero(). 20027dbae8fSChris Wilson * 20127dbae8fSChris Wilson * Once we have acquired the current ppgtt of ctx, 20227dbae8fSChris Wilson * we no longer care if it is released from ctx, as 20327dbae8fSChris Wilson * it cannot be reallocated elsewhere. 
20427dbae8fSChris Wilson */ 20527dbae8fSChris Wilson 20627dbae8fSChris Wilson if (vm == rcu_access_pointer(ctx->vm)) 20727dbae8fSChris Wilson return rcu_pointer_handoff(vm); 20827dbae8fSChris Wilson 20927dbae8fSChris Wilson i915_vm_put(vm); 21027dbae8fSChris Wilson } while (1); 21127dbae8fSChris Wilson } 21227dbae8fSChris Wilson 213e6ba7648SChris Wilson static void intel_context_set_gem(struct intel_context *ce, 214e6ba7648SChris Wilson struct i915_gem_context *ctx) 215e6ba7648SChris Wilson { 2166a8679c0SChris Wilson GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 2176a8679c0SChris Wilson RCU_INIT_POINTER(ce->gem_context, ctx); 218e6ba7648SChris Wilson 219e6ba7648SChris Wilson if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 220e6ba7648SChris Wilson ce->ring = __intel_context_ring_size(SZ_16K); 221e6ba7648SChris Wilson 222e6ba7648SChris Wilson if (rcu_access_pointer(ctx->vm)) { 223e6ba7648SChris Wilson struct i915_address_space *vm; 224e6ba7648SChris Wilson 225e6ba7648SChris Wilson rcu_read_lock(); 226e6ba7648SChris Wilson vm = context_get_vm_rcu(ctx); /* hmm */ 227e6ba7648SChris Wilson rcu_read_unlock(); 228e6ba7648SChris Wilson 229e6ba7648SChris Wilson i915_vm_put(ce->vm); 230e6ba7648SChris Wilson ce->vm = vm; 231e6ba7648SChris Wilson } 232e6ba7648SChris Wilson 233e6ba7648SChris Wilson GEM_BUG_ON(ce->timeline); 234e6ba7648SChris Wilson if (ctx->timeline) 235e6ba7648SChris Wilson ce->timeline = intel_timeline_get(ctx->timeline); 236e6ba7648SChris Wilson 237e6ba7648SChris Wilson if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 238e6ba7648SChris Wilson intel_engine_has_semaphores(ce->engine)) 239e6ba7648SChris Wilson __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 240e6ba7648SChris Wilson } 241e6ba7648SChris Wilson 24210be98a7SChris Wilson static void __free_engines(struct i915_gem_engines *e, unsigned int count) 24310be98a7SChris Wilson { 24410be98a7SChris Wilson while (count--) { 24510be98a7SChris Wilson if (!e->engines[count]) 24610be98a7SChris Wilson continue; 
24710be98a7SChris Wilson 2486a8679c0SChris Wilson RCU_INIT_POINTER(e->engines[count]->gem_context, NULL); 24910be98a7SChris Wilson intel_context_put(e->engines[count]); 25010be98a7SChris Wilson } 25110be98a7SChris Wilson kfree(e); 25210be98a7SChris Wilson } 25310be98a7SChris Wilson 25410be98a7SChris Wilson static void free_engines(struct i915_gem_engines *e) 25510be98a7SChris Wilson { 25610be98a7SChris Wilson __free_engines(e, e->num_engines); 25710be98a7SChris Wilson } 25810be98a7SChris Wilson 259155ab883SChris Wilson static void free_engines_rcu(struct rcu_head *rcu) 26010be98a7SChris Wilson { 261155ab883SChris Wilson free_engines(container_of(rcu, struct i915_gem_engines, rcu)); 26210be98a7SChris Wilson } 26310be98a7SChris Wilson 26410be98a7SChris Wilson static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) 26510be98a7SChris Wilson { 266f1c4d157SChris Wilson const struct intel_gt *gt = &ctx->i915->gt; 26710be98a7SChris Wilson struct intel_engine_cs *engine; 26810be98a7SChris Wilson struct i915_gem_engines *e; 26910be98a7SChris Wilson enum intel_engine_id id; 27010be98a7SChris Wilson 27110be98a7SChris Wilson e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); 27210be98a7SChris Wilson if (!e) 27310be98a7SChris Wilson return ERR_PTR(-ENOMEM); 27410be98a7SChris Wilson 275155ab883SChris Wilson init_rcu_head(&e->rcu); 276f1c4d157SChris Wilson for_each_engine(engine, gt, id) { 27710be98a7SChris Wilson struct intel_context *ce; 27810be98a7SChris Wilson 279a50134b1STvrtko Ursulin if (engine->legacy_idx == INVALID_ENGINE) 280a50134b1STvrtko Ursulin continue; 281a50134b1STvrtko Ursulin 282a50134b1STvrtko Ursulin GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 283a50134b1STvrtko Ursulin GEM_BUG_ON(e->engines[engine->legacy_idx]); 284a50134b1STvrtko Ursulin 285e6ba7648SChris Wilson ce = intel_context_create(engine); 28610be98a7SChris Wilson if (IS_ERR(ce)) { 287a50134b1STvrtko Ursulin __free_engines(e, e->num_engines + 1); 
28810be98a7SChris Wilson return ERR_CAST(ce); 28910be98a7SChris Wilson } 29010be98a7SChris Wilson 291e6ba7648SChris Wilson intel_context_set_gem(ce, ctx); 292e6ba7648SChris Wilson 293a50134b1STvrtko Ursulin e->engines[engine->legacy_idx] = ce; 294a50134b1STvrtko Ursulin e->num_engines = max(e->num_engines, engine->legacy_idx); 29510be98a7SChris Wilson } 296a50134b1STvrtko Ursulin e->num_engines++; 29710be98a7SChris Wilson 29810be98a7SChris Wilson return e; 29910be98a7SChris Wilson } 30010be98a7SChris Wilson 30110be98a7SChris Wilson static void i915_gem_context_free(struct i915_gem_context *ctx) 30210be98a7SChris Wilson { 30310be98a7SChris Wilson GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 30410be98a7SChris Wilson 305a4e7ccdaSChris Wilson spin_lock(&ctx->i915->gem.contexts.lock); 306a4e7ccdaSChris Wilson list_del(&ctx->link); 307a4e7ccdaSChris Wilson spin_unlock(&ctx->i915->gem.contexts.lock); 308a4e7ccdaSChris Wilson 30910be98a7SChris Wilson free_engines(rcu_access_pointer(ctx->engines)); 31010be98a7SChris Wilson mutex_destroy(&ctx->engines_mutex); 31110be98a7SChris Wilson 31210be98a7SChris Wilson if (ctx->timeline) 313f0c02c1bSTvrtko Ursulin intel_timeline_put(ctx->timeline); 31410be98a7SChris Wilson 31510be98a7SChris Wilson put_pid(ctx->pid); 31610be98a7SChris Wilson mutex_destroy(&ctx->mutex); 31710be98a7SChris Wilson 31810be98a7SChris Wilson kfree_rcu(ctx, rcu); 31910be98a7SChris Wilson } 32010be98a7SChris Wilson 321a4e7ccdaSChris Wilson static void contexts_free_all(struct llist_node *list) 32210be98a7SChris Wilson { 32310be98a7SChris Wilson struct i915_gem_context *ctx, *cn; 32410be98a7SChris Wilson 325a4e7ccdaSChris Wilson llist_for_each_entry_safe(ctx, cn, list, free_link) 32610be98a7SChris Wilson i915_gem_context_free(ctx); 32710be98a7SChris Wilson } 32810be98a7SChris Wilson 329a4e7ccdaSChris Wilson static void contexts_flush_free(struct i915_gem_contexts *gc) 33010be98a7SChris Wilson { 331a4e7ccdaSChris Wilson 
contexts_free_all(llist_del_all(&gc->free_list)); 33210be98a7SChris Wilson } 33310be98a7SChris Wilson 33410be98a7SChris Wilson static void contexts_free_worker(struct work_struct *work) 33510be98a7SChris Wilson { 336a4e7ccdaSChris Wilson struct i915_gem_contexts *gc = 337a4e7ccdaSChris Wilson container_of(work, typeof(*gc), free_work); 33810be98a7SChris Wilson 339a4e7ccdaSChris Wilson contexts_flush_free(gc); 34010be98a7SChris Wilson } 34110be98a7SChris Wilson 34210be98a7SChris Wilson void i915_gem_context_release(struct kref *ref) 34310be98a7SChris Wilson { 34410be98a7SChris Wilson struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 345a4e7ccdaSChris Wilson struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; 34610be98a7SChris Wilson 34710be98a7SChris Wilson trace_i915_context_free(ctx); 348a4e7ccdaSChris Wilson if (llist_add(&ctx->free_link, &gc->free_list)) 349a4e7ccdaSChris Wilson schedule_work(&gc->free_work); 35010be98a7SChris Wilson } 35110be98a7SChris Wilson 3522e0986a5SChris Wilson static inline struct i915_gem_engines * 3532e0986a5SChris Wilson __context_engines_static(const struct i915_gem_context *ctx) 3542e0986a5SChris Wilson { 3552e0986a5SChris Wilson return rcu_dereference_protected(ctx->engines, true); 3562e0986a5SChris Wilson } 3572e0986a5SChris Wilson 3582e0986a5SChris Wilson static bool __reset_engine(struct intel_engine_cs *engine) 3592e0986a5SChris Wilson { 3602e0986a5SChris Wilson struct intel_gt *gt = engine->gt; 3612e0986a5SChris Wilson bool success = false; 3622e0986a5SChris Wilson 3632e0986a5SChris Wilson if (!intel_has_reset_engine(gt)) 3642e0986a5SChris Wilson return false; 3652e0986a5SChris Wilson 3662e0986a5SChris Wilson if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, 3672e0986a5SChris Wilson >->reset.flags)) { 3682e0986a5SChris Wilson success = intel_engine_reset(engine, NULL) == 0; 3692e0986a5SChris Wilson clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, 3702e0986a5SChris Wilson >->reset.flags); 
3712e0986a5SChris Wilson } 3722e0986a5SChris Wilson 3732e0986a5SChris Wilson return success; 3742e0986a5SChris Wilson } 3752e0986a5SChris Wilson 3762e0986a5SChris Wilson static void __reset_context(struct i915_gem_context *ctx, 3772e0986a5SChris Wilson struct intel_engine_cs *engine) 3782e0986a5SChris Wilson { 3792e0986a5SChris Wilson intel_gt_handle_error(engine->gt, engine->mask, 0, 3802e0986a5SChris Wilson "context closure in %s", ctx->name); 3812e0986a5SChris Wilson } 3822e0986a5SChris Wilson 3832e0986a5SChris Wilson static bool __cancel_engine(struct intel_engine_cs *engine) 3842e0986a5SChris Wilson { 3852e0986a5SChris Wilson /* 3862e0986a5SChris Wilson * Send a "high priority pulse" down the engine to cause the 3872e0986a5SChris Wilson * current request to be momentarily preempted. (If it fails to 3882e0986a5SChris Wilson * be preempted, it will be reset). As we have marked our context 3892e0986a5SChris Wilson * as banned, any incomplete request, including any running, will 3902e0986a5SChris Wilson * be skipped following the preemption. 3912e0986a5SChris Wilson * 3922e0986a5SChris Wilson * If there is no hangchecking (one of the reasons why we try to 3932e0986a5SChris Wilson * cancel the context) and no forced preemption, there may be no 3942e0986a5SChris Wilson * means by which we reset the GPU and evict the persistent hog. 3952e0986a5SChris Wilson * Ergo if we are unable to inject a preemptive pulse that can 3962e0986a5SChris Wilson * kill the banned context, we fallback to doing a local reset 3972e0986a5SChris Wilson * instead. 3982e0986a5SChris Wilson */ 399babaab2fSChris Wilson if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && 400babaab2fSChris Wilson !intel_engine_pulse(engine)) 4012e0986a5SChris Wilson return true; 4022e0986a5SChris Wilson 4032e0986a5SChris Wilson /* If we are unable to send a pulse, try resetting this engine. 
*/ 4042e0986a5SChris Wilson return __reset_engine(engine); 4052e0986a5SChris Wilson } 4062e0986a5SChris Wilson 4074a317415SChris Wilson static struct intel_engine_cs *__active_engine(struct i915_request *rq) 4082e0986a5SChris Wilson { 4092e0986a5SChris Wilson struct intel_engine_cs *engine, *locked; 4102e0986a5SChris Wilson 4112e0986a5SChris Wilson /* 4122e0986a5SChris Wilson * Serialise with __i915_request_submit() so that it sees 4132e0986a5SChris Wilson * is-banned?, or we know the request is already inflight. 4142e0986a5SChris Wilson */ 4152e0986a5SChris Wilson locked = READ_ONCE(rq->engine); 4162e0986a5SChris Wilson spin_lock_irq(&locked->active.lock); 4172e0986a5SChris Wilson while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 4182e0986a5SChris Wilson spin_unlock(&locked->active.lock); 4192e0986a5SChris Wilson spin_lock(&engine->active.lock); 4202e0986a5SChris Wilson locked = engine; 4212e0986a5SChris Wilson } 4222e0986a5SChris Wilson 4232e0986a5SChris Wilson engine = NULL; 4242e0986a5SChris Wilson if (i915_request_is_active(rq) && !rq->fence.error) 4252e0986a5SChris Wilson engine = rq->engine; 4262e0986a5SChris Wilson 4272e0986a5SChris Wilson spin_unlock_irq(&locked->active.lock); 4282e0986a5SChris Wilson 4292e0986a5SChris Wilson return engine; 4302e0986a5SChris Wilson } 4312e0986a5SChris Wilson 4324a317415SChris Wilson static struct intel_engine_cs *active_engine(struct intel_context *ce) 4334a317415SChris Wilson { 4344a317415SChris Wilson struct intel_engine_cs *engine = NULL; 4354a317415SChris Wilson struct i915_request *rq; 4364a317415SChris Wilson 4374a317415SChris Wilson if (!ce->timeline) 4384a317415SChris Wilson return NULL; 4394a317415SChris Wilson 4407ce596a8SChris Wilson mutex_lock(&ce->timeline->mutex); 4414a317415SChris Wilson list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 4424a317415SChris Wilson if (i915_request_completed(rq)) 4434a317415SChris Wilson break; 4444a317415SChris Wilson 4454a317415SChris Wilson /* 
Check with the backend if the request is inflight */ 4464a317415SChris Wilson engine = __active_engine(rq); 4474a317415SChris Wilson if (engine) 4484a317415SChris Wilson break; 4494a317415SChris Wilson } 4507ce596a8SChris Wilson mutex_unlock(&ce->timeline->mutex); 4514a317415SChris Wilson 4524a317415SChris Wilson return engine; 4534a317415SChris Wilson } 4544a317415SChris Wilson 4552e0986a5SChris Wilson static void kill_context(struct i915_gem_context *ctx) 4562e0986a5SChris Wilson { 4572e0986a5SChris Wilson struct i915_gem_engines_iter it; 4582e0986a5SChris Wilson struct intel_context *ce; 4592e0986a5SChris Wilson 4602e0986a5SChris Wilson /* 4612e0986a5SChris Wilson * Map the user's engine back to the actual engines; one virtual 4622e0986a5SChris Wilson * engine will be mapped to multiple engines, and using ctx->engine[] 4632e0986a5SChris Wilson * the same engine may be have multiple instances in the user's map. 4642e0986a5SChris Wilson * However, we only care about pending requests, so only include 4652e0986a5SChris Wilson * engines on which there are incomplete requests. 4662e0986a5SChris Wilson */ 4672e0986a5SChris Wilson for_each_gem_engine(ce, __context_engines_static(ctx), it) { 4682e0986a5SChris Wilson struct intel_engine_cs *engine; 4692e0986a5SChris Wilson 4709f3ccd40SChris Wilson if (intel_context_set_banned(ce)) 4719f3ccd40SChris Wilson continue; 4729f3ccd40SChris Wilson 4734a317415SChris Wilson /* 4744a317415SChris Wilson * Check the current active state of this context; if we 4754a317415SChris Wilson * are currently executing on the GPU we need to evict 4764a317415SChris Wilson * ourselves. On the other hand, if we haven't yet been 4774a317415SChris Wilson * submitted to the GPU or if everything is complete, 4784a317415SChris Wilson * we have nothing to do. 
4794a317415SChris Wilson */ 4804a317415SChris Wilson engine = active_engine(ce); 4812e0986a5SChris Wilson 4822e0986a5SChris Wilson /* First attempt to gracefully cancel the context */ 4832e0986a5SChris Wilson if (engine && !__cancel_engine(engine)) 4842e0986a5SChris Wilson /* 4852e0986a5SChris Wilson * If we are unable to send a preemptive pulse to bump 4862e0986a5SChris Wilson * the context from the GPU, we have to resort to a full 4872e0986a5SChris Wilson * reset. We hope the collateral damage is worth it. 4882e0986a5SChris Wilson */ 4892e0986a5SChris Wilson __reset_context(ctx, engine); 4902e0986a5SChris Wilson } 4912e0986a5SChris Wilson } 4922e0986a5SChris Wilson 493267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx) 494267c0126SChris Wilson { 495267c0126SChris Wilson char *s; 496267c0126SChris Wilson 497267c0126SChris Wilson /* Replace '[]' with '<>' to indicate closed in debug prints */ 498267c0126SChris Wilson 499267c0126SChris Wilson s = strrchr(ctx->name, '['); 500267c0126SChris Wilson if (!s) 501267c0126SChris Wilson return; 502267c0126SChris Wilson 503267c0126SChris Wilson *s = '<'; 504267c0126SChris Wilson 505267c0126SChris Wilson s = strchr(s + 1, ']'); 506267c0126SChris Wilson if (s) 507267c0126SChris Wilson *s = '>'; 508267c0126SChris Wilson } 509267c0126SChris Wilson 51010be98a7SChris Wilson static void context_close(struct i915_gem_context *ctx) 51110be98a7SChris Wilson { 512a4e7ccdaSChris Wilson struct i915_address_space *vm; 513a4e7ccdaSChris Wilson 5142850748eSChris Wilson i915_gem_context_set_closed(ctx); 515267c0126SChris Wilson set_closed_name(ctx); 5162850748eSChris Wilson 517155ab883SChris Wilson mutex_lock(&ctx->mutex); 518155ab883SChris Wilson 519a4e7ccdaSChris Wilson vm = i915_gem_context_vm(ctx); 520a4e7ccdaSChris Wilson if (vm) 521a4e7ccdaSChris Wilson i915_vm_close(vm); 522a4e7ccdaSChris Wilson 523155ab883SChris Wilson ctx->file_priv = ERR_PTR(-EBADF); 52410be98a7SChris Wilson 52510be98a7SChris Wilson /* 
52610be98a7SChris Wilson * The LUT uses the VMA as a backpointer to unref the object, 52710be98a7SChris Wilson * so we need to clear the LUT before we close all the VMA (inside 52810be98a7SChris Wilson * the ppgtt). 52910be98a7SChris Wilson */ 53010be98a7SChris Wilson lut_close(ctx); 53110be98a7SChris Wilson 532155ab883SChris Wilson mutex_unlock(&ctx->mutex); 5332e0986a5SChris Wilson 5342e0986a5SChris Wilson /* 5352e0986a5SChris Wilson * If the user has disabled hangchecking, we can not be sure that 5362e0986a5SChris Wilson * the batches will ever complete after the context is closed, 5372e0986a5SChris Wilson * keeping the context and all resources pinned forever. So in this 5382e0986a5SChris Wilson * case we opt to forcibly kill off all remaining requests on 5392e0986a5SChris Wilson * context close. 5402e0986a5SChris Wilson */ 541a0e04715SChris Wilson if (!i915_gem_context_is_persistent(ctx) || 542a0e04715SChris Wilson !i915_modparams.enable_hangcheck) 5432e0986a5SChris Wilson kill_context(ctx); 5442e0986a5SChris Wilson 54510be98a7SChris Wilson i915_gem_context_put(ctx); 54610be98a7SChris Wilson } 54710be98a7SChris Wilson 548a0e04715SChris Wilson static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 549a0e04715SChris Wilson { 550a0e04715SChris Wilson if (i915_gem_context_is_persistent(ctx) == state) 551a0e04715SChris Wilson return 0; 552a0e04715SChris Wilson 553a0e04715SChris Wilson if (state) { 554a0e04715SChris Wilson /* 555a0e04715SChris Wilson * Only contexts that are short-lived [that will expire or be 556a0e04715SChris Wilson * reset] are allowed to survive past termination. We require 557a0e04715SChris Wilson * hangcheck to ensure that the persistent requests are healthy. 
558a0e04715SChris Wilson */ 559a0e04715SChris Wilson if (!i915_modparams.enable_hangcheck) 560a0e04715SChris Wilson return -EINVAL; 561a0e04715SChris Wilson 562a0e04715SChris Wilson i915_gem_context_set_persistence(ctx); 563a0e04715SChris Wilson } else { 564a0e04715SChris Wilson /* To cancel a context we use "preempt-to-idle" */ 565a0e04715SChris Wilson if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 566a0e04715SChris Wilson return -ENODEV; 567a0e04715SChris Wilson 568d1b9b5f1SChris Wilson /* 569d1b9b5f1SChris Wilson * If the cancel fails, we then need to reset, cleanly! 570d1b9b5f1SChris Wilson * 571d1b9b5f1SChris Wilson * If the per-engine reset fails, all hope is lost! We resort 572d1b9b5f1SChris Wilson * to a full GPU reset in that unlikely case, but realistically 573d1b9b5f1SChris Wilson * if the engine could not reset, the full reset does not fare 574d1b9b5f1SChris Wilson * much better. The damage has been done. 575d1b9b5f1SChris Wilson * 576d1b9b5f1SChris Wilson * However, if we cannot reset an engine by itself, we cannot 577d1b9b5f1SChris Wilson * cleanup a hanging persistent context without causing 578d1b9b5f1SChris Wilson * colateral damage, and we should not pretend we can by 579d1b9b5f1SChris Wilson * exposing the interface. 
580d1b9b5f1SChris Wilson */ 581d1b9b5f1SChris Wilson if (!intel_has_reset_engine(&ctx->i915->gt)) 582d1b9b5f1SChris Wilson return -ENODEV; 583d1b9b5f1SChris Wilson 584a0e04715SChris Wilson i915_gem_context_clear_persistence(ctx); 585a0e04715SChris Wilson } 586a0e04715SChris Wilson 587a0e04715SChris Wilson return 0; 588a0e04715SChris Wilson } 589a0e04715SChris Wilson 59010be98a7SChris Wilson static struct i915_gem_context * 591e568ac38SChris Wilson __create_context(struct drm_i915_private *i915) 59210be98a7SChris Wilson { 59310be98a7SChris Wilson struct i915_gem_context *ctx; 59410be98a7SChris Wilson struct i915_gem_engines *e; 59510be98a7SChris Wilson int err; 59610be98a7SChris Wilson int i; 59710be98a7SChris Wilson 59810be98a7SChris Wilson ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 59910be98a7SChris Wilson if (!ctx) 60010be98a7SChris Wilson return ERR_PTR(-ENOMEM); 60110be98a7SChris Wilson 60210be98a7SChris Wilson kref_init(&ctx->ref); 603e568ac38SChris Wilson ctx->i915 = i915; 60410be98a7SChris Wilson ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 60510be98a7SChris Wilson mutex_init(&ctx->mutex); 60610be98a7SChris Wilson 60710be98a7SChris Wilson mutex_init(&ctx->engines_mutex); 60810be98a7SChris Wilson e = default_engines(ctx); 60910be98a7SChris Wilson if (IS_ERR(e)) { 61010be98a7SChris Wilson err = PTR_ERR(e); 61110be98a7SChris Wilson goto err_free; 61210be98a7SChris Wilson } 61310be98a7SChris Wilson RCU_INIT_POINTER(ctx->engines, e); 61410be98a7SChris Wilson 61510be98a7SChris Wilson INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 61610be98a7SChris Wilson 61710be98a7SChris Wilson /* NB: Mark all slices as needing a remap so that when the context first 61810be98a7SChris Wilson * loads it will restore whatever remap state already exists. If there 61910be98a7SChris Wilson * is no remap info, it will be a NOP. 
*/ 620e568ac38SChris Wilson ctx->remap_slice = ALL_L3_SLICES(i915); 62110be98a7SChris Wilson 62210be98a7SChris Wilson i915_gem_context_set_bannable(ctx); 62310be98a7SChris Wilson i915_gem_context_set_recoverable(ctx); 624a0e04715SChris Wilson __context_set_persistence(ctx, true /* cgroup hook? */); 62510be98a7SChris Wilson 62610be98a7SChris Wilson for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 62710be98a7SChris Wilson ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 62810be98a7SChris Wilson 629a4e7ccdaSChris Wilson spin_lock(&i915->gem.contexts.lock); 630a4e7ccdaSChris Wilson list_add_tail(&ctx->link, &i915->gem.contexts.list); 631a4e7ccdaSChris Wilson spin_unlock(&i915->gem.contexts.lock); 632a4e7ccdaSChris Wilson 63310be98a7SChris Wilson return ctx; 63410be98a7SChris Wilson 63510be98a7SChris Wilson err_free: 63610be98a7SChris Wilson kfree(ctx); 63710be98a7SChris Wilson return ERR_PTR(err); 63810be98a7SChris Wilson } 63910be98a7SChris Wilson 64048ae397bSChris Wilson static void 64148ae397bSChris Wilson context_apply_all(struct i915_gem_context *ctx, 64248ae397bSChris Wilson void (*fn)(struct intel_context *ce, void *data), 64348ae397bSChris Wilson void *data) 64448ae397bSChris Wilson { 64548ae397bSChris Wilson struct i915_gem_engines_iter it; 64648ae397bSChris Wilson struct intel_context *ce; 64748ae397bSChris Wilson 64848ae397bSChris Wilson for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) 64948ae397bSChris Wilson fn(ce, data); 65048ae397bSChris Wilson i915_gem_context_unlock_engines(ctx); 65148ae397bSChris Wilson } 65248ae397bSChris Wilson 65348ae397bSChris Wilson static void __apply_ppgtt(struct intel_context *ce, void *vm) 65448ae397bSChris Wilson { 65548ae397bSChris Wilson i915_vm_put(ce->vm); 65648ae397bSChris Wilson ce->vm = i915_vm_get(vm); 65748ae397bSChris Wilson } 65848ae397bSChris Wilson 659e568ac38SChris Wilson static struct i915_address_space * 660e568ac38SChris Wilson __set_ppgtt(struct i915_gem_context *ctx, struct 
i915_address_space *vm) 66110be98a7SChris Wilson { 662a4e7ccdaSChris Wilson struct i915_address_space *old = i915_gem_context_vm(ctx); 66310be98a7SChris Wilson 664a1c9ca22SChris Wilson GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 665a1c9ca22SChris Wilson 666a4e7ccdaSChris Wilson rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); 66748ae397bSChris Wilson context_apply_all(ctx, __apply_ppgtt, vm); 668f5d974f9SChris Wilson 66910be98a7SChris Wilson return old; 67010be98a7SChris Wilson } 67110be98a7SChris Wilson
/*
 * __assign_ppgtt() - set ctx->vm to @vm unless it is already current; the old
 * vm returned by __set_ppgtt() is closed here.
 */
67210be98a7SChris Wilson static void __assign_ppgtt(struct i915_gem_context *ctx, 673e568ac38SChris Wilson struct i915_address_space *vm) 67410be98a7SChris Wilson { 675a4e7ccdaSChris Wilson if (vm == rcu_access_pointer(ctx->vm)) 67610be98a7SChris Wilson return; 67710be98a7SChris Wilson 678e568ac38SChris Wilson vm = __set_ppgtt(ctx, vm); 679e568ac38SChris Wilson if (vm) 6802850748eSChris Wilson i915_vm_close(vm); 68110be98a7SChris Wilson } 68210be98a7SChris Wilson
/*
 * __set_timeline() - replace *dst with a reference on @src (may be NULL),
 * dropping the reference held on the previous timeline, if any.
 */
68375d0a7f3SChris Wilson static void __set_timeline(struct intel_timeline **dst, 68475d0a7f3SChris Wilson struct intel_timeline *src) 68575d0a7f3SChris Wilson { 68675d0a7f3SChris Wilson struct intel_timeline *old = *dst; 68775d0a7f3SChris Wilson 68875d0a7f3SChris Wilson *dst = src ?
intel_timeline_get(src) : NULL; 68975d0a7f3SChris Wilson 69075d0a7f3SChris Wilson if (old) 69175d0a7f3SChris Wilson intel_timeline_put(old); 69275d0a7f3SChris Wilson } 69375d0a7f3SChris Wilson
/* __apply_timeline() - per-engine callback: point ce->timeline at @timeline. */
69475d0a7f3SChris Wilson static void __apply_timeline(struct intel_context *ce, void *timeline) 69575d0a7f3SChris Wilson { 69675d0a7f3SChris Wilson __set_timeline(&ce->timeline, timeline); 69775d0a7f3SChris Wilson } 69875d0a7f3SChris Wilson
/*
 * __assign_timeline() - install @timeline on the context and fan it out to
 * every engine context via context_apply_all().
 */
69975d0a7f3SChris Wilson static void __assign_timeline(struct i915_gem_context *ctx, 70075d0a7f3SChris Wilson struct intel_timeline *timeline) 70175d0a7f3SChris Wilson { 70275d0a7f3SChris Wilson __set_timeline(&ctx->timeline, timeline); 70375d0a7f3SChris Wilson context_apply_all(ctx, __apply_timeline, timeline); 70475d0a7f3SChris Wilson } 70575d0a7f3SChris Wilson
/*
 * i915_gem_create_context() - construct a new GEM context, optionally backed
 * by a full ppGTT and, with I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE, a
 * single shared timeline (execlists only). Returns the context or ERR_PTR().
 */
70610be98a7SChris Wilson static struct i915_gem_context * 707a4e7ccdaSChris Wilson i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 70810be98a7SChris Wilson { 70910be98a7SChris Wilson struct i915_gem_context *ctx; 71010be98a7SChris Wilson 71110be98a7SChris Wilson if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 712a4e7ccdaSChris Wilson !HAS_EXECLISTS(i915)) 71310be98a7SChris Wilson return ERR_PTR(-EINVAL); 71410be98a7SChris Wilson 715a4e7ccdaSChris Wilson /* Reap the stale contexts */ 716a4e7ccdaSChris Wilson contexts_flush_free(&i915->gem.contexts); 71710be98a7SChris Wilson 718a4e7ccdaSChris Wilson ctx = __create_context(i915); 71910be98a7SChris Wilson if (IS_ERR(ctx)) 72010be98a7SChris Wilson return ctx; 72110be98a7SChris Wilson 722a4e7ccdaSChris Wilson if (HAS_FULL_PPGTT(i915)) { 723ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 72410be98a7SChris Wilson 7252c86e55dSMatthew Auld ppgtt = i915_ppgtt_create(&i915->gt); 72610be98a7SChris Wilson if (IS_ERR(ppgtt)) { 727baa89ba3SWambui Karuga drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 72810be98a7SChris Wilson PTR_ERR(ppgtt)); 72910be98a7SChris Wilson context_close(ctx); 73010be98a7SChris
Wilson return ERR_CAST(ppgtt); 73110be98a7SChris Wilson } 73210be98a7SChris Wilson
/* Hand the new ppGTT to the context; the context holds its own open ref, so drop ours. */
733a4e7ccdaSChris Wilson mutex_lock(&ctx->mutex); 734e568ac38SChris Wilson __assign_ppgtt(ctx, &ppgtt->vm); 735a4e7ccdaSChris Wilson mutex_unlock(&ctx->mutex); 736a4e7ccdaSChris Wilson 737e568ac38SChris Wilson i915_vm_put(&ppgtt->vm); 73810be98a7SChris Wilson } 73910be98a7SChris Wilson 74010be98a7SChris Wilson if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 741f0c02c1bSTvrtko Ursulin struct intel_timeline *timeline; 74210be98a7SChris Wilson 743a4e7ccdaSChris Wilson timeline = intel_timeline_create(&i915->gt, NULL); 74410be98a7SChris Wilson if (IS_ERR(timeline)) { 74510be98a7SChris Wilson context_close(ctx); 74610be98a7SChris Wilson return ERR_CAST(timeline); 74710be98a7SChris Wilson } 74810be98a7SChris Wilson 74975d0a7f3SChris Wilson __assign_timeline(ctx, timeline); 75075d0a7f3SChris Wilson intel_timeline_put(timeline); 75110be98a7SChris Wilson } 75210be98a7SChris Wilson 75310be98a7SChris Wilson trace_i915_context_create(ctx); 75410be98a7SChris Wilson 75510be98a7SChris Wilson return ctx; 75610be98a7SChris Wilson } 75710be98a7SChris Wilson
/*
 * init_contexts() - initialise the per-device context bookkeeping: the
 * contexts list/lock and the deferred-free worker plus its lockless list.
 */
758a4e7ccdaSChris Wilson static void init_contexts(struct i915_gem_contexts *gc) 75910be98a7SChris Wilson { 760a4e7ccdaSChris Wilson spin_lock_init(&gc->lock); 761a4e7ccdaSChris Wilson INIT_LIST_HEAD(&gc->list); 76210be98a7SChris Wilson 763a4e7ccdaSChris Wilson INIT_WORK(&gc->free_work, contexts_free_worker); 764a4e7ccdaSChris Wilson init_llist_head(&gc->free_list); 76510be98a7SChris Wilson } 76610be98a7SChris Wilson
/* i915_gem_init__contexts() - driver-load hook: set up context tracking and log capability. */
767e6ba7648SChris Wilson void i915_gem_init__contexts(struct drm_i915_private *i915) 76810be98a7SChris Wilson { 769a4e7ccdaSChris Wilson init_contexts(&i915->gem.contexts); 770baa89ba3SWambui Karuga drm_dbg(&i915->drm, "%s context support initialized\n", 771a4e7ccdaSChris Wilson DRIVER_CAPS(i915)->has_logical_contexts ?
77210be98a7SChris Wilson "logical" : "fake"); 77310be98a7SChris Wilson } 77410be98a7SChris Wilson
/* i915_gem_driver_release__contexts() - driver-unload hook: wait for the deferred context-free worker to finish. */
775a4e7ccdaSChris Wilson void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 77610be98a7SChris Wilson { 7775f00cac9SChris Wilson flush_work(&i915->gem.contexts.free_work); 77810be98a7SChris Wilson } 77910be98a7SChris Wilson
/*
 * gem_context_register() - bind @ctx to an open DRM file: record the owning
 * file, tag the vm with the file (marked XXX upstream), name the context
 * after the creating task, and expose it to userspace by allocating an id in
 * fpriv->context_xa. On xa_alloc() failure the pid reference is dropped.
 */
78010be98a7SChris Wilson static int gem_context_register(struct i915_gem_context *ctx, 781c100777cSTvrtko Ursulin struct drm_i915_file_private *fpriv, 782c100777cSTvrtko Ursulin u32 *id) 78310be98a7SChris Wilson { 784a4e7ccdaSChris Wilson struct i915_address_space *vm; 78510be98a7SChris Wilson int ret; 78610be98a7SChris Wilson 78710be98a7SChris Wilson ctx->file_priv = fpriv; 788a4e7ccdaSChris Wilson 789a4e7ccdaSChris Wilson mutex_lock(&ctx->mutex); 790a4e7ccdaSChris Wilson vm = i915_gem_context_vm(ctx); 791a4e7ccdaSChris Wilson if (vm) 792a4e7ccdaSChris Wilson WRITE_ONCE(vm->file, fpriv); /* XXX */ 793a4e7ccdaSChris Wilson mutex_unlock(&ctx->mutex); 79410be98a7SChris Wilson 79510be98a7SChris Wilson ctx->pid = get_task_pid(current, PIDTYPE_PID); 796fc4f125dSChris Wilson snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 79710be98a7SChris Wilson current->comm, pid_nr(ctx->pid)); 79810be98a7SChris Wilson 79910be98a7SChris Wilson /* And finally expose ourselves to userspace via the idr */ 800c100777cSTvrtko Ursulin ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 801c100777cSTvrtko Ursulin if (ret) 80210be98a7SChris Wilson put_pid(fetch_and_zero(&ctx->pid)); 803c100777cSTvrtko Ursulin 80410be98a7SChris Wilson return ret; 80510be98a7SChris Wilson } 80610be98a7SChris Wilson
/*
 * i915_gem_context_open() - per-open-file setup: initialise the context and
 * vm xarrays and create the default context, which must receive id 0.
 */
80710be98a7SChris Wilson int i915_gem_context_open(struct drm_i915_private *i915, 80810be98a7SChris Wilson struct drm_file *file) 80910be98a7SChris Wilson { 81010be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 81110be98a7SChris Wilson struct i915_gem_context *ctx; 81210be98a7SChris Wilson int err; 813c100777cSTvrtko
Ursulin u32 id; 81410be98a7SChris Wilson 815c100777cSTvrtko Ursulin xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 816c100777cSTvrtko Ursulin 8175dbd2b7bSChris Wilson /* 0 reserved for invalid/unassigned ppgtt */ 8185dbd2b7bSChris Wilson xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 81910be98a7SChris Wilson 82010be98a7SChris Wilson ctx = i915_gem_create_context(i915, 0); 82110be98a7SChris Wilson if (IS_ERR(ctx)) { 82210be98a7SChris Wilson err = PTR_ERR(ctx); 82310be98a7SChris Wilson goto err; 82410be98a7SChris Wilson } 82510be98a7SChris Wilson 826c100777cSTvrtko Ursulin err = gem_context_register(ctx, file_priv, &id); 82710be98a7SChris Wilson if (err < 0) 82810be98a7SChris Wilson goto err_ctx; 82910be98a7SChris Wilson
/* The default context must occupy slot 0 of the freshly initialised xarray. */
830c100777cSTvrtko Ursulin GEM_BUG_ON(id); 83110be98a7SChris Wilson return 0; 83210be98a7SChris Wilson 83310be98a7SChris Wilson err_ctx: 83410be98a7SChris Wilson context_close(ctx); 83510be98a7SChris Wilson err: 8365dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa); 837c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 83810be98a7SChris Wilson return err; 83910be98a7SChris Wilson } 84010be98a7SChris Wilson
/*
 * i915_gem_context_close() - per-open-file teardown: close every context and
 * drop every vm reference registered by this file, then destroy both xarrays
 * and kick the deferred context reaper.
 */
84110be98a7SChris Wilson void i915_gem_context_close(struct drm_file *file) 84210be98a7SChris Wilson { 84310be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 844a4e7ccdaSChris Wilson struct drm_i915_private *i915 = file_priv->dev_priv; 8455dbd2b7bSChris Wilson struct i915_address_space *vm; 846c100777cSTvrtko Ursulin struct i915_gem_context *ctx; 847c100777cSTvrtko Ursulin unsigned long idx; 84810be98a7SChris Wilson 849c100777cSTvrtko Ursulin xa_for_each(&file_priv->context_xa, idx, ctx) 850c100777cSTvrtko Ursulin context_close(ctx); 851c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 85210be98a7SChris Wilson 8535dbd2b7bSChris Wilson xa_for_each(&file_priv->vm_xa, idx, vm) 8545dbd2b7bSChris Wilson i915_vm_put(vm); 8555dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa);
856a4e7ccdaSChris Wilson 857a4e7ccdaSChris Wilson contexts_flush_free(&i915->gem.contexts); 85810be98a7SChris Wilson } 85910be98a7SChris Wilson
/*
 * i915_gem_vm_create_ioctl() - DRM_I915_GEM_VM_CREATE: create a full ppGTT on
 * behalf of userspace, apply any user extensions, register it in the file's
 * vm_xa and return the (non-zero) id in args->vm_id. No flags are accepted.
 */
86010be98a7SChris Wilson int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 86110be98a7SChris Wilson struct drm_file *file) 86210be98a7SChris Wilson { 86310be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 86410be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 86510be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 866ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 8675dbd2b7bSChris Wilson u32 id; 86810be98a7SChris Wilson int err; 86910be98a7SChris Wilson 87010be98a7SChris Wilson if (!HAS_FULL_PPGTT(i915)) 87110be98a7SChris Wilson return -ENODEV; 87210be98a7SChris Wilson 87310be98a7SChris Wilson if (args->flags) 87410be98a7SChris Wilson return -EINVAL; 87510be98a7SChris Wilson 8762c86e55dSMatthew Auld ppgtt = i915_ppgtt_create(&i915->gt); 87710be98a7SChris Wilson if (IS_ERR(ppgtt)) 87810be98a7SChris Wilson return PTR_ERR(ppgtt); 87910be98a7SChris Wilson 88010be98a7SChris Wilson ppgtt->vm.file = file_priv; 88110be98a7SChris Wilson 88210be98a7SChris Wilson if (args->extensions) { 88310be98a7SChris Wilson err = i915_user_extensions(u64_to_user_ptr(args->extensions), 88410be98a7SChris Wilson NULL, 0, 88510be98a7SChris Wilson ppgtt); 88610be98a7SChris Wilson if (err) 88710be98a7SChris Wilson goto err_put; 88810be98a7SChris Wilson } 88910be98a7SChris Wilson 8905dbd2b7bSChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 8915dbd2b7bSChris Wilson xa_limit_32b, GFP_KERNEL); 89210be98a7SChris Wilson if (err) 89310be98a7SChris Wilson goto err_put; 89410be98a7SChris Wilson 8955dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 8965dbd2b7bSChris Wilson args->vm_id = id; 89710be98a7SChris Wilson return 0; 89810be98a7SChris Wilson 89910be98a7SChris Wilson err_put: 900e568ac38SChris Wilson i915_vm_put(&ppgtt->vm);
90110be98a7SChris Wilson return err; 90210be98a7SChris Wilson } 90310be98a7SChris Wilson
/*
 * i915_gem_vm_destroy_ioctl() - DRM_I915_GEM_VM_DESTROY: remove the vm with
 * args->vm_id from the file's vm_xa and drop the reference it held. Rejects
 * any flags or extensions; -ENOENT if the id is not registered.
 */
90410be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 90510be98a7SChris Wilson struct drm_file *file) 90610be98a7SChris Wilson { 90710be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 90810be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 909e568ac38SChris Wilson struct i915_address_space *vm; 91010be98a7SChris Wilson 91110be98a7SChris Wilson if (args->flags) 91210be98a7SChris Wilson return -EINVAL; 91310be98a7SChris Wilson 91410be98a7SChris Wilson if (args->extensions) 91510be98a7SChris Wilson return -EINVAL; 91610be98a7SChris Wilson 9175dbd2b7bSChris Wilson vm = xa_erase(&file_priv->vm_xa, args->vm_id); 918e568ac38SChris Wilson if (!vm) 91910be98a7SChris Wilson return -ENOENT; 92010be98a7SChris Wilson 921e568ac38SChris Wilson i915_vm_put(vm); 92210be98a7SChris Wilson return 0; 92310be98a7SChris Wilson } 92410be98a7SChris Wilson
/*
 * struct context_barrier_task - i915_active wrapper that runs task(data) from
 * cb_retire() once every tracked request has retired.
 */
92510be98a7SChris Wilson struct context_barrier_task { 92610be98a7SChris Wilson struct i915_active base; 92710be98a7SChris Wilson void (*task)(void *data); 92810be98a7SChris Wilson void *data; 92910be98a7SChris Wilson }; 93010be98a7SChris Wilson
/* cb_retire() - retirement callback: run the deferred task (if armed), then tear down and free the barrier. */
931274cbf20SChris Wilson __i915_active_call 93210be98a7SChris Wilson static void cb_retire(struct i915_active *base) 93310be98a7SChris Wilson { 93410be98a7SChris Wilson struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 93510be98a7SChris Wilson 93610be98a7SChris Wilson if (cb->task) 93710be98a7SChris Wilson cb->task(cb->data); 93810be98a7SChris Wilson 93910be98a7SChris Wilson i915_active_fini(&cb->base); 94010be98a7SChris Wilson kfree(cb); 94110be98a7SChris Wilson } 94210be98a7SChris Wilson
/*
 * context_barrier_task() - emit one request on each engine of @ctx selected
 * by @engines (minus those @skip rejects), optionally letting @emit add
 * commands to each request, and arrange for @task to run once all of those
 * requests have retired. On error the task is disarmed (cb->task = NULL) and
 * the caller must unwind.
 */
94310be98a7SChris Wilson I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 94410be98a7SChris Wilson static int context_barrier_task(struct i915_gem_context *ctx, 94510be98a7SChris Wilson
intel_engine_mask_t engines, 9461fe2d6f9SChris Wilson bool (*skip)(struct intel_context *ce, void *data), 94710be98a7SChris Wilson int (*emit)(struct i915_request *rq, void *data), 94810be98a7SChris Wilson void (*task)(void *data), 94910be98a7SChris Wilson void *data) 95010be98a7SChris Wilson { 95110be98a7SChris Wilson struct context_barrier_task *cb; 95210be98a7SChris Wilson struct i915_gem_engines_iter it; 95310be98a7SChris Wilson struct intel_context *ce; 95410be98a7SChris Wilson int err = 0; 95510be98a7SChris Wilson 95610be98a7SChris Wilson GEM_BUG_ON(!task); 95710be98a7SChris Wilson 95810be98a7SChris Wilson cb = kmalloc(sizeof(*cb), GFP_KERNEL); 95910be98a7SChris Wilson if (!cb) 96010be98a7SChris Wilson return -ENOMEM; 96110be98a7SChris Wilson 962b1e3177bSChris Wilson i915_active_init(&cb->base, NULL, cb_retire); 96312c255b5SChris Wilson err = i915_active_acquire(&cb->base); 96412c255b5SChris Wilson if (err) { 96512c255b5SChris Wilson kfree(cb); 96612c255b5SChris Wilson return err; 96712c255b5SChris Wilson } 96810be98a7SChris Wilson 96910be98a7SChris Wilson for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 97010be98a7SChris Wilson struct i915_request *rq; 97110be98a7SChris Wilson 97210be98a7SChris Wilson if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 97310be98a7SChris Wilson ce->engine->mask)) { 97410be98a7SChris Wilson err = -ENXIO; 97510be98a7SChris Wilson break; 97610be98a7SChris Wilson } 97710be98a7SChris Wilson 9781fe2d6f9SChris Wilson if (!(ce->engine->mask & engines)) 9791fe2d6f9SChris Wilson continue; 9801fe2d6f9SChris Wilson 9811fe2d6f9SChris Wilson if (skip && skip(ce, data)) 98210be98a7SChris Wilson continue; 98310be98a7SChris Wilson 98410be98a7SChris Wilson rq = intel_context_create_request(ce); 98510be98a7SChris Wilson if (IS_ERR(rq)) { 98610be98a7SChris Wilson err = PTR_ERR(rq); 98710be98a7SChris Wilson break; 98810be98a7SChris Wilson } 98910be98a7SChris Wilson 99010be98a7SChris Wilson err = 0; 99110be98a7SChris Wilson if
(emit) 99210be98a7SChris Wilson err = emit(rq, data); 99310be98a7SChris Wilson if (err == 0) 994d19d71fcSChris Wilson err = i915_active_add_request(&cb->base, rq); 99510be98a7SChris Wilson 99610be98a7SChris Wilson i915_request_add(rq); 99710be98a7SChris Wilson if (err) 99810be98a7SChris Wilson break; 99910be98a7SChris Wilson } 100010be98a7SChris Wilson i915_gem_context_unlock_engines(ctx); 100110be98a7SChris Wilson 100210be98a7SChris Wilson cb->task = err ? NULL : task; /* caller needs to unwind instead */ 100310be98a7SChris Wilson cb->data = data; 100410be98a7SChris Wilson 100510be98a7SChris Wilson i915_active_release(&cb->base); 100610be98a7SChris Wilson 100710be98a7SChris Wilson return err; 100810be98a7SChris Wilson } 100910be98a7SChris Wilson
/*
 * get_ppgtt() - I915_CONTEXT_PARAM_VM getter: take a reference on the
 * context's vm under RCU, register it in the file's vm_xa and return the id
 * in args->value. On success execution falls through to err_put as well: the
 * i915_vm_put() there balances the reference taken by context_get_vm_rcu(),
 * while the registered entry is kept alive by the extra i915_vm_open().
 */
101010be98a7SChris Wilson static int get_ppgtt(struct drm_i915_file_private *file_priv, 101110be98a7SChris Wilson struct i915_gem_context *ctx, 101210be98a7SChris Wilson struct drm_i915_gem_context_param *args) 101310be98a7SChris Wilson { 1014e568ac38SChris Wilson struct i915_address_space *vm; 10155dbd2b7bSChris Wilson int err; 10165dbd2b7bSChris Wilson u32 id; 101710be98a7SChris Wilson 1018a4e7ccdaSChris Wilson if (!rcu_access_pointer(ctx->vm)) 101910be98a7SChris Wilson return -ENODEV; 102010be98a7SChris Wilson 1021a4e7ccdaSChris Wilson rcu_read_lock(); 102227dbae8fSChris Wilson vm = context_get_vm_rcu(ctx); 1023a4e7ccdaSChris Wilson rcu_read_unlock(); 102490211ea4SChris Wilson if (!vm) 102590211ea4SChris Wilson return -ENODEV; 102690211ea4SChris Wilson 102790211ea4SChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 10285dbd2b7bSChris Wilson if (err) 102910be98a7SChris Wilson goto err_put; 103010be98a7SChris Wilson 10312850748eSChris Wilson i915_vm_open(vm); 103210be98a7SChris Wilson 10335dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 10345dbd2b7bSChris Wilson args->value = id; 103510be98a7SChris Wilson args->size = 0; 103610be98a7SChris Wilson
103710be98a7SChris Wilson err_put: 1038e568ac38SChris Wilson i915_vm_put(vm); 10395dbd2b7bSChris Wilson return err; 104010be98a7SChris Wilson } 104110be98a7SChris Wilson
/*
 * set_ppgtt_barrier() - context_barrier_task() completion callback for
 * set_ppgtt(): on gen < 8 unpin the old gen6 ppGTT (pinned by
 * skip_ppgtt_update()), then close the reference held on the old vm.
 */
104210be98a7SChris Wilson static void set_ppgtt_barrier(void *data) 104310be98a7SChris Wilson { 1044e568ac38SChris Wilson struct i915_address_space *old = data; 104510be98a7SChris Wilson 1046e568ac38SChris Wilson if (INTEL_GEN(old->i915) < 8) 1047e568ac38SChris Wilson gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 104810be98a7SChris Wilson 10492850748eSChris Wilson i915_vm_close(old); 105010be98a7SChris Wilson } 105110be98a7SChris Wilson
/*
 * emit_ppgtt_update() - write the request context's new page-directory
 * pointers into the engine's PDP registers: a single PML4 pointer via LRI for
 * 4-level ppGTT, or all GEN8_3LVL_PDPES entries (with a preceding invalidate
 * flush and MI_LRI_FORCE_POSTED) for 3-level on logical-ring hardware.
 */
105210be98a7SChris Wilson static int emit_ppgtt_update(struct i915_request *rq, void *data) 105310be98a7SChris Wilson { 10549f3ccd40SChris Wilson struct i915_address_space *vm = rq->context->vm; 105510be98a7SChris Wilson struct intel_engine_cs *engine = rq->engine; 105610be98a7SChris Wilson u32 base = engine->mmio_base; 105710be98a7SChris Wilson u32 *cs; 105810be98a7SChris Wilson int i; 105910be98a7SChris Wilson 1060e568ac38SChris Wilson if (i915_vm_is_4lvl(vm)) { 1061ab53497bSChris Wilson struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1062b5b7bef9SMika Kuoppala const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 106310be98a7SChris Wilson 106410be98a7SChris Wilson cs = intel_ring_begin(rq, 6); 106510be98a7SChris Wilson if (IS_ERR(cs)) 106610be98a7SChris Wilson return PTR_ERR(cs); 106710be98a7SChris Wilson 106810be98a7SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(2); 106910be98a7SChris Wilson 107010be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 107110be98a7SChris Wilson *cs++ = upper_32_bits(pd_daddr); 107210be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 107310be98a7SChris Wilson *cs++ = lower_32_bits(pd_daddr); 107410be98a7SChris Wilson 107510be98a7SChris Wilson *cs++ = MI_NOOP; 107610be98a7SChris Wilson intel_ring_advance(rq, cs); 107710be98a7SChris Wilson } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1078ab53497bSChris Wilson struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1079191797a8SChris Wilson int err; 1080191797a8SChris Wilson 1081191797a8SChris Wilson /* Magic required to prevent forcewake errors! */ 1082191797a8SChris Wilson err = engine->emit_flush(rq, EMIT_INVALIDATE); 1083191797a8SChris Wilson if (err) 1084191797a8SChris Wilson return err; 1085e568ac38SChris Wilson 108610be98a7SChris Wilson cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 108710be98a7SChris Wilson if (IS_ERR(cs)) 108810be98a7SChris Wilson return PTR_ERR(cs); 108910be98a7SChris Wilson 1090191797a8SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 109110be98a7SChris Wilson for (i = GEN8_3LVL_PDPES; i--; ) { 109210be98a7SChris Wilson const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 109310be98a7SChris Wilson 109410be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 109510be98a7SChris Wilson *cs++ = upper_32_bits(pd_daddr); 109610be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 109710be98a7SChris Wilson *cs++ = lower_32_bits(pd_daddr); 109810be98a7SChris Wilson } 109910be98a7SChris Wilson *cs++ = MI_NOOP; 110010be98a7SChris Wilson intel_ring_advance(rq, cs); 110110be98a7SChris Wilson } 110210be98a7SChris Wilson 110310be98a7SChris Wilson return 0; 110410be98a7SChris Wilson } 110510be98a7SChris Wilson
/*
 * skip_ppgtt_update() - context_barrier_task() filter for set_ppgtt():
 * returns true (skip) for engine contexts whose state is not yet allocated,
 * false (emit the update) on logical-ring hardware, and for legacy hardware
 * skips unpinned contexts or those whose gen6 ppGTT cannot be pinned.
 */
11061fe2d6f9SChris Wilson static bool skip_ppgtt_update(struct intel_context *ce, void *data) 11071fe2d6f9SChris Wilson { 1108aef82079SChris Wilson if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 1109aef82079SChris Wilson return true; 1110aef82079SChris Wilson 11111fe2d6f9SChris Wilson if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1112aef82079SChris Wilson return false; 1113aef82079SChris Wilson 1114aef82079SChris Wilson if (!atomic_read(&ce->pin_count)) 1115aef82079SChris Wilson return true; 1116aef82079SChris Wilson 1117aef82079SChris Wilson /* ppGTT is not part of the legacy context
image */ 1118aef82079SChris Wilson if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) 1119aef82079SChris Wilson return true; 1120aef82079SChris Wilson 1121aef82079SChris Wilson return false; 11221fe2d6f9SChris Wilson } 11231fe2d6f9SChris Wilson
/*
 * set_ppgtt() - I915_CONTEXT_PARAM_VM setter: look up the vm by id in the
 * file's vm_xa (taking a kref under RCU), swap it into the context under
 * ctx->mutex, and use a context barrier to flush requests still using the old
 * vm before it is released; the swap is rolled back on barrier failure.
 */
112410be98a7SChris Wilson static int set_ppgtt(struct drm_i915_file_private *file_priv, 112510be98a7SChris Wilson struct i915_gem_context *ctx, 112610be98a7SChris Wilson struct drm_i915_gem_context_param *args) 112710be98a7SChris Wilson { 1128e568ac38SChris Wilson struct i915_address_space *vm, *old; 112910be98a7SChris Wilson int err; 113010be98a7SChris Wilson 113110be98a7SChris Wilson if (args->size) 113210be98a7SChris Wilson return -EINVAL; 113310be98a7SChris Wilson 1134a4e7ccdaSChris Wilson if (!rcu_access_pointer(ctx->vm)) 113510be98a7SChris Wilson return -ENODEV; 113610be98a7SChris Wilson 113710be98a7SChris Wilson if (upper_32_bits(args->value)) 113810be98a7SChris Wilson return -ENOENT; 113910be98a7SChris Wilson 1140aabbe344SChris Wilson rcu_read_lock(); 11415dbd2b7bSChris Wilson vm = xa_load(&file_priv->vm_xa, args->value); 1142aabbe344SChris Wilson if (vm && !kref_get_unless_zero(&vm->ref)) 1143aabbe344SChris Wilson vm = NULL; 1144aabbe344SChris Wilson rcu_read_unlock(); 1145e568ac38SChris Wilson if (!vm) 114610be98a7SChris Wilson return -ENOENT; 114710be98a7SChris Wilson 1148a4e7ccdaSChris Wilson err = mutex_lock_interruptible(&ctx->mutex); 114910be98a7SChris Wilson if (err) 115010be98a7SChris Wilson goto out; 115110be98a7SChris Wilson 1152a4e7ccdaSChris Wilson if (i915_gem_context_is_closed(ctx)) { 1153a4e7ccdaSChris Wilson err = -ENOENT; 1154feba2b81SChris Wilson goto unlock; 1155a4e7ccdaSChris Wilson } 1156a4e7ccdaSChris Wilson 1157a4e7ccdaSChris Wilson if (vm == rcu_access_pointer(ctx->vm)) 115810be98a7SChris Wilson goto unlock; 115910be98a7SChris Wilson 116010be98a7SChris Wilson /* Teardown the existing obj:vma cache, it will have to be rebuilt.
*/ 116110be98a7SChris Wilson lut_close(ctx); 116210be98a7SChris Wilson 1163e568ac38SChris Wilson old = __set_ppgtt(ctx, vm); 116410be98a7SChris Wilson 116510be98a7SChris Wilson /* 116610be98a7SChris Wilson * We need to flush any requests using the current ppgtt before 116710be98a7SChris Wilson * we release it as the requests do not hold a reference themselves, 116810be98a7SChris Wilson * only indirectly through the context. 116910be98a7SChris Wilson */ 117010be98a7SChris Wilson err = context_barrier_task(ctx, ALL_ENGINES, 11711fe2d6f9SChris Wilson skip_ppgtt_update, 117210be98a7SChris Wilson emit_ppgtt_update, 117310be98a7SChris Wilson set_ppgtt_barrier, 117410be98a7SChris Wilson old); 117510be98a7SChris Wilson if (err) {
/* Barrier failed: restore the old vm and close both the reinstalled new vm and the old ref. */
11762850748eSChris Wilson i915_vm_close(__set_ppgtt(ctx, old)); 11772850748eSChris Wilson i915_vm_close(old); 117810be98a7SChris Wilson } 117910be98a7SChris Wilson 118010be98a7SChris Wilson unlock: 1181a4e7ccdaSChris Wilson mutex_unlock(&ctx->mutex); 118210be98a7SChris Wilson out: 1183e568ac38SChris Wilson i915_vm_put(vm); 118410be98a7SChris Wilson return err; 118510be98a7SChris Wilson } 118610be98a7SChris Wilson
/*
 * gen8_emit_rpcs_config() - emit an MI_STORE_DWORD_IMM that writes the RPCS
 * value computed from @sseu directly into CTX_R_PWR_CLK_STATE in the
 * context's state image (addressed via its GGTT offset).
 */
118710be98a7SChris Wilson static int gen8_emit_rpcs_config(struct i915_request *rq, 118810be98a7SChris Wilson struct intel_context *ce, 118910be98a7SChris Wilson struct intel_sseu sseu) 119010be98a7SChris Wilson { 119110be98a7SChris Wilson u64 offset; 119210be98a7SChris Wilson u32 *cs; 119310be98a7SChris Wilson 119410be98a7SChris Wilson cs = intel_ring_begin(rq, 4); 119510be98a7SChris Wilson if (IS_ERR(cs)) 119610be98a7SChris Wilson return PTR_ERR(cs); 119710be98a7SChris Wilson 119810be98a7SChris Wilson offset = i915_ggtt_offset(ce->state) + 119910be98a7SChris Wilson LRC_STATE_PN * PAGE_SIZE + 12007dc56af5SChris Wilson CTX_R_PWR_CLK_STATE * 4; 120110be98a7SChris Wilson 120210be98a7SChris Wilson *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 120310be98a7SChris Wilson *cs++ = lower_32_bits(offset); 120410be98a7SChris Wilson *cs++ =
upper_32_bits(offset); 120510be98a7SChris Wilson *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); 120610be98a7SChris Wilson 120710be98a7SChris Wilson intel_ring_advance(rq, cs); 120810be98a7SChris Wilson 120910be98a7SChris Wilson return 0; 121010be98a7SChris Wilson } 121110be98a7SChris Wilson
/*
 * gen8_modify_rpcs() - apply a new sseu config to @ce. If the context is not
 * currently pinned there is nothing to do (it is configured on pinning);
 * otherwise a kernel-context request is submitted, serialised against the
 * target context, to rewrite its context image. Caller holds ce->pin_mutex.
 */
121210be98a7SChris Wilson static int 121310be98a7SChris Wilson gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) 121410be98a7SChris Wilson { 121510be98a7SChris Wilson struct i915_request *rq; 121610be98a7SChris Wilson int ret; 121710be98a7SChris Wilson 121810be98a7SChris Wilson lockdep_assert_held(&ce->pin_mutex); 121910be98a7SChris Wilson 122010be98a7SChris Wilson /* 122110be98a7SChris Wilson * If the context is not idle, we have to submit an ordered request to 122210be98a7SChris Wilson * modify its context image via the kernel context (writing to our own 122310be98a7SChris Wilson * image, or into the registers directory, does not stick). Pristine 122410be98a7SChris Wilson * and idle contexts will be configured on pinning.
122510be98a7SChris Wilson */ 1226feed5c7bSChris Wilson if (!intel_context_pin_if_active(ce)) 122710be98a7SChris Wilson return 0; 122810be98a7SChris Wilson 1229de5825beSChris Wilson rq = intel_engine_create_kernel_request(ce->engine); 1230feed5c7bSChris Wilson if (IS_ERR(rq)) { 1231feed5c7bSChris Wilson ret = PTR_ERR(rq); 1232feed5c7bSChris Wilson goto out_unpin; 1233feed5c7bSChris Wilson } 123410be98a7SChris Wilson 1235a9877da2SChris Wilson /* Serialise with the remote context */ 1236a9877da2SChris Wilson ret = intel_context_prepare_remote_request(ce, rq); 1237a9877da2SChris Wilson if (ret == 0) 1238ce476c80SChris Wilson ret = gen8_emit_rpcs_config(rq, ce, sseu); 123910be98a7SChris Wilson 124010be98a7SChris Wilson i915_request_add(rq); 1241feed5c7bSChris Wilson out_unpin: 1242feed5c7bSChris Wilson intel_context_unpin(ce); 124310be98a7SChris Wilson return ret; 124410be98a7SChris Wilson } 124510be98a7SChris Wilson
/*
 * intel_context_reconfigure_sseu() - update ce->sseu under the pinned lock,
 * skipping the HW update when the configuration is unchanged. Gen8+ only.
 */
124610be98a7SChris Wilson static int 12477e805762SChris Wilson intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) 124810be98a7SChris Wilson { 124910be98a7SChris Wilson int ret; 125010be98a7SChris Wilson 1251cb0c43f3SChris Wilson GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); 125210be98a7SChris Wilson 125310be98a7SChris Wilson ret = intel_context_lock_pinned(ce); 125410be98a7SChris Wilson if (ret) 125510be98a7SChris Wilson return ret; 125610be98a7SChris Wilson 125710be98a7SChris Wilson /* Nothing to do if unmodified.
*/ 125810be98a7SChris Wilson if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) 125910be98a7SChris Wilson goto unlock; 126010be98a7SChris Wilson 126110be98a7SChris Wilson ret = gen8_modify_rpcs(ce, sseu); 126210be98a7SChris Wilson if (!ret) 126310be98a7SChris Wilson ce->sseu = sseu; 126410be98a7SChris Wilson 126510be98a7SChris Wilson unlock: 126610be98a7SChris Wilson intel_context_unlock_pinned(ce); 126710be98a7SChris Wilson return ret; 126810be98a7SChris Wilson } 126910be98a7SChris Wilson 127010be98a7SChris Wilson static int 127110be98a7SChris Wilson user_to_context_sseu(struct drm_i915_private *i915, 127210be98a7SChris Wilson const struct drm_i915_gem_context_param_sseu *user, 127310be98a7SChris Wilson struct intel_sseu *context) 127410be98a7SChris Wilson { 127510be98a7SChris Wilson const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; 127610be98a7SChris Wilson 127710be98a7SChris Wilson /* No zeros in any field. */ 127810be98a7SChris Wilson if (!user->slice_mask || !user->subslice_mask || 127910be98a7SChris Wilson !user->min_eus_per_subslice || !user->max_eus_per_subslice) 128010be98a7SChris Wilson return -EINVAL; 128110be98a7SChris Wilson 128210be98a7SChris Wilson /* Max > min. */ 128310be98a7SChris Wilson if (user->max_eus_per_subslice < user->min_eus_per_subslice) 128410be98a7SChris Wilson return -EINVAL; 128510be98a7SChris Wilson 128610be98a7SChris Wilson /* 128710be98a7SChris Wilson * Some future proofing on the types since the uAPI is wider than the 128810be98a7SChris Wilson * current internal implementation. 
128910be98a7SChris Wilson */ 129010be98a7SChris Wilson if (overflows_type(user->slice_mask, context->slice_mask) || 129110be98a7SChris Wilson overflows_type(user->subslice_mask, context->subslice_mask) || 129210be98a7SChris Wilson overflows_type(user->min_eus_per_subslice, 129310be98a7SChris Wilson context->min_eus_per_subslice) || 129410be98a7SChris Wilson overflows_type(user->max_eus_per_subslice, 129510be98a7SChris Wilson context->max_eus_per_subslice)) 129610be98a7SChris Wilson return -EINVAL; 129710be98a7SChris Wilson 129810be98a7SChris Wilson /* Check validity against hardware. */ 129910be98a7SChris Wilson if (user->slice_mask & ~device->slice_mask) 130010be98a7SChris Wilson return -EINVAL; 130110be98a7SChris Wilson 130210be98a7SChris Wilson if (user->subslice_mask & ~device->subslice_mask[0]) 130310be98a7SChris Wilson return -EINVAL; 130410be98a7SChris Wilson 130510be98a7SChris Wilson if (user->max_eus_per_subslice > device->max_eus_per_subslice) 130610be98a7SChris Wilson return -EINVAL; 130710be98a7SChris Wilson 130810be98a7SChris Wilson context->slice_mask = user->slice_mask; 130910be98a7SChris Wilson context->subslice_mask = user->subslice_mask; 131010be98a7SChris Wilson context->min_eus_per_subslice = user->min_eus_per_subslice; 131110be98a7SChris Wilson context->max_eus_per_subslice = user->max_eus_per_subslice; 131210be98a7SChris Wilson 131310be98a7SChris Wilson /* Part specific restrictions. */ 131410be98a7SChris Wilson if (IS_GEN(i915, 11)) { 131510be98a7SChris Wilson unsigned int hw_s = hweight8(device->slice_mask); 131610be98a7SChris Wilson unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 131710be98a7SChris Wilson unsigned int req_s = hweight8(context->slice_mask); 131810be98a7SChris Wilson unsigned int req_ss = hweight8(context->subslice_mask); 131910be98a7SChris Wilson 132010be98a7SChris Wilson /* 132110be98a7SChris Wilson * Only full subslice enablement is possible if more than one 132210be98a7SChris Wilson * slice is turned on. 
132310be98a7SChris Wilson */ 132410be98a7SChris Wilson if (req_s > 1 && req_ss != hw_ss_per_s) 132510be98a7SChris Wilson return -EINVAL; 132610be98a7SChris Wilson 132710be98a7SChris Wilson /* 132810be98a7SChris Wilson * If more than four (SScount bitfield limit) subslices are 132910be98a7SChris Wilson * requested then the number has to be even. 133010be98a7SChris Wilson */ 133110be98a7SChris Wilson if (req_ss > 4 && (req_ss & 1)) 133210be98a7SChris Wilson return -EINVAL; 133310be98a7SChris Wilson 133410be98a7SChris Wilson /* 133510be98a7SChris Wilson * If only one slice is enabled and subslice count is below the 133610be98a7SChris Wilson * device full enablement, it must be at most half of the all 133710be98a7SChris Wilson * available subslices. 133810be98a7SChris Wilson */ 133910be98a7SChris Wilson if (req_s == 1 && req_ss < hw_ss_per_s && 134010be98a7SChris Wilson req_ss > (hw_ss_per_s / 2)) 134110be98a7SChris Wilson return -EINVAL; 134210be98a7SChris Wilson 134310be98a7SChris Wilson /* ABI restriction - VME use case only. */ 134410be98a7SChris Wilson 134510be98a7SChris Wilson /* All slices or one slice only. */ 134610be98a7SChris Wilson if (req_s != 1 && req_s != hw_s) 134710be98a7SChris Wilson return -EINVAL; 134810be98a7SChris Wilson 134910be98a7SChris Wilson /* 135010be98a7SChris Wilson * Half subslices or full enablement only when one slice is 135110be98a7SChris Wilson * enabled. 135210be98a7SChris Wilson */ 135310be98a7SChris Wilson if (req_s == 1 && 135410be98a7SChris Wilson (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 135510be98a7SChris Wilson return -EINVAL; 135610be98a7SChris Wilson 135710be98a7SChris Wilson /* No EU configuration changes. 
	 */
	if ((user->min_eus_per_subslice !=
	     device->max_eus_per_subslice) ||
	    (user->max_eus_per_subslice !=
	     device->max_eus_per_subslice))
		return -EINVAL;
	}

	return 0;
}

/*
 * set_sseu - apply a userspace-requested SSEU configuration to one engine
 * of a context.
 *
 * Validates the user parameter block (minimum size, reserved field must be
 * zero, only the ENGINE_INDEX flag is allowed), looks up the target engine
 * (by user engine-map index when the flag is set), and reconfigures its
 * slice/subslice/EU state. Only Gen11 hardware and the render class are
 * supported; anything else returns -ENODEV. On success args->size reports
 * the amount of the user struct consumed.
 */
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	/* Returns a reference on the context; dropped on every exit below. */
	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

/* Shared argument bundle for the set_engines() extension callbacks. */
struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

/*
 * set_engines__load_balance - CONTEXT_ENGINES_EXT_LOAD_BALANCE handler.
 *
 * Reads the user extension describing a load-balancing virtual engine,
 * resolves the requested sibling engines and installs a freshly created
 * virtual engine into the (still private) engine map slot ext->engine_index.
 * Requires execlists submission; not implemented for GuC submission.
 */
static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];	/* avoid kmalloc for small sets */
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	/* Clamp idx after the bounds check to block speculative OOB access. */
	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	/*
	 * Publish into the slot; if a racing extension already filled it,
	 * drop our context and report -EEXIST. On success err is still 0
	 * from the mbz checks above and we fall through to cleanup.
	 */
	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

/*
 * set_engines__bond - CONTEXT_ENGINES_EXT_BOND handler.
 *
 * Attaches submit-fence bonds to a previously installed virtual engine:
 * for each (master, bond) pair given by userspace, tells the virtual
 * engine which physical engine to use when a bonded request is submitted.
 */
static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	/* Clamp idx after the bounds check to block speculative OOB access. */
	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between; and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}

/* Dispatch table for I915_CONTEXT_PARAM_ENGINES user extensions. */
static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};

/*
 * set_engines - I915_CONTEXT_PARAM_ENGINES setter.
 *
 * Replaces the context's engine map with one described by userspace
 * (or restores the legacy default map when args->size == 0), then runs
 * any user extensions (load-balance, bond) against the new map before
 * publishing it under ctx->engines_mutex.
 */
static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	/* User array size must cover the header and whole engine entries. */
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);

	set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
			      GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	init_rcu_head(&set.engines->rcu);
	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		/* An (INVALID, NONE) entry leaves a hole in the map. */
		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		intel_context_set_gem(ce, ctx);

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	/* Run user extensions (load-balance/bond) against the new map. */
	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	/* '1' asserts the update is serialised (engines_mutex is held). */
	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	/* Free the displaced map only after an RCU grace period. */
	call_rcu(&set.engines->rcu, free_engines_rcu);

	return 0;
}

/*
 * __copy_engines - duplicate an engine map, taking a reference on each
 * populated intel_context. Holes (NULL entries) are preserved. Returns
 * ERR_PTR(-ENOMEM) on allocation failure; caller frees via free_engines().
 */
static struct i915_gem_engines *
__copy_engines(struct i915_gem_engines *e)
{
	struct i915_gem_engines *copy;
	unsigned int n;

	copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	init_rcu_head(&copy->rcu);
	for (n = 0; n < e->num_engines; n++) {
		if (e->engines[n])
			copy->engines[n] = intel_context_get(e->engines[n]);
		else
			copy->engines[n] = NULL;
	}
	copy->num_engines = n;

	return copy;
}

/*
 * get_engines - I915_CONTEXT_PARAM_ENGINES getter.
 *
 * Snapshots the user engine map (under engines_mutex) and writes it back
 * to userspace as class/instance pairs; holes are reported as
 * (INVALID, NONE). With args->size == 0 only the required size is
 * returned. A context still on the legacy map reports size 0.
 */
static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	int err = 0;

	err = mutex_lock_interruptible(&ctx->engines_mutex);
	if (err)
		return err;

	e = NULL;
	if (i915_gem_context_user_engines(ctx))
		e = __copy_engines(i915_gem_context_engines(ctx));
	mutex_unlock(&ctx->engines_mutex);
	if (IS_ERR_OR_NULL(e)) {
		args->size = 0;
		return PTR_ERR_OR_ZERO(e);
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		/* Size query only: report how much space is needed. */
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (!access_ok(user, size)) {
		err = -EFAULT;
		goto err_free;
	}

	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	free_engines(e);
	return err;
}

/*
 * set_persistence - I915_CONTEXT_PARAM_PERSISTENCE setter; args->size
 * must be zero, args->value is the requested persistence boolean.
 */
static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

/*
 * __apply_priority - per-engine-context callback used by set_priority():
 * enables semaphore usage only for contexts at or above normal priority
 * (and only where the engine supports semaphores).
 */
static void __apply_priority(struct intel_context *ce, void *arg)
{
	struct i915_gem_context *ctx = arg;

	if (!intel_engine_has_semaphores(ce->engine))
		return;

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
		intel_context_set_use_semaphores(ce);
	else
		intel_context_clear_use_semaphores(ce);
}

static
int set_priority(struct i915_gem_context *ctx,
		 const struct drm_i915_gem_context_param *args)
{
	/*
	 * I915_CONTEXT_PARAM_PRIORITY setter: range-checks the requested
	 * priority, requires CAP_SYS_NICE to raise above the default, and
	 * propagates the result to every engine context.
	 */
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = I915_USER_PRIORITY(priority);
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}

/*
 * ctx_setparam - dispatch a single context-parameter update to the
 * appropriate setter. Unknown parameters (and the retired BAN_PERIOD)
 * return -EINVAL.
 */
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		/* Only CAP_SYS_ADMIN may opt out of being banned. */
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Argument bundle for context-create user extensions. */
struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

/*
 * create_setparam - CREATE_EXT_SETPARAM handler: applies a setparam
 * during context creation; the embedded ctx_id must be zero.
 */
static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

/*
 * clone_engines - CLONE_ENGINES: give dst a copy of src's engine map.
 * Physical engines get fresh intel_contexts; virtual engines are cloned
 * (see comment below). Replaces dst's constructor-time map.
 */
static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	bool user_engines;
	unsigned long n;

	clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
	if (!clone)
		goto err_unlock;

	init_rcu_head(&clone->rcu);
	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);
	}
	clone->num_engines = n;

	user_engines = i915_gem_context_user_engines(src);
	i915_gem_context_unlock_engines(src);

	/* Serialised by constructor */
	free_engines(__context_engines_static(dst));
	RCU_INIT_POINTER(dst->engines, clone);
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_gem_context_unlock_engines(src);
	return -ENOMEM;
}

/* CLONE_FLAGS: copy the user-visible flag bits from src to dst. */
static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

/* CLONE_SCHEDATTR: copy the scheduling attributes (priority etc.). */
static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

/*
 * clone_sseu - CLONE_SSEU: copy per-engine SSEU state from src to dst.
 * Requires the two engine maps to have matching layout (same count,
 * same class at each slot), otherwise -EINVAL.
 */
static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

/* CLONE_TIMELINE: share src's explicit timeline with dst, if it has one. */
static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

/*
 * clone_vm - CLONE_VM: share src's address space with dst. Takes a
 * reference on the vm under RCU, then installs it under dst->mutex;
 * an interrupted lock acquisition yields -EINTR.
 */
static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
} 210910be98a7SChris Wilson 211027dbae8fSChris Wilson i915_vm_put(vm); 2111a4e7ccdaSChris Wilson return err; 211210be98a7SChris Wilson } 211310be98a7SChris Wilson 211410be98a7SChris Wilson static int create_clone(struct i915_user_extension __user *ext, void *data) 211510be98a7SChris Wilson { 211610be98a7SChris Wilson static int (* const fn[])(struct i915_gem_context *dst, 211710be98a7SChris Wilson struct i915_gem_context *src) = { 211810be98a7SChris Wilson #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y 211910be98a7SChris Wilson MAP(ENGINES, clone_engines), 212010be98a7SChris Wilson MAP(FLAGS, clone_flags), 212110be98a7SChris Wilson MAP(SCHEDATTR, clone_schedattr), 212210be98a7SChris Wilson MAP(SSEU, clone_sseu), 212310be98a7SChris Wilson MAP(TIMELINE, clone_timeline), 212410be98a7SChris Wilson MAP(VM, clone_vm), 212510be98a7SChris Wilson #undef MAP 212610be98a7SChris Wilson }; 212710be98a7SChris Wilson struct drm_i915_gem_context_create_ext_clone local; 212810be98a7SChris Wilson const struct create_ext *arg = data; 212910be98a7SChris Wilson struct i915_gem_context *dst = arg->ctx; 213010be98a7SChris Wilson struct i915_gem_context *src; 213110be98a7SChris Wilson int err, bit; 213210be98a7SChris Wilson 213310be98a7SChris Wilson if (copy_from_user(&local, ext, sizeof(local))) 213410be98a7SChris Wilson return -EFAULT; 213510be98a7SChris Wilson 213610be98a7SChris Wilson BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != 213710be98a7SChris Wilson I915_CONTEXT_CLONE_UNKNOWN); 213810be98a7SChris Wilson 213910be98a7SChris Wilson if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) 214010be98a7SChris Wilson return -EINVAL; 214110be98a7SChris Wilson 214210be98a7SChris Wilson if (local.rsvd) 214310be98a7SChris Wilson return -EINVAL; 214410be98a7SChris Wilson 214510be98a7SChris Wilson rcu_read_lock(); 214610be98a7SChris Wilson src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); 214710be98a7SChris Wilson rcu_read_unlock(); 214810be98a7SChris 
Wilson if (!src) 214910be98a7SChris Wilson return -ENOENT; 215010be98a7SChris Wilson 215110be98a7SChris Wilson GEM_BUG_ON(src == dst); 215210be98a7SChris Wilson 215310be98a7SChris Wilson for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { 215410be98a7SChris Wilson if (!(local.flags & BIT(bit))) 215510be98a7SChris Wilson continue; 215610be98a7SChris Wilson 215710be98a7SChris Wilson err = fn[bit](dst, src); 215810be98a7SChris Wilson if (err) 215910be98a7SChris Wilson return err; 216010be98a7SChris Wilson } 216110be98a7SChris Wilson 216210be98a7SChris Wilson return 0; 216310be98a7SChris Wilson } 216410be98a7SChris Wilson 216510be98a7SChris Wilson static const i915_user_extension_fn create_extensions[] = { 216610be98a7SChris Wilson [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, 216710be98a7SChris Wilson [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, 216810be98a7SChris Wilson }; 216910be98a7SChris Wilson 217010be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv) 217110be98a7SChris Wilson { 217210be98a7SChris Wilson return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 217310be98a7SChris Wilson } 217410be98a7SChris Wilson 217510be98a7SChris Wilson int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 217610be98a7SChris Wilson struct drm_file *file) 217710be98a7SChris Wilson { 217810be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 217910be98a7SChris Wilson struct drm_i915_gem_context_create_ext *args = data; 218010be98a7SChris Wilson struct create_ext ext_data; 218110be98a7SChris Wilson int ret; 2182c100777cSTvrtko Ursulin u32 id; 218310be98a7SChris Wilson 218410be98a7SChris Wilson if (!DRIVER_CAPS(i915)->has_logical_contexts) 218510be98a7SChris Wilson return -ENODEV; 218610be98a7SChris Wilson 218710be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) 218810be98a7SChris Wilson return -EINVAL; 218910be98a7SChris Wilson 2190cb823ed9SChris Wilson ret = 
intel_gt_terminally_wedged(&i915->gt); 219110be98a7SChris Wilson if (ret) 219210be98a7SChris Wilson return ret; 219310be98a7SChris Wilson 219410be98a7SChris Wilson ext_data.fpriv = file->driver_priv; 219510be98a7SChris Wilson if (client_is_banned(ext_data.fpriv)) { 2196baa89ba3SWambui Karuga drm_dbg(&i915->drm, 2197baa89ba3SWambui Karuga "client %s[%d] banned from creating ctx\n", 2198ba16a48aSTvrtko Ursulin current->comm, task_pid_nr(current)); 219910be98a7SChris Wilson return -EIO; 220010be98a7SChris Wilson } 220110be98a7SChris Wilson 220210be98a7SChris Wilson ext_data.ctx = i915_gem_create_context(i915, args->flags); 220310be98a7SChris Wilson if (IS_ERR(ext_data.ctx)) 220410be98a7SChris Wilson return PTR_ERR(ext_data.ctx); 220510be98a7SChris Wilson 220610be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { 220710be98a7SChris Wilson ret = i915_user_extensions(u64_to_user_ptr(args->extensions), 220810be98a7SChris Wilson create_extensions, 220910be98a7SChris Wilson ARRAY_SIZE(create_extensions), 221010be98a7SChris Wilson &ext_data); 221110be98a7SChris Wilson if (ret) 221210be98a7SChris Wilson goto err_ctx; 221310be98a7SChris Wilson } 221410be98a7SChris Wilson 2215c100777cSTvrtko Ursulin ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id); 221610be98a7SChris Wilson if (ret < 0) 221710be98a7SChris Wilson goto err_ctx; 221810be98a7SChris Wilson 2219c100777cSTvrtko Ursulin args->ctx_id = id; 2220baa89ba3SWambui Karuga drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id); 222110be98a7SChris Wilson 222210be98a7SChris Wilson return 0; 222310be98a7SChris Wilson 222410be98a7SChris Wilson err_ctx: 222510be98a7SChris Wilson context_close(ext_data.ctx); 222610be98a7SChris Wilson return ret; 222710be98a7SChris Wilson } 222810be98a7SChris Wilson 222910be98a7SChris Wilson int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 223010be98a7SChris Wilson struct drm_file *file) 223110be98a7SChris Wilson { 
223210be98a7SChris Wilson struct drm_i915_gem_context_destroy *args = data; 223310be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 223410be98a7SChris Wilson struct i915_gem_context *ctx; 223510be98a7SChris Wilson 223610be98a7SChris Wilson if (args->pad != 0) 223710be98a7SChris Wilson return -EINVAL; 223810be98a7SChris Wilson 223910be98a7SChris Wilson if (!args->ctx_id) 224010be98a7SChris Wilson return -ENOENT; 224110be98a7SChris Wilson 2242c100777cSTvrtko Ursulin ctx = xa_erase(&file_priv->context_xa, args->ctx_id); 224310be98a7SChris Wilson if (!ctx) 224410be98a7SChris Wilson return -ENOENT; 224510be98a7SChris Wilson 224610be98a7SChris Wilson context_close(ctx); 224710be98a7SChris Wilson return 0; 224810be98a7SChris Wilson } 224910be98a7SChris Wilson 225010be98a7SChris Wilson static int get_sseu(struct i915_gem_context *ctx, 225110be98a7SChris Wilson struct drm_i915_gem_context_param *args) 225210be98a7SChris Wilson { 225310be98a7SChris Wilson struct drm_i915_gem_context_param_sseu user_sseu; 225410be98a7SChris Wilson struct intel_context *ce; 225510be98a7SChris Wilson unsigned long lookup; 225610be98a7SChris Wilson int err; 225710be98a7SChris Wilson 225810be98a7SChris Wilson if (args->size == 0) 225910be98a7SChris Wilson goto out; 226010be98a7SChris Wilson else if (args->size < sizeof(user_sseu)) 226110be98a7SChris Wilson return -EINVAL; 226210be98a7SChris Wilson 226310be98a7SChris Wilson if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 226410be98a7SChris Wilson sizeof(user_sseu))) 226510be98a7SChris Wilson return -EFAULT; 226610be98a7SChris Wilson 226710be98a7SChris Wilson if (user_sseu.rsvd) 226810be98a7SChris Wilson return -EINVAL; 226910be98a7SChris Wilson 227010be98a7SChris Wilson if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 227110be98a7SChris Wilson return -EINVAL; 227210be98a7SChris Wilson 227310be98a7SChris Wilson lookup = 0; 227410be98a7SChris Wilson if (user_sseu.flags & 
I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 227510be98a7SChris Wilson lookup |= LOOKUP_USER_INDEX; 227610be98a7SChris Wilson 227710be98a7SChris Wilson ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 227810be98a7SChris Wilson if (IS_ERR(ce)) 227910be98a7SChris Wilson return PTR_ERR(ce); 228010be98a7SChris Wilson 228110be98a7SChris Wilson err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ 228210be98a7SChris Wilson if (err) { 228310be98a7SChris Wilson intel_context_put(ce); 228410be98a7SChris Wilson return err; 228510be98a7SChris Wilson } 228610be98a7SChris Wilson 228710be98a7SChris Wilson user_sseu.slice_mask = ce->sseu.slice_mask; 228810be98a7SChris Wilson user_sseu.subslice_mask = ce->sseu.subslice_mask; 228910be98a7SChris Wilson user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; 229010be98a7SChris Wilson user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; 229110be98a7SChris Wilson 229210be98a7SChris Wilson intel_context_unlock_pinned(ce); 229310be98a7SChris Wilson intel_context_put(ce); 229410be98a7SChris Wilson 229510be98a7SChris Wilson if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, 229610be98a7SChris Wilson sizeof(user_sseu))) 229710be98a7SChris Wilson return -EFAULT; 229810be98a7SChris Wilson 229910be98a7SChris Wilson out: 230010be98a7SChris Wilson args->size = sizeof(user_sseu); 230110be98a7SChris Wilson 230210be98a7SChris Wilson return 0; 230310be98a7SChris Wilson } 230410be98a7SChris Wilson 230510be98a7SChris Wilson int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 230610be98a7SChris Wilson struct drm_file *file) 230710be98a7SChris Wilson { 230810be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 230910be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 231010be98a7SChris Wilson struct i915_gem_context *ctx; 231110be98a7SChris Wilson int ret = 0; 231210be98a7SChris Wilson 231310be98a7SChris Wilson ctx = 
i915_gem_context_lookup(file_priv, args->ctx_id); 231410be98a7SChris Wilson if (!ctx) 231510be98a7SChris Wilson return -ENOENT; 231610be98a7SChris Wilson 231710be98a7SChris Wilson switch (args->param) { 231810be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ZEROMAP: 231910be98a7SChris Wilson args->size = 0; 232010be98a7SChris Wilson args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 232110be98a7SChris Wilson break; 232210be98a7SChris Wilson 232310be98a7SChris Wilson case I915_CONTEXT_PARAM_GTT_SIZE: 232410be98a7SChris Wilson args->size = 0; 2325a4e7ccdaSChris Wilson rcu_read_lock(); 2326a4e7ccdaSChris Wilson if (rcu_access_pointer(ctx->vm)) 2327a4e7ccdaSChris Wilson args->value = rcu_dereference(ctx->vm)->total; 232810be98a7SChris Wilson else 232910be98a7SChris Wilson args->value = to_i915(dev)->ggtt.vm.total; 2330a4e7ccdaSChris Wilson rcu_read_unlock(); 233110be98a7SChris Wilson break; 233210be98a7SChris Wilson 233310be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 233410be98a7SChris Wilson args->size = 0; 233510be98a7SChris Wilson args->value = i915_gem_context_no_error_capture(ctx); 233610be98a7SChris Wilson break; 233710be98a7SChris Wilson 233810be98a7SChris Wilson case I915_CONTEXT_PARAM_BANNABLE: 233910be98a7SChris Wilson args->size = 0; 234010be98a7SChris Wilson args->value = i915_gem_context_is_bannable(ctx); 234110be98a7SChris Wilson break; 234210be98a7SChris Wilson 234310be98a7SChris Wilson case I915_CONTEXT_PARAM_RECOVERABLE: 234410be98a7SChris Wilson args->size = 0; 234510be98a7SChris Wilson args->value = i915_gem_context_is_recoverable(ctx); 234610be98a7SChris Wilson break; 234710be98a7SChris Wilson 234810be98a7SChris Wilson case I915_CONTEXT_PARAM_PRIORITY: 234910be98a7SChris Wilson args->size = 0; 235010be98a7SChris Wilson args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; 235110be98a7SChris Wilson break; 235210be98a7SChris Wilson 235310be98a7SChris Wilson case I915_CONTEXT_PARAM_SSEU: 235410be98a7SChris Wilson ret 
= get_sseu(ctx, args); 235510be98a7SChris Wilson break; 235610be98a7SChris Wilson 235710be98a7SChris Wilson case I915_CONTEXT_PARAM_VM: 235810be98a7SChris Wilson ret = get_ppgtt(file_priv, ctx, args); 235910be98a7SChris Wilson break; 236010be98a7SChris Wilson 236110be98a7SChris Wilson case I915_CONTEXT_PARAM_ENGINES: 236210be98a7SChris Wilson ret = get_engines(ctx, args); 236310be98a7SChris Wilson break; 236410be98a7SChris Wilson 2365a0e04715SChris Wilson case I915_CONTEXT_PARAM_PERSISTENCE: 2366a0e04715SChris Wilson args->size = 0; 2367a0e04715SChris Wilson args->value = i915_gem_context_is_persistent(ctx); 2368a0e04715SChris Wilson break; 2369a0e04715SChris Wilson 237010be98a7SChris Wilson case I915_CONTEXT_PARAM_BAN_PERIOD: 237110be98a7SChris Wilson default: 237210be98a7SChris Wilson ret = -EINVAL; 237310be98a7SChris Wilson break; 237410be98a7SChris Wilson } 237510be98a7SChris Wilson 237610be98a7SChris Wilson i915_gem_context_put(ctx); 237710be98a7SChris Wilson return ret; 237810be98a7SChris Wilson } 237910be98a7SChris Wilson 238010be98a7SChris Wilson int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 238110be98a7SChris Wilson struct drm_file *file) 238210be98a7SChris Wilson { 238310be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 238410be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 238510be98a7SChris Wilson struct i915_gem_context *ctx; 238610be98a7SChris Wilson int ret; 238710be98a7SChris Wilson 238810be98a7SChris Wilson ctx = i915_gem_context_lookup(file_priv, args->ctx_id); 238910be98a7SChris Wilson if (!ctx) 239010be98a7SChris Wilson return -ENOENT; 239110be98a7SChris Wilson 239210be98a7SChris Wilson ret = ctx_setparam(file_priv, ctx, args); 239310be98a7SChris Wilson 239410be98a7SChris Wilson i915_gem_context_put(ctx); 239510be98a7SChris Wilson return ret; 239610be98a7SChris Wilson } 239710be98a7SChris Wilson 239810be98a7SChris Wilson int i915_gem_context_reset_stats_ioctl(struct 
drm_device *dev, 239910be98a7SChris Wilson void *data, struct drm_file *file) 240010be98a7SChris Wilson { 2401a4e7ccdaSChris Wilson struct drm_i915_private *i915 = to_i915(dev); 240210be98a7SChris Wilson struct drm_i915_reset_stats *args = data; 240310be98a7SChris Wilson struct i915_gem_context *ctx; 240410be98a7SChris Wilson int ret; 240510be98a7SChris Wilson 240610be98a7SChris Wilson if (args->flags || args->pad) 240710be98a7SChris Wilson return -EINVAL; 240810be98a7SChris Wilson 240910be98a7SChris Wilson ret = -ENOENT; 241010be98a7SChris Wilson rcu_read_lock(); 241110be98a7SChris Wilson ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); 241210be98a7SChris Wilson if (!ctx) 241310be98a7SChris Wilson goto out; 241410be98a7SChris Wilson 241510be98a7SChris Wilson /* 241610be98a7SChris Wilson * We opt for unserialised reads here. This may result in tearing 241710be98a7SChris Wilson * in the extremely unlikely event of a GPU hang on this context 241810be98a7SChris Wilson * as we are querying them. If we need that extra layer of protection, 241910be98a7SChris Wilson * we should wrap the hangstats with a seqlock. 
242010be98a7SChris Wilson */ 242110be98a7SChris Wilson 242210be98a7SChris Wilson if (capable(CAP_SYS_ADMIN)) 2423a4e7ccdaSChris Wilson args->reset_count = i915_reset_count(&i915->gpu_error); 242410be98a7SChris Wilson else 242510be98a7SChris Wilson args->reset_count = 0; 242610be98a7SChris Wilson 242710be98a7SChris Wilson args->batch_active = atomic_read(&ctx->guilty_count); 242810be98a7SChris Wilson args->batch_pending = atomic_read(&ctx->active_count); 242910be98a7SChris Wilson 243010be98a7SChris Wilson ret = 0; 243110be98a7SChris Wilson out: 243210be98a7SChris Wilson rcu_read_unlock(); 243310be98a7SChris Wilson return ret; 243410be98a7SChris Wilson } 243510be98a7SChris Wilson 243610be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine() */ 243710be98a7SChris Wilson struct intel_context * 243810be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 243910be98a7SChris Wilson { 244010be98a7SChris Wilson const struct i915_gem_engines *e = it->engines; 244110be98a7SChris Wilson struct intel_context *ctx; 244210be98a7SChris Wilson 244310be98a7SChris Wilson do { 244410be98a7SChris Wilson if (it->idx >= e->num_engines) 244510be98a7SChris Wilson return NULL; 244610be98a7SChris Wilson 244710be98a7SChris Wilson ctx = e->engines[it->idx++]; 244810be98a7SChris Wilson } while (!ctx); 244910be98a7SChris Wilson 245010be98a7SChris Wilson return ctx; 245110be98a7SChris Wilson } 245210be98a7SChris Wilson 245310be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 245410be98a7SChris Wilson #include "selftests/mock_context.c" 245510be98a7SChris Wilson #include "selftests/i915_gem_context.c" 245610be98a7SChris Wilson #endif 245710be98a7SChris Wilson 245810be98a7SChris Wilson static void i915_global_gem_context_shrink(void) 245910be98a7SChris Wilson { 246010be98a7SChris Wilson kmem_cache_shrink(global.slab_luts); 246110be98a7SChris Wilson } 246210be98a7SChris Wilson 246310be98a7SChris Wilson static void 
i915_global_gem_context_exit(void) 246410be98a7SChris Wilson { 246510be98a7SChris Wilson kmem_cache_destroy(global.slab_luts); 246610be98a7SChris Wilson } 246710be98a7SChris Wilson 246810be98a7SChris Wilson static struct i915_global_gem_context global = { { 246910be98a7SChris Wilson .shrink = i915_global_gem_context_shrink, 247010be98a7SChris Wilson .exit = i915_global_gem_context_exit, 247110be98a7SChris Wilson } }; 247210be98a7SChris Wilson 247310be98a7SChris Wilson int __init i915_global_gem_context_init(void) 247410be98a7SChris Wilson { 247510be98a7SChris Wilson global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); 247610be98a7SChris Wilson if (!global.slab_luts) 247710be98a7SChris Wilson return -ENOMEM; 247810be98a7SChris Wilson 247910be98a7SChris Wilson i915_global_register(&global.base); 248010be98a7SChris Wilson return 0; 248110be98a7SChris Wilson } 2482